diff --git a/.dockerignore b/.dockerignore index 4a8cf7be9..d09b0e483 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,18 +1,18 @@ -# Ignore Go workspace files during Docker builds -go.work -go.work.sum - -# Common ignores -.git/ -.gitignore -README.md -.env -.env.local -*.log -tmp/ -.DS_Store - -**/node_modules -**/package-lock.json -**/.venv +# Ignore Go workspace files during Docker builds +go.work +go.work.sum + +# Common ignores +.git/ +.gitignore +README.md +.env +.env.local +*.log +tmp/ +.DS_Store + +**/node_modules +**/package-lock.json +**/.venv **/uv.lock \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index fbf0c0d67..c37bea998 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ -**/ @agntcy/dir-maintainers -versions.yaml @agntcy/dir-maintainers +**/ @agntcy/dir-maintainers +versions.yaml @agntcy/dir-maintainers diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 65c2826f6..e45fc991c 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,60 +1,60 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - ---- -name: Bug Report -description: Report a bug to help us improve. -title: "[Bug]: " -labels: ["bug", "triage"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - - type: textarea - id: description - attributes: - label: Bug Description - description: Please provide a description of the problem - validations: - required: true - - type: textarea - id: expected - attributes: - label: Expected Behavior - description: Please describe what you expected would happen - validations: - required: true - - type: input - id: version - attributes: - label: Affected Version - description: | - Please specify the version where this issue was encountered. - Common versions: v0.3.0, v0.2.13, main (development) - You can find all releases at: https://github.com/agntcy/dir/releases - placeholder: "e.g., v0.3.0 or main" - validations: - required: true - - type: textarea - id: steps - attributes: - label: Steps to Reproduce - description: Please provide all steps to reproduce the behavior - placeholder: | - 1. In this environment... - 1. With this config... - 1. Run `this command`... - 1. See error... - validations: - required: true - - type: checkboxes - id: checklist - attributes: - label: Checklist - description: By submitting this issue, you agree to the following - options: - - label: I have read the [contributing guidelines](/agntcy/repo-template/blob/main/CONTRIBUTING.md) - required: true - - label: I have verified this does not duplicate an existing issue - required: true +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +--- +name: Bug Report +description: Report a bug to help us improve. +title: "[Bug]: " +labels: ["bug", "triage"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! 
+ - type: textarea + id: description + attributes: + label: Bug Description + description: Please provide a description of the problem + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: Please describe what you expected would happen + validations: + required: true + - type: input + id: version + attributes: + label: Affected Version + description: | + Please specify the version where this issue was encountered. + Common versions: v0.3.0, v0.2.13, main (development) + You can find all releases at: https://github.com/agntcy/dir/releases + placeholder: "e.g., v0.3.0 or main" + validations: + required: true + - type: textarea + id: steps + attributes: + label: Steps to Reproduce + description: Please provide all steps to reproduce the behavior + placeholder: | + 1. In this environment... + 1. With this config... + 1. Run `this command`... + 1. See error... + validations: + required: true + - type: checkboxes + id: checklist + attributes: + label: Checklist + description: By submitting this issue, you agree to the following + options: + - label: I have read the [contributing guidelines](/agntcy/repo-template/blob/main/CONTRIBUTING.md) + required: true + - label: I have verified this does not duplicate an existing issue + required: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 07280146e..dcdfc4a3d 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,58 +1,58 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - ---- -name: Feature Request -description: Suggest a feature for this project. -title: "[Feature]: " -labels: ["enhancement", "triage"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to request a feature or enhancement! - - type: textarea - id: description - attributes: - label: Problem Statement - description: | - Please describe the problem or limitation to be addressed by - the proposed feature - validations: - required: true - - type: textarea - id: solution - attributes: - label: Proposed Solution - description: | - Please describe what you envision the solution to this - problem would look like - validations: - required: true - - type: textarea - id: alternatives - attributes: - label: Alternatives Considered - description: | - Please briefly describe which alternatives, if any, have been - considered, including merits of alternate approaches and any tradeoffs - validations: - required: false - - type: textarea - id: context - attributes: - label: Additional Context - description: Please provide any other information that may be relevant - validations: - required: false - - type: checkboxes - id: checklist - attributes: - label: Checklist - description: By submitting this request, you agree to the following - options: - - label: I have read the [contributing guidelines](/agntcy/repo-template/blob/main/CONTRIBUTING.md) - required: true - - label: | - I have verified this does not duplicate an existing feature request - required: true +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +--- +name: Feature Request +description: Suggest a feature for this project. +title: "[Feature]: " +labels: ["enhancement", "triage"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to request a feature or enhancement! 
+ - type: textarea + id: description + attributes: + label: Problem Statement + description: | + Please describe the problem or limitation to be addressed by + the proposed feature + validations: + required: true + - type: textarea + id: solution + attributes: + label: Proposed Solution + description: | + Please describe what you envision the solution to this + problem would look like + validations: + required: true + - type: textarea + id: alternatives + attributes: + label: Alternatives Considered + description: | + Please briefly describe which alternatives, if any, have been + considered, including merits of alternate approaches and any tradeoffs + validations: + required: false + - type: textarea + id: context + attributes: + label: Additional Context + description: Please provide any other information that may be relevant + validations: + required: false + - type: checkboxes + id: checklist + attributes: + label: Checklist + description: By submitting this request, you agree to the following + options: + - label: I have read the [contributing guidelines](/agntcy/repo-template/blob/main/CONTRIBUTING.md) + required: true + - label: | + I have verified this does not duplicate an existing feature request + required: true diff --git a/.github/actions/setup-dirctl/README.md b/.github/actions/setup-dirctl/README.md index 84892d5a8..e0e80a9ad 100644 --- a/.github/actions/setup-dirctl/README.md +++ b/.github/actions/setup-dirctl/README.md @@ -1,19 +1,19 @@ -# Setup DIRCTL in GitHub Action - -An action that sets up `dirctl` CLI in GitHub Actions. - -The GITHUB_TOKEN has to have "public repo" access. - -## Usage - -```yaml -- name: Setup dirctl - uses: agntcy/dir/.github/actions/setup-dirctl@main - with: - # Default: latest - version: v0.4.0 - # Default: linux - os: linux - # Default: amd64 - arch: amd64 -``` +# Setup DIRCTL in GitHub Action + +An action that sets up `dirctl` CLI in GitHub Actions. + +The GITHUB_TOKEN has to have "public repo" access. 
+ +## Usage + +```yaml +- name: Setup dirctl + uses: agntcy/dir/.github/actions/setup-dirctl@main + with: + # Default: latest + version: v0.4.0 + # Default: linux + os: linux + # Default: amd64 + arch: amd64 +``` diff --git a/.github/actions/setup-dirctl/action.yaml b/.github/actions/setup-dirctl/action.yaml index e8880f815..68d55a5e6 100644 --- a/.github/actions/setup-dirctl/action.yaml +++ b/.github/actions/setup-dirctl/action.yaml @@ -1,45 +1,45 @@ -name: Setup dirctl -description: Set up dirctl CLI in your GitHub Actions workflow -author: agntcy -branding: - icon: settings - color: yellow -inputs: - version: - description: Version of dirctl to install - required: false - default: "latest" - os: - description: Operating system of dirctl to install (linux, darwin, windows) - required: false - default: "linux" - arch: - description: Architecture of dirctl to install (amd64, arm64) - required: false - default: "amd64" -runs: - using: "composite" - steps: - - name: Download binary - shell: bash - run: | - ## If latest, resolve tag - if [ "${{ inputs.version }}" = "latest" ]; then - TAG=$(curl -sSLf "https://api.github.com/repos/agntcy/dir/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') - else - TAG="${{ inputs.version }}" - fi - - ## Download binary - curl -sSLf "https://github.com/agntcy/dir/releases/download/$TAG/dirctl-${{ inputs.os }}-${{ inputs.arch }}" -o dirctl - chmod +x dirctl - - ## Add to PATH - mkdir -p "$PWD/bin" - mv dirctl "$PWD/bin/dirctl" - echo "$PWD/bin" >> $GITHUB_PATH - - - name: Verify binary - shell: bash - run: | - dirctl version +name: Setup dirctl +description: Set up dirctl CLI in your GitHub Actions workflow +author: agntcy +branding: + icon: settings + color: yellow +inputs: + version: + description: Version of dirctl to install + required: false + default: "latest" + os: + description: Operating system of dirctl to install (linux, darwin, windows) + required: false + default: "linux" + arch: + description: Architecture of dirctl to install (amd64, arm64) + required: false + default: "amd64" +runs: + using: "composite" + steps: + - name: Download binary + shell: bash + run: | + ## If latest, resolve tag + if [ "${{ inputs.version }}" = "latest" ]; then + TAG=$(curl -sSLf "https://api.github.com/repos/agntcy/dir/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + else + TAG="${{ inputs.version }}" + fi + + ## Download binary + curl -sSLf "https://github.com/agntcy/dir/releases/download/$TAG/dirctl-${{ inputs.os }}-${{ inputs.arch }}" -o dirctl + chmod +x dirctl + + ## Add to PATH + mkdir -p "$PWD/bin" + mv dirctl "$PWD/bin/dirctl" + echo "$PWD/bin" >> $GITHUB_PATH + + - name: Verify binary + shell: bash + run: | + dirctl version diff --git a/.github/actions/trigger-integrations/action.yaml b/.github/actions/trigger-integrations/action.yaml index 7d573518d..c7d2e3c40 100644 --- a/.github/actions/trigger-integrations/action.yaml +++ b/.github/actions/trigger-integrations/action.yaml @@ -1,45 +1,45 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - ---- -name: Trigger integrations -description: Run the integration tests in CSIT repository -inputs: - github-token: - description: "GitHub token" - required: false - default: ${{ github.token }} - -runs: - using: "composite" - steps: - - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Get latest tags and versions - id: tags - shell: bash 
- run: | - VERSION_TAG=$(git tag --sort=-creatordate --list 'v*.*.*' | head -1) - - echo "CHART_TAG=$VERSION_TAG" >> $GITHUB_OUTPUT - echo "IMAGE_TAG=$VERSION_TAG" >> $GITHUB_OUTPUT - - - name: Trigger integration tests - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - with: - github-token: ${{ inputs.github-token }} - script: | - await github.rest.actions.createWorkflowDispatch({ - owner: 'agntcy', - repo: 'csit', - workflow_id: 'test-integrations.yaml', - ref: 'main', - inputs: { - skip_directory_test: false, - override_directory_image_tag: '${{ steps.tags.outputs.IMAGE_TAG }}', - override_directory_chart_tag: '${{ steps.tags.outputs.CHART_TAG }}', - }, - }); +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +--- +name: Trigger integrations +description: Run the integration tests in CSIT repository +inputs: + github-token: + description: "GitHub token" + required: false + default: ${{ github.token }} + +runs: + using: "composite" + steps: + - name: Checkout repository + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Get latest tags and versions + id: tags + shell: bash + run: | + VERSION_TAG=$(git tag --sort=-creatordate --list 'v*.*.*' | head -1) + + echo "CHART_TAG=$VERSION_TAG" >> $GITHUB_OUTPUT + echo "IMAGE_TAG=$VERSION_TAG" >> $GITHUB_OUTPUT + + - name: Trigger integration tests + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + github-token: ${{ inputs.github-token }} + script: | + await github.rest.actions.createWorkflowDispatch({ + owner: 'agntcy', + repo: 'csit', + workflow_id: 'test-integrations.yaml', + ref: 'main', + inputs: { + skip_directory_test: false, + override_directory_image_tag: '${{ steps.tags.outputs.IMAGE_TAG }}', + override_directory_chart_tag: '${{ steps.tags.outputs.CHART_TAG }}', + }, + }); diff --git a/.github/labels.yml b/.github/labels.yml index 613bfb27b..f770b4e11 100644 --- a/.github/labels.yml +++ b/.github/labels.yml @@ -1,123 +1,123 @@ -### -# Lifecycle -### -- name: 'stale' - color: '795648' - description: 'Denotes an issue or PR that has become stale and will be auto-closed.' -- name: 'no-stale' - color: 'd3e1f1' - description: 'Denotes an issue or PR that should be preserved from going stale.' - -### -# Size labels -### -- name: 'size/XS' - color: '009900' - description: 'Denotes a PR that changes 0-49 lines' -- name: 'size/S' - color: '77bb02' - description: 'Denotes a PR that changes 50-199 lines' -- name: 'size/M' - color: 'eebb03' - description: 'Denotes a PR that changes 200-999 lines' -- name: 'size/L' - color: 'ee9902' - description: 'Denotes a PR that changes 1000-1999 lines' -- name: 'size/XL' - color: 'ed5500' - description: 'Denotes a PR that changes 2000+ lines' - -### -# Kind -### -- name: 'kind/docs' - color: 'c7def7' - description: 'Categorizes issue or PR as related to documentation.' -- name: 'kind/feature' - color: 'c7def7' - description: 'Categorizes issue or PR as related to a new feature.' -- name: 'kind/refactor' - color: 'c7def7' - description: 'Categorizes issue or PR as related to cleaning up code, process, or technical debt.' -- name: 'kind/support' - color: 'c7def7' - description: 'Categorizes issue or PR as support questions.' -- name: 'kind/test' - color: 'c7def7' - description: 'Categorizes issue or PR as related to testing.' -- name: 'kind/bug' - color: 'e11d21' - description: 'Categorizes issue or PR as related to a bug.' 
-- name: 'kind/security' - color: 'e11d21' - description: 'Categorizes issue or PR as related to security.' -- name: 'kind/regression' - color: 'e11d21' - description: 'Categorizes issue or PR as related to a regression from a prior release.' - -### -# Components / Area -### - -# General: -- name: 'buf skip breaking' - color: 'f9d0c4' - description: 'Denotes a PR that should skip breaking Buf change checks.' - -# Dir Core: -- name: 'area/dir' - color: '0052cc' -- name: 'area/dir/api' - color: '0052cc' -- name: 'area/dir/server' - color: '0052cc' -- name: 'area/dir/client' - color: '0052cc' - -# SDK: -- name: 'area/sdk' - color: '0052cc' -- name: 'area/sdk/go' - color: '0052cc' -- name: 'area/sdk/js' - color: '0052cc' -- name: 'area/sdk/py' - color: '0052cc' - -# CLI: -- name: 'area/cli' - color: '0052cc' -- name: 'area/cli/dir' - color: '0052cc' - -# Helm: -- name: 'area/helm' - color: '0052cc' -- name: 'area/helm/dir' - color: '0052cc' -- name: 'area/helm/dirctl' - color: '0052cc' -- name: 'area/docker' - color: '0052cc' - -### -# Community -### -- name: 'triage/duplicate' - color: 'c31fb5' - description: 'This issue or PR already exists.' -- name: 'triage/ready-for-review' - color: 'c31fb5' - description: 'This issue or PR is ready for review.' -- name: 'triage/blocked' - color: 'bfa808' - description: 'This issue or PR is blocked by another issue or PR.' -- name: 'triage/needs-information' - color: 'bfa808' - description: 'Indicates an issue or PR needs more information in order to work on it.' -- name: 'triage/not-reproducible' - color: 'bfa808' - description: 'Indicates an issue or PR can not be reproduced as described.' -- name: 'good first issue' - color: '7057ff' - description: 'Good for newcomers' +### +# Lifecycle +### +- name: 'stale' + color: '795648' + description: 'Denotes an issue or PR that has become stale and will be auto-closed.' +- name: 'no-stale' + color: 'd3e1f1' + description: 'Denotes an issue or PR that should be preserved from going stale.' + +### +# Size labels +### +- name: 'size/XS' + color: '009900' + description: 'Denotes a PR that changes 0-49 lines' +- name: 'size/S' + color: '77bb02' + description: 'Denotes a PR that changes 50-199 lines' +- name: 'size/M' + color: 'eebb03' + description: 'Denotes a PR that changes 200-999 lines' +- name: 'size/L' + color: 'ee9902' + description: 'Denotes a PR that changes 1000-1999 lines' +- name: 'size/XL' + color: 'ed5500' + description: 'Denotes a PR that changes 2000+ lines' + +### +# Kind +### +- name: 'kind/docs' + color: 'c7def7' + description: 'Categorizes issue or PR as related to documentation.' +- name: 'kind/feature' + color: 'c7def7' + description: 'Categorizes issue or PR as related to a new feature.' +- name: 'kind/refactor' + color: 'c7def7' + description: 'Categorizes issue or PR as related to cleaning up code, process, or technical debt.' +- name: 'kind/support' + color: 'c7def7' + description: 'Categorizes issue or PR as support questions.' +- name: 'kind/test' + color: 'c7def7' + description: 'Categorizes issue or PR as related to testing.' +- name: 'kind/bug' + color: 'e11d21' + description: 'Categorizes issue or PR as related to a bug.' +- name: 'kind/security' + color: 'e11d21' + description: 'Categorizes issue or PR as related to security.' +- name: 'kind/regression' + color: 'e11d21' + description: 'Categorizes issue or PR as related to a regression from a prior release.' 
+ +### +# Components / Area +### + +# General: +- name: 'buf skip breaking' + color: 'f9d0c4' + description: 'Denotes a PR that should skip breaking Buf change checks.' + +# Dir Core: +- name: 'area/dir' + color: '0052cc' +- name: 'area/dir/api' + color: '0052cc' +- name: 'area/dir/server' + color: '0052cc' +- name: 'area/dir/client' + color: '0052cc' + +# SDK: +- name: 'area/sdk' + color: '0052cc' +- name: 'area/sdk/go' + color: '0052cc' +- name: 'area/sdk/js' + color: '0052cc' +- name: 'area/sdk/py' + color: '0052cc' + +# CLI: +- name: 'area/cli' + color: '0052cc' +- name: 'area/cli/dir' + color: '0052cc' + +# Helm: +- name: 'area/helm' + color: '0052cc' +- name: 'area/helm/dir' + color: '0052cc' +- name: 'area/helm/dirctl' + color: '0052cc' +- name: 'area/docker' + color: '0052cc' + +### +# Community +### +- name: 'triage/duplicate' + color: 'c31fb5' + description: 'This issue or PR already exists.' +- name: 'triage/ready-for-review' + color: 'c31fb5' + description: 'This issue or PR is ready for review.' +- name: 'triage/blocked' + color: 'bfa808' + description: 'This issue or PR is blocked by another issue or PR.' +- name: 'triage/needs-information' + color: 'bfa808' + description: 'Indicates an issue or PR needs more information in order to work on it.' +- name: 'triage/not-reproducible' + color: 'bfa808' + description: 'Indicates an issue or PR can not be reproduced as described.' +- name: 'good first issue' + color: '7057ff' + description: 'Good for newcomers' diff --git a/.github/workflows/buf-ci.yaml b/.github/workflows/buf-ci.yaml index 1d4872a96..6a81fe21c 100644 --- a/.github/workflows/buf-ci.yaml +++ b/.github/workflows/buf-ci.yaml @@ -1,37 +1,37 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Buf CI - -on: - push: - delete: - pull_request: - types: - - opened - - synchronize - - reopened - - labeled - - unlabeled - -permissions: - contents: read - pull-requests: write - -jobs: - verify-proto: - name: Verify Proto API - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Verify - uses: bufbuild/buf-action@8f4a1456a0ab6a1eb80ba68e53832e6fcfacc16c # v1.3.0 - with: - token: ${{ secrets.BUF_TOKEN }} - input: proto/ - format: false - lint: false +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Buf CI + +on: + push: + delete: + pull_request: + types: + - opened + - synchronize + - reopened + - labeled + - unlabeled + +permissions: + contents: read + pull-requests: write + +jobs: + verify-proto: + name: Verify Proto API + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Verify + uses: bufbuild/buf-action@8f4a1456a0ab6a1eb80ba68e53832e6fcfacc16c # v1.3.0 + with: + token: ${{ secrets.BUF_TOKEN }} + input: proto/ + format: false + lint: false diff --git a/.github/workflows/build-feature.yaml b/.github/workflows/build-feature.yaml index 8627af910..4cc74ba87 100644 --- a/.github/workflows/build-feature.yaml +++ b/.github/workflows/build-feature.yaml @@ -1,155 +1,155 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Build Feature Branch - -# This workflow allows building Docker images and Helm charts from feature branches -# for testing before creating a release. 
It can be manually triggered from any branch. -# -# Usage: -# 1. Push your feature branch to GitHub -# 2. Go to Actions → "Build Feature Branch" → "Run workflow" -# 3. Select your branch and provide custom tags (or leave empty for auto-generated) -# 4. Use the generated tags in agntcy-deployment for testing - -on: - workflow_dispatch: - inputs: - component: - required: false - type: string - description: "Component suffix for images/charts (e.g., 'dev' creates dir-apiserver-dev, dir-dev/helm-charts). Default: 'dev'" - default: "dev" - image_tag: - required: false - type: string - description: "Image tag (leave empty for auto: feat--)" - default: "" - chart_version: - required: false - type: string - description: "Chart version (leave empty for auto: 0.0.0-test-)" - default: "" - push_images: - required: false - type: boolean - default: true - description: "Push images to registry (if false, only builds charts)" - -permissions: - contents: read - packages: write - -jobs: - prepare: - name: Prepare Tags - runs-on: ubuntu-latest - outputs: - image_tag: ${{ steps.tags.outputs.image_tag }} - chart_version: ${{ steps.tags.outputs.chart_version }} - image_repo: ${{ steps.tags.outputs.image_repo }} - image_name_suffix: ${{ steps.tags.outputs.image_name_suffix }} - chart_path: ${{ steps.tags.outputs.chart_path }} - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Generate tags - id: tags - shell: bash - run: | - SHORT_SHA=$(git rev-parse --short HEAD) - BRANCH_NAME=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9-]/-/g' | tr '[:upper:]' '[:lower:]') - COMPONENT="${{ inputs.component }}" - - if [ -z "${{ inputs.image_tag }}" ]; then - IMAGE_TAG="feat-${BRANCH_NAME}-${SHORT_SHA}" - else - IMAGE_TAG="${{ inputs.image_tag }}" - fi - - if [ -z "${{ inputs.chart_version }}" ]; then - CHART_VERSION="0.0.0-test-${SHORT_SHA}" - else - CHART_VERSION="${{ inputs.chart_version }}" - fi - - # Generate image repo and suffix for component separation - IMAGE_REPO="ghcr.io/agntcy" - if [ -n "${COMPONENT}" ]; then - IMAGE_NAME_SUFFIX="-${COMPONENT}" - CHART_PATH="dir-${COMPONENT}" - else - IMAGE_NAME_SUFFIX="" - CHART_PATH="dir" - fi - - echo "image_tag=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" - echo "chart_version=${CHART_VERSION}" >> "$GITHUB_OUTPUT" - echo "image_repo=${IMAGE_REPO}" >> "$GITHUB_OUTPUT" - echo "image_name_suffix=${IMAGE_NAME_SUFFIX}" >> "$GITHUB_OUTPUT" - echo "chart_path=${CHART_PATH}" >> "$GITHUB_OUTPUT" - echo "Generated image tag: ${IMAGE_TAG}" - echo "Generated chart version: ${CHART_VERSION}" - echo "Image name suffix: ${IMAGE_NAME_SUFFIX}" - echo "Chart path: ${CHART_PATH}" - - build-images: - name: Build Images - if: ${{ inputs.push_images == true }} - needs: - - prepare - uses: ./.github/workflows/reusable-build.yaml - with: - image_repo: ${{ needs.prepare.outputs.image_repo }} - image_tag: ${{ needs.prepare.outputs.image_tag }} - image_name_suffix: ${{ needs.prepare.outputs.image_name_suffix }} - push: true - - build-charts: - name: Build Charts - needs: - - prepare - uses: ./.github/workflows/reusable-release-helm.yaml - with: - image_repo: ${{ needs.prepare.outputs.image_repo }} - release_tag: ${{ needs.prepare.outputs.chart_version }} - chart_path: ${{ needs.prepare.outputs.chart_path }} - - summary: - name: Summary - needs: - - prepare - - build-images - - build-charts - runs-on: ubuntu-latest - # Allow summary even if build-images was skipped (when push_images=false) - if: ${{ always() && 
needs.prepare.result == 'success' && needs.build-charts.result == 'success' }} - steps: - - name: Build Summary - run: | - echo "## Build Summary" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**Image Tag:** \`${{ needs.prepare.outputs.image_tag }}\`" >> $GITHUB_STEP_SUMMARY - echo "**Chart Version:** \`${{ needs.prepare.outputs.chart_version }}\`" >> $GITHUB_STEP_SUMMARY - echo "**Component:** \`${{ inputs.component }}\`" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "### Usage in agntcy-deployment" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**config.json:**" >> $GITHUB_STEP_SUMMARY - echo '```json' >> $GITHUB_STEP_SUMMARY - echo '{' >> $GITHUB_STEP_SUMMARY - echo ' "chart_repo": "ghcr.io",' >> $GITHUB_STEP_SUMMARY - echo ' "chart_name": "agntcy/${{ needs.prepare.outputs.chart_path }}/helm-charts/dir",' >> $GITHUB_STEP_SUMMARY - echo ' "chart_version": "${{ needs.prepare.outputs.chart_version }}"' >> $GITHUB_STEP_SUMMARY - echo '}' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "**values.yaml:**" >> $GITHUB_STEP_SUMMARY - echo '```yaml' >> $GITHUB_STEP_SUMMARY - echo 'image:' >> $GITHUB_STEP_SUMMARY - echo ' repository: ghcr.io/agntcy/dir-apiserver${{ needs.prepare.outputs.image_name_suffix }}' >> $GITHUB_STEP_SUMMARY - echo ' tag: ${{ needs.prepare.outputs.image_tag }}' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Build Feature Branch + +# This workflow allows building Docker images and Helm charts from feature branches +# for testing before creating a release. It can be manually triggered from any branch. +# +# Usage: +# 1. Push your feature branch to GitHub +# 2. Go to Actions → "Build Feature Branch" → "Run workflow" +# 3. Select your branch and provide custom tags (or leave empty for auto-generated) +# 4. Use the generated tags in agntcy-deployment for testing + +on: + workflow_dispatch: + inputs: + component: + required: false + type: string + description: "Component suffix for images/charts (e.g., 'dev' creates dir-apiserver-dev, dir-dev/helm-charts). 
Default: 'dev'" + default: "dev" + image_tag: + required: false + type: string + description: "Image tag (leave empty for auto: feat--)" + default: "" + chart_version: + required: false + type: string + description: "Chart version (leave empty for auto: 0.0.0-test-)" + default: "" + push_images: + required: false + type: boolean + default: true + description: "Push images to registry (if false, only builds charts)" + +permissions: + contents: read + packages: write + +jobs: + prepare: + name: Prepare Tags + runs-on: ubuntu-latest + outputs: + image_tag: ${{ steps.tags.outputs.image_tag }} + chart_version: ${{ steps.tags.outputs.chart_version }} + image_repo: ${{ steps.tags.outputs.image_repo }} + image_name_suffix: ${{ steps.tags.outputs.image_name_suffix }} + chart_path: ${{ steps.tags.outputs.chart_path }} + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Generate tags + id: tags + shell: bash + run: | + SHORT_SHA=$(git rev-parse --short HEAD) + BRANCH_NAME=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9-]/-/g' | tr '[:upper:]' '[:lower:]') + COMPONENT="${{ inputs.component }}" + + if [ -z "${{ inputs.image_tag }}" ]; then + IMAGE_TAG="feat-${BRANCH_NAME}-${SHORT_SHA}" + else + IMAGE_TAG="${{ inputs.image_tag }}" + fi + + if [ -z "${{ inputs.chart_version }}" ]; then + CHART_VERSION="0.0.0-test-${SHORT_SHA}" + else + CHART_VERSION="${{ inputs.chart_version }}" + fi + + # Generate image repo and suffix for component separation + IMAGE_REPO="ghcr.io/agntcy" + if [ -n "${COMPONENT}" ]; then + IMAGE_NAME_SUFFIX="-${COMPONENT}" + CHART_PATH="dir-${COMPONENT}" + else + IMAGE_NAME_SUFFIX="" + CHART_PATH="dir" + fi + + echo "image_tag=${IMAGE_TAG}" >> "$GITHUB_OUTPUT" + echo "chart_version=${CHART_VERSION}" >> "$GITHUB_OUTPUT" + echo "image_repo=${IMAGE_REPO}" >> "$GITHUB_OUTPUT" + echo "image_name_suffix=${IMAGE_NAME_SUFFIX}" >> "$GITHUB_OUTPUT" + echo "chart_path=${CHART_PATH}" >> "$GITHUB_OUTPUT" + echo "Generated image tag: ${IMAGE_TAG}" + echo "Generated chart version: ${CHART_VERSION}" + echo "Image name suffix: ${IMAGE_NAME_SUFFIX}" + echo "Chart path: ${CHART_PATH}" + + build-images: + name: Build Images + if: ${{ inputs.push_images == true }} + needs: + - prepare + uses: ./.github/workflows/reusable-build.yaml + with: + image_repo: ${{ needs.prepare.outputs.image_repo }} + image_tag: ${{ needs.prepare.outputs.image_tag }} + image_name_suffix: ${{ needs.prepare.outputs.image_name_suffix }} + push: true + + build-charts: + name: Build Charts + needs: + - prepare + uses: ./.github/workflows/reusable-release-helm.yaml + with: + image_repo: ${{ needs.prepare.outputs.image_repo }} + release_tag: ${{ needs.prepare.outputs.chart_version }} + chart_path: ${{ needs.prepare.outputs.chart_path }} + + summary: + name: Summary + needs: + - prepare + - build-images + - build-charts + runs-on: ubuntu-latest + # Allow summary even if build-images was skipped (when push_images=false) + if: ${{ always() && needs.prepare.result == 'success' && needs.build-charts.result == 'success' }} + steps: + - name: Build Summary + run: | + echo "## Build Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**Image Tag:** \`${{ needs.prepare.outputs.image_tag }}\`" >> $GITHUB_STEP_SUMMARY + echo "**Chart Version:** \`${{ needs.prepare.outputs.chart_version }}\`" >> $GITHUB_STEP_SUMMARY + echo "**Component:** \`${{ inputs.component }}\`" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + 
echo "### Usage in agntcy-deployment" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**config.json:**" >> $GITHUB_STEP_SUMMARY + echo '```json' >> $GITHUB_STEP_SUMMARY + echo '{' >> $GITHUB_STEP_SUMMARY + echo ' "chart_repo": "ghcr.io",' >> $GITHUB_STEP_SUMMARY + echo ' "chart_name": "agntcy/${{ needs.prepare.outputs.chart_path }}/helm-charts/dir",' >> $GITHUB_STEP_SUMMARY + echo ' "chart_version": "${{ needs.prepare.outputs.chart_version }}"' >> $GITHUB_STEP_SUMMARY + echo '}' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "**values.yaml:**" >> $GITHUB_STEP_SUMMARY + echo '```yaml' >> $GITHUB_STEP_SUMMARY + echo 'image:' >> $GITHUB_STEP_SUMMARY + echo ' repository: ghcr.io/agntcy/dir-apiserver${{ needs.prepare.outputs.image_name_suffix }}' >> $GITHUB_STEP_SUMMARY + echo ' tag: ${{ needs.prepare.outputs.image_tag }}' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 94abf5e45..b66fe986c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,175 +1,175 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: CI - -on: - push: - branches: - - main - tags: - - "v*.*.*" - pull_request: - -permissions: - contents: write - packages: write - id-token: write - pull-requests: write - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 - with: - go-version: "1.25.2" - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup lint cache - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 - with: - path: | - ~/.cache/golangci-lint - ~/.cache/go-build - key: lint-cache-${{ runner.os }}-${{ hashFiles('**/*.sum') }} - restore-keys: | - lint-cache-${{ runner.os }}- - - - name: Run linters - run: | - task lint - - license: - name: License - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup license cache - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 - with: - path: | - **/.licensei.cache - key: license-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} - restore-keys: | - license-cache-${{ runner.os }}- - - - name: License cache - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - task license:cache - - - name: License - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - task license - - verify: - name: Verify - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Check auto-generated code - run: | - task gen - if [[ -n "$(git status --porcelain)" ]]; then - echo "There are uncommitted changes after running 'task gen'. 
Please commit these changes." - exit 1 - fi - - build: - name: Build - needs: - - lint - - license - - verify - uses: ./.github/workflows/reusable-build.yaml - with: - image_repo: ghcr.io/agntcy - image_tag: ${{ github.sha }} - build_coverage_images: true - - test: - name: Test - needs: - - build - uses: ./.github/workflows/reusable-test.yaml - with: - image_repo: ghcr.io/agntcy - image_tag: ${{ github.sha }} - secrets: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - - release: - name: Release - if: ${{ startsWith(github.ref, 'refs/tags/') }} - needs: - - test - uses: ./.github/workflows/reusable-release.yaml - with: - image_repo: ghcr.io/agntcy - release_tag: ${{ github.ref_name }} - secrets: - PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} - NPMJS_TOKEN: ${{ secrets.NPMJS_TOKEN }} - - integration: - name: Run integration tests - runs-on: ubuntu-latest - needs: - - release - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Run tests - uses: ./.github/actions/trigger-integrations - with: - github-token: ${{ secrets.AGNTCY_BUILD_BOT_GH_TOKEN }} - - success: - name: Success - # https://github.com/actions/runner/issues/2566 - # https://github.com/actions/toolkit/issues/581 - if: ${{ !cancelled() && !contains(needs.*.result, 'cancelled') && !contains(needs.*.result, 'failure') }} - needs: - - test - - release - runs-on: ubuntu-latest - steps: - - name: Echo Success - run: echo "::notice Success!" +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: CI + +on: + push: + branches: + - main + tags: + - "v*.*.*" + pull_request: + +permissions: + contents: write + packages: write + id-token: write + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + with: + go-version: "1.25.2" + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup lint cache + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + with: + path: | + ~/.cache/golangci-lint + ~/.cache/go-build + key: lint-cache-${{ runner.os }}-${{ hashFiles('**/*.sum') }} + restore-keys: | + lint-cache-${{ runner.os }}- + + - name: Run linters + run: | + task lint + + license: + name: License + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup license cache + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + with: + path: | + **/.licensei.cache + key: license-cache-${{ runner.os }}-${{ hashFiles('**/go.sum') }} + restore-keys: | + license-cache-${{ runner.os }}- + + - name: License cache + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + task license:cache + + - name: License + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + task license + + verify: + name: Verify + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Check auto-generated code + run: | + task gen + if [[ -n "$(git status --porcelain)" ]]; then + echo "There are uncommitted changes after running 'task gen'. Please commit these changes." + exit 1 + fi + + build: + name: Build + needs: + - lint + - license + - verify + uses: ./.github/workflows/reusable-build.yaml + with: + image_repo: ghcr.io/agntcy + image_tag: ${{ github.sha }} + build_coverage_images: true + + test: + name: Test + needs: + - build + uses: ./.github/workflows/reusable-test.yaml + with: + image_repo: ghcr.io/agntcy + image_tag: ${{ github.sha }} + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + + release: + name: Release + if: ${{ startsWith(github.ref, 'refs/tags/') }} + needs: + - test + uses: ./.github/workflows/reusable-release.yaml + with: + image_repo: ghcr.io/agntcy + release_tag: ${{ github.ref_name }} + secrets: + PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + NPMJS_TOKEN: ${{ secrets.NPMJS_TOKEN }} + + integration: + name: Run integration tests + runs-on: ubuntu-latest + needs: + - release + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Run tests + uses: ./.github/actions/trigger-integrations + with: + github-token: ${{ secrets.AGNTCY_BUILD_BOT_GH_TOKEN }} + + success: + name: Success + # https://github.com/actions/runner/issues/2566 + # https://github.com/actions/toolkit/issues/581 + if: ${{ !cancelled() && !contains(needs.*.result, 'cancelled') && !contains(needs.*.result, 'failure') }} + needs: + - test + - release + runs-on: ubuntu-latest + steps: + - name: Echo Success + run: echo "::notice Success!" diff --git a/.github/workflows/clear-docker-cache.yaml b/.github/workflows/clear-docker-cache.yaml index 186edf822..9088e96f8 100644 --- a/.github/workflows/clear-docker-cache.yaml +++ b/.github/workflows/clear-docker-cache.yaml @@ -1,15 +1,15 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Clear Docker Cache - -on: - workflow_dispatch: -jobs: - clear-docker-cache: - name: Clear Docker Cache - runs-on: ubuntu-latest - steps: - - name: Clear Github Action runner cache - run: | - gh cache delete --all --succeed-on-no-caches --repo "${{ github.repository }}" +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Clear Docker Cache + +on: + workflow_dispatch: +jobs: + clear-docker-cache: + name: Clear Docker Cache + runs-on: ubuntu-latest + steps: + - name: Clear Github Action runner cache + run: | + gh cache delete --all --succeed-on-no-caches --repo "${{ github.repository }}" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 563e1bea4..e1ac0828f 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,258 +1,258 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: "CodeQL Advanced" - -on: - push: - branches: [ "main" ] - pull_request: - branches: [ "main" ] - schedule: - - cron: '42 5 * * 6' - workflow_dispatch: - -jobs: - analyze: - name: Analyze (${{ matrix.language }}) - # Runner size impacts CodeQL analysis time. 
To learn more, please see: - # - https://gh.io/recommended-hardware-resources-for-running-codeql - # - https://gh.io/supported-runners-and-hardware-resources - # - https://gh.io/using-larger-runners (GitHub.com only) - # Consider using larger runners or machines with greater resources for possible analysis time improvements. - runs-on: ubuntu-latest - permissions: - # required for all workflows - security-events: write - - # required to fetch internal or private CodeQL packs - packages: read - - # only required for workflows in private repositories - actions: read - contents: read - - strategy: - fail-fast: false - matrix: - include: - # Note: Temporarily disabling 'actions' language due to recognition issue in CodeQL 2.23.3 - # - language: actions - # build-mode: none - - language: go - build-mode: autobuild - - language: javascript-typescript - build-mode: none - - language: python - build-mode: none - # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift' - # Use `c-cpp` to analyze code written in C, C++ or both - # Use 'java-kotlin' to analyze code written in Java, Kotlin or both - # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both - # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, - # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. - # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how - # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages - steps: - - name: Checkout repository - uses: actions/checkout@v4 # v4.2.2 - - - name: Log language being analyzed - run: | - echo "🔍 Starting CodeQL analysis for: ${{ matrix.language }}" - echo "Build mode: ${{ matrix.build-mode }}" - - # All setup steps MUST be performed before running the `github/codeql-action/init` action. - # This includes steps like installing compilers or runtimes (`actions/setup-node` - # or others). Installing tools after init can interfere with CodeQL analysis. - - - name: Set up Go - if: matrix.language == 'go' - uses: actions/setup-go@v5 - with: - go-version: '1.23.2' - cache: true - - - name: Install Task - if: matrix.language == 'go' - uses: arduino/setup-task@b91d5d2c96a56797b48ac1e0e89220bf64044611 # v2.0.0 - with: - version: 3.x - repo-token: ${{ secrets.GITHUB_TOKEN }} - - - name: Install buf CLI - if: matrix.language == 'go' - shell: bash - run: | - # Install buf for protobuf generation - curl -sSL "https://github.com/bufbuild/buf/releases/latest/download/buf-$(uname -s)-$(uname -m)" -o /tmp/buf - sudo mv /tmp/buf /usr/local/bin/buf - sudo chmod +x /usr/local/bin/buf - - - name: Setup Node.js - if: matrix.language == 'javascript-typescript' - uses: actions/setup-node@v4 - with: - node-version: '20' - cache: 'npm' - cache-dependency-path: '**/package*.json' - - - name: Set up Python - if: matrix.language == 'python' - uses: actions/setup-python@v4 - with: - python-version: '3.11' - cache: 'pip' - - - name: Install JavaScript/TypeScript dependencies - if: matrix.language == 'javascript-typescript' - shell: bash - run: | - echo "📦 Installing JavaScript/TypeScript dependencies..." 
- # Install dependencies for JavaScript SDK - if [ -f "sdk/dir-js/package.json" ]; then - cd sdk/dir-js - npm ci || npm install - cd ../.. - fi - # Install dependencies for examples - if [ -f "sdk/examples/example-js/package.json" ]; then - cd sdk/examples/example-js - npm ci || npm install - cd ../../.. - fi - echo "✅ JavaScript/TypeScript dependencies installed" - - - name: Install Python dependencies - if: matrix.language == 'python' - shell: bash - run: | - echo "📦 Installing Python dependencies..." - # Install dependencies for Python SDK - if [ -f "sdk/dir-py/pyproject.toml" ]; then - cd sdk/dir-py - pip install -e . || echo "Failed to install Python SDK" - cd ../.. - fi - # Install dependencies for examples - if [ -f "sdk/examples/example-py/requirements.txt" ]; then - cd sdk/examples/example-py - pip install -r requirements.txt || echo "Failed to install example requirements" - cd ../../.. - fi - echo "✅ Python dependencies installed" - - - name: Prepare Go environment for autobuild - if: matrix.language == 'go' - shell: bash - run: | - echo "Preparing Go environment for CodeQL autobuild..." - echo "Go version: $(go version)" - echo "GOPATH: $GOPATH" - echo "GOROOT: $GOROOT" - echo "Working directory: $(pwd)" - - # Make sure all go.mod files have their dependencies downloaded - echo "Pre-downloading Go module dependencies..." - for gomod in $(find . -name "go.mod" -not -path "./vendor/*" | head -10); do - module_dir=$(dirname "$gomod") - echo "Downloading deps for $module_dir" - (cd "$module_dir" && go mod download) || echo "Failed to download deps for $module_dir" - done - - echo "Go environment prepared for autobuild" - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 - with: - languages: ${{ matrix.language }} - build-mode: ${{ matrix.build-mode }} - queries: +security-extended,security-and-quality - config: | - name: "CodeQL Config" - queries: - - uses: security-extended - - uses: security-and-quality - query-filters: - - exclude: - # Helm values files use empty strings as defaults - id: js/empty-password-in-configuration-file - paths-ignore: - - "**/*.pb.go" - - "**/mock_*.go" - - "**/*_pb2.py" - - "**/*_pb2_grpc.py" - - "**/*_pb.js" - - "**/*_pb.d.ts" - - "**/testdata/**" - - "**/vendor/**" - - - name: Run manual build steps - if: matrix.build-mode == 'manual' - shell: bash - run: | - if [ "${{ matrix.language }}" == "go" ]; then - echo "� Building Go project for CodeQL analysis (using proven local approach)..." - - # Step 1: Install project dependencies - echo "Building Go project for CodeQL analysis..." - - # Show environment for debugging - echo "=== Environment Debug ===" - echo "Go version: $(go version)" - echo "GOPATH: $GOPATH" - echo "GOROOT: $GOROOT" - echo "Working directory: $(pwd)" - echo "Go modules found:" - find . -name "go.mod" -not -path "./vendor/*" | head -10 - echo "=========================" - - # The simplest possible approach - just build everything - echo "Attempting to build all Go packages..." - - # Method 1: Single command to build everything - if go build ./...; then - echo "SUCCESS: Global go build ./... worked!" - else - echo "Global build failed, trying individual packages..." - - # Method 2: Build specific known packages - echo "Building known main packages..." - - # Find and build main packages - for main_pkg in server cli client; do - if [ -d "$main_pkg" ] && [ -f "$main_pkg/go.mod" ]; then - echo "Building $main_pkg..." 
- (cd "$main_pkg" && go build -v .) || echo "$main_pkg build failed" - fi - done - - - # Method 3: Build each module individually - echo "Building individual modules..." - for gomod in $(find . -name "go.mod" -not -path "./vendor/*" | head -10); do - module_dir=$(dirname "$gomod") - echo "Building module: $module_dir" - - (cd "$module_dir" && { - # Try to build all packages in this module - if go build -v ./...; then - echo " SUCCESS: Built $module_dir" - else - echo " FAILED: Could not build $module_dir" - fi - }) || echo "Error building $module_dir" - done - fi - - echo "Build attempt completed." - else - echo "❌ Manual build mode not supported for language: ${{ matrix.language }}" - exit 1 - fi - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 - with: - category: "/language:${{matrix.language}}" +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: "CodeQL Advanced" + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + schedule: + - cron: '42 5 * * 6' + workflow_dispatch: + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ubuntu-latest + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + # Note: Temporarily disabling 'actions' language due to recognition issue in CodeQL 2.23.3 + # - language: actions + # build-mode: none + - language: go + build-mode: autobuild + - language: javascript-typescript + build-mode: none + - language: python + build-mode: none + # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. + # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 # v4.2.2 + + - name: Log language being analyzed + run: | + echo "🔍 Starting CodeQL analysis for: ${{ matrix.language }}" + echo "Build mode: ${{ matrix.build-mode }}" + + # All setup steps MUST be performed before running the `github/codeql-action/init` action. 
+ # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). Installing tools after init can interfere with CodeQL analysis. + + - name: Set up Go + if: matrix.language == 'go' + uses: actions/setup-go@v5 + with: + go-version: '1.23.2' + cache: true + + - name: Install Task + if: matrix.language == 'go' + uses: arduino/setup-task@b91d5d2c96a56797b48ac1e0e89220bf64044611 # v2.0.0 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Install buf CLI + if: matrix.language == 'go' + shell: bash + run: | + # Install buf for protobuf generation + curl -sSL "https://github.com/bufbuild/buf/releases/latest/download/buf-$(uname -s)-$(uname -m)" -o /tmp/buf + sudo mv /tmp/buf /usr/local/bin/buf + sudo chmod +x /usr/local/bin/buf + + - name: Setup Node.js + if: matrix.language == 'javascript-typescript' + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'npm' + cache-dependency-path: '**/package*.json' + + - name: Set up Python + if: matrix.language == 'python' + uses: actions/setup-python@v4 + with: + python-version: '3.11' + cache: 'pip' + + - name: Install JavaScript/TypeScript dependencies + if: matrix.language == 'javascript-typescript' + shell: bash + run: | + echo "📦 Installing JavaScript/TypeScript dependencies..." + # Install dependencies for JavaScript SDK + if [ -f "sdk/dir-js/package.json" ]; then + cd sdk/dir-js + npm ci || npm install + cd ../.. + fi + # Install dependencies for examples + if [ -f "sdk/examples/example-js/package.json" ]; then + cd sdk/examples/example-js + npm ci || npm install + cd ../../.. + fi + echo "✅ JavaScript/TypeScript dependencies installed" + + - name: Install Python dependencies + if: matrix.language == 'python' + shell: bash + run: | + echo "📦 Installing Python dependencies..." + # Install dependencies for Python SDK + if [ -f "sdk/dir-py/pyproject.toml" ]; then + cd sdk/dir-py + pip install -e . || echo "Failed to install Python SDK" + cd ../.. + fi + # Install dependencies for examples + if [ -f "sdk/examples/example-py/requirements.txt" ]; then + cd sdk/examples/example-py + pip install -r requirements.txt || echo "Failed to install example requirements" + cd ../../.. + fi + echo "✅ Python dependencies installed" + + - name: Prepare Go environment for autobuild + if: matrix.language == 'go' + shell: bash + run: | + echo "Preparing Go environment for CodeQL autobuild..." + echo "Go version: $(go version)" + echo "GOPATH: $GOPATH" + echo "GOROOT: $GOROOT" + echo "Working directory: $(pwd)" + + # Make sure all go.mod files have their dependencies downloaded + echo "Pre-downloading Go module dependencies..." + for gomod in $(find . -name "go.mod" -not -path "./vendor/*" | head -10); do + module_dir=$(dirname "$gomod") + echo "Downloading deps for $module_dir" + (cd "$module_dir" && go mod download) || echo "Failed to download deps for $module_dir" + done + + echo "Go environment prepared for autobuild" + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + queries: +security-extended,security-and-quality + config: | + name: "CodeQL Config" + queries: + - uses: security-extended + - uses: security-and-quality + query-filters: + - exclude: + # Helm values files use empty strings as defaults + id: js/empty-password-in-configuration-file + paths-ignore: + - "**/*.pb.go" + - "**/mock_*.go" + - "**/*_pb2.py" + - "**/*_pb2_grpc.py" + - "**/*_pb.js" + - "**/*_pb.d.ts" + - "**/testdata/**" + - "**/vendor/**" + + - name: Run manual build steps + if: matrix.build-mode == 'manual' + shell: bash + run: | + if [ "${{ matrix.language }}" == "go" ]; then + echo "� Building Go project for CodeQL analysis (using proven local approach)..." + + # Step 1: Install project dependencies + echo "Building Go project for CodeQL analysis..." + + # Show environment for debugging + echo "=== Environment Debug ===" + echo "Go version: $(go version)" + echo "GOPATH: $GOPATH" + echo "GOROOT: $GOROOT" + echo "Working directory: $(pwd)" + echo "Go modules found:" + find . -name "go.mod" -not -path "./vendor/*" | head -10 + echo "=========================" + + # The simplest possible approach - just build everything + echo "Attempting to build all Go packages..." + + # Method 1: Single command to build everything + if go build ./...; then + echo "SUCCESS: Global go build ./... worked!" + else + echo "Global build failed, trying individual packages..." + + # Method 2: Build specific known packages + echo "Building known main packages..." + + # Find and build main packages + for main_pkg in server cli client; do + if [ -d "$main_pkg" ] && [ -f "$main_pkg/go.mod" ]; then + echo "Building $main_pkg..." + (cd "$main_pkg" && go build -v .) || echo "$main_pkg build failed" + fi + done + + + # Method 3: Build each module individually + echo "Building individual modules..." + for gomod in $(find . -name "go.mod" -not -path "./vendor/*" | head -10); do + module_dir=$(dirname "$gomod") + echo "Building module: $module_dir" + + (cd "$module_dir" && { + # Try to build all packages in this module + if go build -v ./...; then + echo " SUCCESS: Built $module_dir" + else + echo " FAILED: Could not build $module_dir" + fi + }) || echo "Error building $module_dir" + done + fi + + echo "Build attempt completed." + else + echo "❌ Manual build mode not supported for language: ${{ matrix.language }}" + exit 1 + fi + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/container-security-scan.yml b/.github/workflows/container-security-scan.yml index 1db963bb4..ce0ecd37e 100644 --- a/.github/workflows/container-security-scan.yml +++ b/.github/workflows/container-security-scan.yml @@ -1,141 +1,141 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 -name: Container Security Scan - -on: - # Nightly scan on main branch only; manual dispatch allowed. 
- schedule: - - cron: "0 3 * * *" # Daily at 03:00 UTC (runs on default branch context: main) - workflow_dispatch: - -permissions: - contents: read - security-events: write # for uploading SARIF - actions: read - issues: write # create issues for critical CVEs - -jobs: - get-version: - name: Get Latest Release Version - runs-on: ubuntu-latest - outputs: - version: ${{ steps.get-release.outputs.version }} - steps: - - name: Get latest release - id: get-release - run: | - LATEST_TAG=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .tag_name) - echo "version=${LATEST_TAG}" >> $GITHUB_OUTPUT - echo "Latest release version: ${LATEST_TAG}" - - trivy-scan: - name: Trivy Image Scan (Pull from GHCR) - runs-on: ubuntu-latest - needs: get-version - strategy: - fail-fast: false - matrix: - include: - - image: dir-apiserver - repo: ghcr.io/${{ github.repository_owner }}/dir-apiserver - version: ${{ needs.get-version.outputs.version }} - - image: dir-ctl - repo: ghcr.io/${{ github.repository_owner }}/dir-ctl - version: ${{ needs.get-version.outputs.version }} - - image: zot - repo: ghcr.io/project-zot/zot - version: v2.1.11 - - image: spire-server - repo: ghcr.io/spiffe/spire-server - version: 1.13.3 - - image: spire-agent - repo: ghcr.io/spiffe/spire-agent - version: 1.13.3 - steps: - - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - - name: Log in to GHCR - uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Pull image (explicit version) - run: | - set -euo pipefail - IMAGE_REF="${{ matrix.repo }}:${{ matrix.version }}" - echo "Pulling $IMAGE_REF" - docker pull "$IMAGE_REF" - docker image inspect "$IMAGE_REF" >/dev/null 2>&1 - - - name: Run Trivy vulnerability scan (image) - uses: aquasecurity/trivy-action@9ab158e8597f3b310480b9a69402b419bc03dbd5 # v0.24.0 - with: - image-ref: ${{ matrix.repo }}:${{ matrix.version }} - format: sarif - output: trivy-${{ matrix.image }}.sarif - vuln-type: "os,library" - severity: "CRITICAL,HIGH,MEDIUM" - ignore-unfixed: true - - - name: Upload SARIF - uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.26.9 - with: - sarif_file: trivy-${{ matrix.image }}.sarif - # Distinguish reports per container image in Code Scanning UI - category: trivy-${{ matrix.image }} - - - name: Upload raw report artifact - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 - with: - name: trivy-report-${{ matrix.image }} - path: trivy-${{ matrix.image }}.sarif - retention-days: 7 - - summarize: - name: Summarize Results - needs: [trivy-scan] - runs-on: ubuntu-latest - if: always() - steps: - - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - path: trivy-artifacts - - - name: Debug artifact contents - run: | - echo "Downloaded artifact directory tree:" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - find trivy-artifacts -maxdepth 3 -type f -print >> $GITHUB_STEP_SUMMARY || true - echo '```' >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - - - name: Generate summary - run: | - chmod +x .github/workflows/scripts/security/generate_trivy_summary.sh - .github/workflows/scripts/security/generate_trivy_summary.sh - - - name: 
Fail if critical vulns found (optional gate) - if: ${{ github.event_name != 'pull_request' }} - run: | - set -e - found=$(grep -R "CRITICAL" -c trivy-artifacts || true) - if [ "${found}" != "0" ]; then - echo "Critical vulnerabilities detected. (Gate currently informational.)" >&2 - fi - - name: Create GitHub issues for critical CVEs - if: ${{ github.event_name != 'pull_request' }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - GITHUB_REPOSITORY: ${{ github.repository }} - run: | - set -euo pipefail - echo "Installing dependencies for issue creation script"; - npm init -y >/dev/null 2>&1 || true - npm install @octokit/rest@21 glob >/dev/null 2>&1 - node .github/workflows/scripts/security/create_critical_cve_issues.js +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 +name: Container Security Scan + +on: + # Nightly scan on main branch only; manual dispatch allowed. + schedule: + - cron: "0 3 * * *" # Daily at 03:00 UTC (runs on default branch context: main) + workflow_dispatch: + +permissions: + contents: read + security-events: write # for uploading SARIF + actions: read + issues: write # create issues for critical CVEs + +jobs: + get-version: + name: Get Latest Release Version + runs-on: ubuntu-latest + outputs: + version: ${{ steps.get-release.outputs.version }} + steps: + - name: Get latest release + id: get-release + run: | + LATEST_TAG=$(curl -s https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r .tag_name) + echo "version=${LATEST_TAG}" >> $GITHUB_OUTPUT + echo "Latest release version: ${LATEST_TAG}" + + trivy-scan: + name: Trivy Image Scan (Pull from GHCR) + runs-on: ubuntu-latest + needs: get-version + strategy: + fail-fast: false + matrix: + include: + - image: dir-apiserver + repo: ghcr.io/${{ github.repository_owner }}/dir-apiserver + version: ${{ needs.get-version.outputs.version }} + - image: dir-ctl + repo: ghcr.io/${{ github.repository_owner }}/dir-ctl + version: ${{ needs.get-version.outputs.version }} + - image: zot + repo: ghcr.io/project-zot/zot + version: v2.1.11 + - image: spire-server + repo: ghcr.io/spiffe/spire-server + version: 1.13.3 + - image: spire-agent + repo: ghcr.io/spiffe/spire-agent + version: 1.13.3 + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Log in to GHCR + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Pull image (explicit version) + run: | + set -euo pipefail + IMAGE_REF="${{ matrix.repo }}:${{ matrix.version }}" + echo "Pulling $IMAGE_REF" + docker pull "$IMAGE_REF" + docker image inspect "$IMAGE_REF" >/dev/null 2>&1 + + - name: Run Trivy vulnerability scan (image) + uses: aquasecurity/trivy-action@9ab158e8597f3b310480b9a69402b419bc03dbd5 # v0.24.0 + with: + image-ref: ${{ matrix.repo }}:${{ matrix.version }} + format: sarif + output: trivy-${{ matrix.image }}.sarif + vuln-type: "os,library" + severity: "CRITICAL,HIGH,MEDIUM" + ignore-unfixed: true + + - name: Upload SARIF + uses: github/codeql-action/upload-sarif@1b1aada464948af03b950897e5eb522f92603cc2 # v3.26.9 + with: + sarif_file: trivy-${{ matrix.image }}.sarif + # Distinguish reports per container image in Code Scanning UI + category: trivy-${{ matrix.image }} + + - name: Upload raw report artifact + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 + with: + name: 
trivy-report-${{ matrix.image }} + path: trivy-${{ matrix.image }}.sarif + retention-days: 7 + + summarize: + name: Summarize Results + needs: [trivy-scan] + runs-on: ubuntu-latest + if: always() + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + path: trivy-artifacts + + - name: Debug artifact contents + run: | + echo "Downloaded artifact directory tree:" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + find trivy-artifacts -maxdepth 3 -type f -print >> $GITHUB_STEP_SUMMARY || true + echo '```' >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + + - name: Generate summary + run: | + chmod +x .github/workflows/scripts/security/generate_trivy_summary.sh + .github/workflows/scripts/security/generate_trivy_summary.sh + + - name: Fail if critical vulns found (optional gate) + if: ${{ github.event_name != 'pull_request' }} + run: | + set -e + found=$(grep -R "CRITICAL" -c trivy-artifacts || true) + if [ "${found}" != "0" ]; then + echo "Critical vulnerabilities detected. (Gate currently informational.)" >&2 + fi + - name: Create GitHub issues for critical CVEs + if: ${{ github.event_name != 'pull_request' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} + run: | + set -euo pipefail + echo "Installing dependencies for issue creation script"; + npm init -y >/dev/null 2>&1 || true + npm install @octokit/rest@21 glob >/dev/null 2>&1 + node .github/workflows/scripts/security/create_critical_cve_issues.js diff --git a/.github/workflows/demo-dir.yaml b/.github/workflows/demo-dir.yaml index 08f0130a4..cfb41a5a1 100644 --- a/.github/workflows/demo-dir.yaml +++ b/.github/workflows/demo-dir.yaml @@ -1,274 +1,274 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Demo - DIR - -# This workflow demonstrates how to use the Dir CLI with a Kind cluster and Helm chart. -# It can be run in either local mode or network mode. -# It includes steps for building, pushing, publishing, listing and pulling records. 
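One caveat in the "Fail if critical vulns found" gate of the scan workflow above: `grep -R -c` emits a `path:count` line for every file it visits, so `$found` is a multi-line list rather than a single number, and the comparison against the literal string "0" is unreliable. Since the gate is informational today this only skews the log message, but a stricter variant could count the files that actually contain a finding; a sketch:

```bash
# Sketch: count SARIF reports that contain at least one CRITICAL finding.
found=$(grep -Rl "CRITICAL" trivy-artifacts | wc -l || true)
if [ "$found" -gt 0 ]; then
  echo "Critical vulnerabilities detected in $found report(s)." >&2
  # exit 1  # uncomment to turn the informational gate into a blocking one
fi
```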
- -on: - workflow_dispatch: - inputs: - git-ref: - required: true - type: string - default: "main" - description: "Git branch, tag, or commit hash to build from" - network: - required: true - type: boolean - default: false - description: "Deploy in network mode" - -permissions: - id-token: write - contents: read - -jobs: - demo: - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - ref: ${{ inputs.git-ref }} - - - name: Setup Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 - with: - go-version: "1.25.2" - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Build Dirctl - run: | - # Generate API code and compile CLI - task cli:compile - ls -la bin/ - - - name: Build Docker Images - run: | - # Build Docker images from source using commit SHA as tag - COMMIT_SHA=$(git rev-parse --short HEAD) - IMAGE_TAG=${COMMIT_SHA} task build - - - name: Create k8s Kind Cluster - uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v.1.12.0 - with: - version: v0.24.0 - cluster_name: dir-demo - install_only: true - - - name: Deploy Dir - run: | - COMMIT_SHA=$(git rev-parse --short HEAD) - if [ ${{ inputs.network }} = "true" ]; then - echo "Using network mode" - IMAGE_TAG=${COMMIT_SHA} task deploy:network - task deploy:network:port-forward - else - echo "Using local mode" - IMAGE_TAG=${COMMIT_SHA} task deploy:local - task deploy:local:port-forward - fi - - - name: Run push command - run: | - if [ "${{ inputs.network }}" = "true" ]; then - echo "Running dir push command on Peer 1" - bin/dirctl push e2e/shared/testdata/record_070.json --server-addr 127.0.0.1:8890 > digest.txt - else - echo "Running dir push command" - bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt - fi - echo "Pushed image digest:" - cat digest.txt - - - name: Run publish command - run: | - - if [ "${{ inputs.network }}" = "true" ]; then - echo "Running dir publish command on Peer 1" - bin/dirctl publish $(cat digest.txt) --server-addr 127.0.0.1:8890 - echo "Waiting for publish to complete" - sleep 120 - else - echo "Running dir publish command" - bin/dirctl publish $(cat digest.txt) - fi - - - name: Run list info command - continue-on-error: true - run: | - if [ "${{ inputs.network }}" = "true" ]; then - echo "Running dir list info command on Peer 1" - bin/dirctl list info --server-addr 127.0.0.1:8890 - echo "Running dir list info command on Peer 2" - bin/dirctl list info --server-addr 127.0.0.1:8891 - echo "Running dir list info command on Peer 3" - bin/dirctl list info --server-addr 127.0.0.1:8892 - else - echo "Running dir list info command" - bin/dirctl list info - fi - - - name: Run list search by skill command - continue-on-error: true - run: | - if [ "${{ inputs.network }}" = "true" ]; then - echo "Running dir list search by skill command on Peer 1" - bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8890 - echo "Running dir list search by skill command on Peer 2" - bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8891 - echo "Running dir list search by skill command on Peer 3" - bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8892 - else - echo "Running dir list search by skill command" - bin/dirctl list "/skills/Natural Language Processing" - fi - - - name: Run pull command - run: | - if [ "${{ inputs.network }}" = 
"true" ]; then - echo "Running dir pull command on Peer 1" - bin/dirctl pull $(cat digest.txt) --server-addr 127.0.0.1:8890 - else - echo "Running dir pull command" - bin/dirctl pull $(cat digest.txt) - fi - - - name: Clean up - run: | - echo "Cleaning up" - if [ "${{ inputs.network }}" = "true" ]; then - task deploy:network:port-forward:cleanup - task deploy:network:cleanup - else - task deploy:local:port-forward:cleanup - task deploy:local:cleanup - fi - - sign-and-verify-with-oidc: - # This job demonstrates how to sign and verify a record using OIDC tokens. - # It requires the `id-token` permission to generate OIDC tokens. - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - ref: ${{ inputs.git-ref }} - - - name: Setup Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 - with: - go-version: "1.25.2" - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Build Dirctl - run: | - task cli:compile - - - name: Install Cosign - uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - - - name: Get Github OIDC token - id: oidc-token - run: | - OIDC_TOKEN=$(curl -s -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" \ - "$ACTIONS_ID_TOKEN_REQUEST_URL&audience=sigstore" | jq -r '.value') - echo "::add-mask::$OIDC_TOKEN" - echo "token=$OIDC_TOKEN" >> $GITHUB_OUTPUT - - - name: Build Docker Images - run: | - COMMIT_SHA=$(git rev-parse --short HEAD) - IMAGE_TAG=${COMMIT_SHA} task build - - - name: Deploy Dir - run: | - echo "Deploying Dir" - COMMIT_SHA=$(git rev-parse --short HEAD) - IMAGE_TAG=${COMMIT_SHA} task deploy:local - task deploy:local:port-forward - - - name: Push record - run: | - echo "Pushing record" - bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt - - - name: Sign record - run: | - echo "Running dir sign command" - bin/dirctl sign $(cat digest.txt) \ - --oidc-token ${{ steps.oidc-token.outputs.token }} \ - --oidc-provider-url "https://token.actions.githubusercontent.com" \ - --oidc-client-id "https://github.com/${{ github.repository }}/.github/workflows/demo.yaml@${{ github.ref }}" - - - name: Run verify command - run: | - echo "Running dir verify command" - bin/dirctl verify $(cat digest.txt) - - sign-and-verify-with-key: - # This job demonstrates how to sign and verify a record using Cosign keys. 
- runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - ref: ${{ inputs.git-ref }} - - - name: Setup Go - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 - with: - go-version: "1.25.2" - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Build Dirctl - run: | - task cli:compile - - - name: Install Cosign - uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - - - name: Generate Cosign keys - run: | - echo "Generating Cosign keys" - cosign generate-key-pair - echo "Cosign keys generated successfully" - - - name: Build Docker Images - run: | - COMMIT_SHA=$(git rev-parse --short HEAD) - IMAGE_TAG=${COMMIT_SHA} task build - - - name: Deploy Dir - run: | - echo "Deploying Dir" - COMMIT_SHA=$(git rev-parse --short HEAD) - IMAGE_TAG=${COMMIT_SHA} task deploy:local - task deploy:local:port-forward - - - name: Push record - run: | - echo "Pushing record" - bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt - - - name: Sign record - run: | - echo "Running dir sign command" - bin/dirctl sign $(cat digest.txt) --key cosign.key - - - name: Verify record - run: | - echo "Running dir verify command" - bin/dirctl verify $(cat digest.txt) +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Demo - DIR + +# This workflow demonstrates how to use the Dir CLI with a Kind cluster and Helm chart. +# It can be run in either local mode or network mode. +# It includes steps for building, pushing, publishing, listing and pulling records. + +on: + workflow_dispatch: + inputs: + git-ref: + required: true + type: string + default: "main" + description: "Git branch, tag, or commit hash to build from" + network: + required: true + type: boolean + default: false + description: "Deploy in network mode" + +permissions: + id-token: write + contents: read + +jobs: + demo: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + ref: ${{ inputs.git-ref }} + + - name: Setup Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + with: + go-version: "1.25.2" + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Build Dirctl + run: | + # Generate API code and compile CLI + task cli:compile + ls -la bin/ + + - name: Build Docker Images + run: | + # Build Docker images from source using commit SHA as tag + COMMIT_SHA=$(git rev-parse --short HEAD) + IMAGE_TAG=${COMMIT_SHA} task build + + - name: Create k8s Kind Cluster + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v.1.12.0 + with: + version: v0.24.0 + cluster_name: dir-demo + install_only: true + + - name: Deploy Dir + run: | + COMMIT_SHA=$(git rev-parse --short HEAD) + if [ ${{ inputs.network }} = "true" ]; then + echo "Using network mode" + IMAGE_TAG=${COMMIT_SHA} task deploy:network + task deploy:network:port-forward + else + echo "Using local mode" + IMAGE_TAG=${COMMIT_SHA} task deploy:local + task deploy:local:port-forward + fi + + - name: Run push command + run: | + if [ "${{ inputs.network }}" = "true" ]; then + echo "Running dir push command on Peer 1" + bin/dirctl push e2e/shared/testdata/record_070.json --server-addr 127.0.0.1:8890 > digest.txt + else + echo 
"Running dir push command" + bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt + fi + echo "Pushed image digest:" + cat digest.txt + + - name: Run publish command + run: | + + if [ "${{ inputs.network }}" = "true" ]; then + echo "Running dir publish command on Peer 1" + bin/dirctl publish $(cat digest.txt) --server-addr 127.0.0.1:8890 + echo "Waiting for publish to complete" + sleep 120 + else + echo "Running dir publish command" + bin/dirctl publish $(cat digest.txt) + fi + + - name: Run list info command + continue-on-error: true + run: | + if [ "${{ inputs.network }}" = "true" ]; then + echo "Running dir list info command on Peer 1" + bin/dirctl list info --server-addr 127.0.0.1:8890 + echo "Running dir list info command on Peer 2" + bin/dirctl list info --server-addr 127.0.0.1:8891 + echo "Running dir list info command on Peer 3" + bin/dirctl list info --server-addr 127.0.0.1:8892 + else + echo "Running dir list info command" + bin/dirctl list info + fi + + - name: Run list search by skill command + continue-on-error: true + run: | + if [ "${{ inputs.network }}" = "true" ]; then + echo "Running dir list search by skill command on Peer 1" + bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8890 + echo "Running dir list search by skill command on Peer 2" + bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8891 + echo "Running dir list search by skill command on Peer 3" + bin/dirctl list "/skills/Natural Language Processing" --server-addr 127.0.0.1:8892 + else + echo "Running dir list search by skill command" + bin/dirctl list "/skills/Natural Language Processing" + fi + + - name: Run pull command + run: | + if [ "${{ inputs.network }}" = "true" ]; then + echo "Running dir pull command on Peer 1" + bin/dirctl pull $(cat digest.txt) --server-addr 127.0.0.1:8890 + else + echo "Running dir pull command" + bin/dirctl pull $(cat digest.txt) + fi + + - name: Clean up + run: | + echo "Cleaning up" + if [ "${{ inputs.network }}" = "true" ]; then + task deploy:network:port-forward:cleanup + task deploy:network:cleanup + else + task deploy:local:port-forward:cleanup + task deploy:local:cleanup + fi + + sign-and-verify-with-oidc: + # This job demonstrates how to sign and verify a record using OIDC tokens. + # It requires the `id-token` permission to generate OIDC tokens. 
+ runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + ref: ${{ inputs.git-ref }} + + - name: Setup Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + with: + go-version: "1.25.2" + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Build Dirctl + run: | + task cli:compile + + - name: Install Cosign + uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 + + - name: Get Github OIDC token + id: oidc-token + run: | + OIDC_TOKEN=$(curl -s -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" \ + "$ACTIONS_ID_TOKEN_REQUEST_URL&audience=sigstore" | jq -r '.value') + echo "::add-mask::$OIDC_TOKEN" + echo "token=$OIDC_TOKEN" >> $GITHUB_OUTPUT + + - name: Build Docker Images + run: | + COMMIT_SHA=$(git rev-parse --short HEAD) + IMAGE_TAG=${COMMIT_SHA} task build + + - name: Deploy Dir + run: | + echo "Deploying Dir" + COMMIT_SHA=$(git rev-parse --short HEAD) + IMAGE_TAG=${COMMIT_SHA} task deploy:local + task deploy:local:port-forward + + - name: Push record + run: | + echo "Pushing record" + bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt + + - name: Sign record + run: | + echo "Running dir sign command" + bin/dirctl sign $(cat digest.txt) \ + --oidc-token ${{ steps.oidc-token.outputs.token }} \ + --oidc-provider-url "https://token.actions.githubusercontent.com" \ + --oidc-client-id "https://github.com/${{ github.repository }}/.github/workflows/demo.yaml@${{ github.ref }}" + + - name: Run verify command + run: | + echo "Running dir verify command" + bin/dirctl verify $(cat digest.txt) + + sign-and-verify-with-key: + # This job demonstrates how to sign and verify a record using Cosign keys. 
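Condensed, the key-based job that follows runs four commands; a local sketch, assuming `bin/dirctl` is already compiled and a deployment is port-forwarded as in the demo job (`COSIGN_PASSWORD` is set only to keep key generation non-interactive):

```bash
# Sketch: key-based sign/verify flow from the job below, run by hand.
COSIGN_PASSWORD="" cosign generate-key-pair   # writes cosign.key / cosign.pub
DIGEST=$(bin/dirctl push e2e/shared/testdata/record_070.json)
bin/dirctl sign "$DIGEST" --key cosign.key
bin/dirctl verify "$DIGEST"
```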
+ runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + ref: ${{ inputs.git-ref }} + + - name: Setup Go + uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + with: + go-version: "1.25.2" + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Build Dirctl + run: | + task cli:compile + + - name: Install Cosign + uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 + + - name: Generate Cosign keys + run: | + echo "Generating Cosign keys" + cosign generate-key-pair + echo "Cosign keys generated successfully" + + - name: Build Docker Images + run: | + COMMIT_SHA=$(git rev-parse --short HEAD) + IMAGE_TAG=${COMMIT_SHA} task build + + - name: Deploy Dir + run: | + echo "Deploying Dir" + COMMIT_SHA=$(git rev-parse --short HEAD) + IMAGE_TAG=${COMMIT_SHA} task deploy:local + task deploy:local:port-forward + + - name: Push record + run: | + echo "Pushing record" + bin/dirctl push e2e/shared/testdata/record_070.json > digest.txt + + - name: Sign record + run: | + echo "Running dir sign command" + bin/dirctl sign $(cat digest.txt) --key cosign.key + + - name: Verify record + run: | + echo "Running dir verify command" + bin/dirctl verify $(cat digest.txt) diff --git a/.github/workflows/demo-dirctl.yaml b/.github/workflows/demo-dirctl.yaml index fdb855ebb..1cddf6951 100644 --- a/.github/workflows/demo-dirctl.yaml +++ b/.github/workflows/demo-dirctl.yaml @@ -1,28 +1,28 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Demo - DIRCTL - -# This workflow demonstrates how to use the Dir CLI in GitHub Actions. - -on: - workflow_dispatch: - -jobs: - demo: - runs-on: ubuntu-latest - steps: - - name: Setup dirctl - uses: agntcy/dir/.github/actions/setup-dirctl@main - - - name: Check dirctl version - run: | - echo "Running version command" - dirctl version - - # Continue developer flows such as: - # - Push - # - Verify - # - Publish - # - Search - # - Discover +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Demo - DIRCTL + +# This workflow demonstrates how to use the Dir CLI in GitHub Actions. 
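The developer flows this workflow alludes to at the end (push, publish, search, pull) chain together naturally; a sketch using only commands already exercised by the demo above, assuming a locally deployed Dir with port-forwarding active:

```bash
# Sketch: the end-to-end record flow from the demo jobs, in one pass.
DIGEST=$(bin/dirctl push e2e/shared/testdata/record_070.json)
bin/dirctl publish "$DIGEST"
bin/dirctl list info
bin/dirctl list "/skills/Natural Language Processing"
bin/dirctl pull "$DIGEST"
```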
+ +on: + workflow_dispatch: + +jobs: + demo: + runs-on: ubuntu-latest + steps: + - name: Setup dirctl + uses: agntcy/dir/.github/actions/setup-dirctl@main + + - name: Check dirctl version + run: | + echo "Running version command" + dirctl version + + # Continue developer flows such as: + # - Push + # - Verify + # - Publish + # - Search + # - Discover diff --git a/.github/workflows/lint-pr-title.yaml b/.github/workflows/lint-pr-title.yaml index 9eb05e2ed..9e683807c 100644 --- a/.github/workflows/lint-pr-title.yaml +++ b/.github/workflows/lint-pr-title.yaml @@ -1,67 +1,67 @@ -name: Lint and comment PR Title - -on: - workflow_call: - -permissions: - contents: read - pull-requests: write - -jobs: - validate: - name: Validate - runs-on: ubuntu-latest - steps: - - uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1 - id: lint_pr_title - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - with: - ignoreLabels: | - ignore-semantic-pr - subjectPattern: "^(?![A-Z]).+$" - types: | - build - chore - ci - deps - docs - feat - fix - perf - refactor - revert - style - test - release - scopes: | - dir - dir/api - dir/server - dir/client - sdk - sdk/go - sdk/js - sdk/py - cli - cli/dir - helm - helm/dir - helm/dirctl - docker - importer - mcp - .* - requireScope: true - - - uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405 # v2.9.4 - if: always() && (steps.lint_pr_title.outputs.error_message != null) - with: - header: pr-title-lint-error - message: "Hey there and thank you for opening this pull request! \U0001F44B\U0001F3FC\n\nWe require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/),\nand it looks like your proposed title needs to be adjusted.\n\nDetails:\n\n```\n${{ steps.lint_pr_title.outputs.error_message }}\n```\n\nExamples:\n\n```\n- feat(dir/api): add new authentication endpoint\n- fix(sdk/js): resolve issue with token refresh\n- chore(deps): update dependencies\n```\n" - - - if: "${{ steps.lint_pr_title.outputs.error_message == null }}" - uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405 # v2.9.4 - with: - header: pr-title-lint-error - delete: true +name: Lint and comment PR Title + +on: + workflow_call: + +permissions: + contents: read + pull-requests: write + +jobs: + validate: + name: Validate + runs-on: ubuntu-latest + steps: + - uses: amannn/action-semantic-pull-request@48f256284bd46cdaab1048c3721360e808335d50 # v6.1.1 + id: lint_pr_title + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + with: + ignoreLabels: | + ignore-semantic-pr + subjectPattern: "^(?![A-Z]).+$" + types: | + build + chore + ci + deps + docs + feat + fix + perf + refactor + revert + style + test + release + scopes: | + dir + dir/api + dir/server + dir/client + sdk + sdk/go + sdk/js + sdk/py + cli + cli/dir + helm + helm/dir + helm/dirctl + docker + importer + mcp + .* + requireScope: true + + - uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405 # v2.9.4 + if: always() && (steps.lint_pr_title.outputs.error_message != null) + with: + header: pr-title-lint-error + message: "Hey there and thank you for opening this pull request! 
\U0001F44B\U0001F3FC\n\nWe require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/),\nand it looks like your proposed title needs to be adjusted.\n\nDetails:\n\n```\n${{ steps.lint_pr_title.outputs.error_message }}\n```\n\nExamples:\n\n```\n- feat(dir/api): add new authentication endpoint\n- fix(sdk/js): resolve issue with token refresh\n- chore(deps): update dependencies\n```\n" + + - if: "${{ steps.lint_pr_title.outputs.error_message == null }}" + uses: marocchino/sticky-pull-request-comment@773744901bac0e8cbb5a0dc842800d45e9b2b405 # v2.9.4 + with: + header: pr-title-lint-error + delete: true diff --git a/.github/workflows/post-release.yml b/.github/workflows/post-release.yml index be71b8165..e9c534396 100644 --- a/.github/workflows/post-release.yml +++ b/.github/workflows/post-release.yml @@ -1,49 +1,49 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Post Release -on: - release: - types: [published] - -permissions: - contents: write - -jobs: - prepare: - name: Prepare - runs-on: ubuntu-latest - outputs: - modules: ${{ steps.modules.outputs.modules }} - steps: - - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - - name: Get modules - id: modules - run: | - # shellcheck disable=SC2016 - echo "modules=$(find . -name go.mod -type f -print0 | xargs -0 awk '/module/ {print $2}' | jq -c -R '[.,inputs] | map(sub("^github.com\/agntcy\/dir\/";""))')" >> "$GITHUB_OUTPUT" - - create-module-tags: - needs: prepare - name: Create module tags - runs-on: ubuntu-latest - strategy: - matrix: - tags: ${{ fromJson(needs.prepare.outputs.modules) }} - steps: - - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - - name: Create tags - uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - github.rest.git.createRef({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: 'refs/tags/${{ matrix.tags }}/${{ github.ref_name }}', - sha: context.sha - }) +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Post Release +on: + release: + types: [published] + +permissions: + contents: write + +jobs: + prepare: + name: Prepare + runs-on: ubuntu-latest + outputs: + modules: ${{ steps.modules.outputs.modules }} + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Get modules + id: modules + run: | + # shellcheck disable=SC2016 + echo "modules=$(find . 
-name go.mod -type f -print0 | xargs -0 awk '/module/ {print $2}' | jq -c -R '[.,inputs] | map(sub("^github.com\/agntcy\/dir\/";""))')" >> "$GITHUB_OUTPUT" + + create-module-tags: + needs: prepare + name: Create module tags + runs-on: ubuntu-latest + strategy: + matrix: + tags: ${{ fromJson(needs.prepare.outputs.modules) }} + steps: + - name: Checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Create tags + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: 'refs/tags/${{ matrix.tags }}/${{ github.ref_name }}', + sha: context.sha + }) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 1eabb9574..cb80bba31 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -1,53 +1,53 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: PR - -on: - pull_request_target: - types: - - opened - - edited - - reopened - - synchronize - -permissions: - pull-requests: write - contents: read - -jobs: - validate_pr_title: - name: Validate PR Title - uses: ./.github/workflows/lint-pr-title.yaml - - label: - name: Label - runs-on: ubuntu-latest - steps: - # Return release version when merged: https://github.com/CodelyTV/pr-size-labeler/pull/97 - - uses: codelytv/pr-size-labeler@7410ab25f68d95323ceb6fc4b53e8556323b52a7 # 7410ab2 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - xs_label: "size/XS" - xs_max_size: "50" - s_label: "size/S" - s_max_size: "200" - m_label: "size/M" - m_max_size: "1000" - l_label: "size/L" - l_max_size: "2000" - xl_label: "size/XL" - fail_if_xl: "false" - message_if_xl: > - This PR exceeds the recommended size of 2000 lines. - Please make sure you are NOT addressing multiple issues with one PR. - Note that this PR might take longer to review due to large size. - files_to_ignore: | - "*.md" - "**/*.pb.go" - "**/*_pb2.py" - "**/*_pb2_grpc.py" - "**/*_pb.js" - "**/*_pb.d.ts" - "**/go.sum" - "**/go.mod" +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: PR + +on: + pull_request_target: + types: + - opened + - edited + - reopened + - synchronize + +permissions: + pull-requests: write + contents: read + +jobs: + validate_pr_title: + name: Validate PR Title + uses: ./.github/workflows/lint-pr-title.yaml + + label: + name: Label + runs-on: ubuntu-latest + steps: + # Return release version when merged: https://github.com/CodelyTV/pr-size-labeler/pull/97 + - uses: codelytv/pr-size-labeler@7410ab25f68d95323ceb6fc4b53e8556323b52a7 # 7410ab2 + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + xs_label: "size/XS" + xs_max_size: "50" + s_label: "size/S" + s_max_size: "200" + m_label: "size/M" + m_max_size: "1000" + l_label: "size/L" + l_max_size: "2000" + xl_label: "size/XL" + fail_if_xl: "false" + message_if_xl: > + This PR exceeds the recommended size of 2000 lines. + Please make sure you are NOT addressing multiple issues with one PR. + Note that this PR might take longer to review due to large size. 
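Returning to the post-release workflow above: the "Get modules" one-liner packs three tools into one pipe and is worth unpacking; a sketch of what each stage emits, using the exact jq program from the step:

```bash
# Sketch: the "Get modules" pipeline from post-release.yml, stage by stage.
# Stage 1: print the module path from every go.mod's `module` directive.
find . -name go.mod -type f -print0 | xargs -0 awk '/module/ {print $2}'
# Stage 2: jq collects the raw lines into a JSON array and strips the
# repository prefix, e.g. github.com/agntcy/dir/cli -> cli.
find . -name go.mod -type f -print0 \
  | xargs -0 awk '/module/ {print $2}' \
  | jq -c -R '[.,inputs] | map(sub("^github.com\/agntcy\/dir\/";""))'
```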
+ files_to_ignore: | + "*.md" + "**/*.pb.go" + "**/*_pb2.py" + "**/*_pb2_grpc.py" + "**/*_pb.js" + "**/*_pb.d.ts" + "**/go.sum" + "**/go.mod" diff --git a/.github/workflows/project.yaml b/.github/workflows/project.yaml index 336f58861..34a712997 100644 --- a/.github/workflows/project.yaml +++ b/.github/workflows/project.yaml @@ -1,66 +1,66 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Project - -on: - schedule: - - cron: "0 0 * * 0" - workflow_dispatch: - -permissions: - issues: write - pull-requests: write - -jobs: - sync-items: - name: Sync items - runs-on: ubuntu-latest - steps: - - name: Sync stale items - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - days-before-stale: 60 - days-before-close: 90 - stale-issue-label: stale - stale-pr-label: stale - exempt-issue-labels: no-stale - exempt-pr-labels: no-stale - stale-issue-message: > - Thank you for your contribution! This issue has been automatically - marked as `stale` because it has no recent activity in the last - 60 days. It will be closed in 90 days, if no further activity - occurs. If this issue is still relevant, please leave a comment to - let us know, and the `stale` label will be automatically removed. - stale-pr-message: > - Thank you for your contribution! This PR has been automatically - marked as `stale` because it has no recent activity in the last - 60 days. It will be closed in 90 days, if no further activity occurs. - If this pull request is still relevant, please leave a comment to - let us know, and the `stale` label will be automatically removed. - close-issue-message: > - This issue has been marked `stale` for 90 days, and is now closed - due to inactivity. If the issue is still relevant, please re-open - this issue or file a new one. Thank you! - close-pr-message: > - This PR has been marked `stale` for 90 days, and is now closed - due to inactivity. If this contribution is still relevant, please - re-open this PR or file a new one. Thank you! - - sync-labels: - name: Sync labels - runs-on: ubuntu-latest - steps: - - name: Checkout labels - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - sparse-checkout: .github/labels.yml - - - name: Sync labels - uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3 - with: - dry-run: false - token: ${{ secrets.GITHUB_TOKEN }} - config-file: .github/labels.yml - delete-other-labels: false +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Project + +on: + schedule: + - cron: "0 0 * * 0" + workflow_dispatch: + +permissions: + issues: write + pull-requests: write + +jobs: + sync-items: + name: Sync items + runs-on: ubuntu-latest + steps: + - name: Sync stale items + uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-stale: 60 + days-before-close: 90 + stale-issue-label: stale + stale-pr-label: stale + exempt-issue-labels: no-stale + exempt-pr-labels: no-stale + stale-issue-message: > + Thank you for your contribution! This issue has been automatically + marked as `stale` because it has no recent activity in the last + 60 days. It will be closed in 90 days, if no further activity + occurs. If this issue is still relevant, please leave a comment to + let us know, and the `stale` label will be automatically removed. 
+ stale-pr-message: >
+ Thank you for your contribution! This PR has been automatically
+ marked as `stale` because it has no recent activity in the last
+ 60 days. It will be closed in 90 days, if no further activity occurs.
+ If this pull request is still relevant, please leave a comment to
+ let us know, and the `stale` label will be automatically removed.
+ close-issue-message: >
+ This issue has been marked `stale` for 90 days, and is now closed
+ due to inactivity. If the issue is still relevant, please re-open
+ this issue or file a new one. Thank you!
+ close-pr-message: >
+ This PR has been marked `stale` for 90 days, and is now closed
+ due to inactivity. If this contribution is still relevant, please
+ re-open this PR or file a new one. Thank you!
+
+ sync-labels:
+ name: Sync labels
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout labels
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+ with:
+ sparse-checkout: .github/labels.yml
+
+ - name: Sync labels
+ uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3
+ with:
+ dry-run: false
+ token: ${{ secrets.GITHUB_TOKEN }}
+ config-file: .github/labels.yml
+ delete-other-labels: false
diff --git a/.github/workflows/reusable-brew-update.yaml b/.github/workflows/reusable-brew-update.yaml
index 3217e5265..33d329ab5 100644
--- a/.github/workflows/reusable-brew-update.yaml
+++ b/.github/workflows/reusable-brew-update.yaml
@@ -1,105 +1,105 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-name: Brew formula update
-
-# This workflow automates the brew formula update process by replacing the version number with the latest release,
-# recalculating the hashes for the binaries, and creating a new PR with the changes. 
- -on: - release: - types: [released] - - workflow_call: - workflow_dispatch: - -permissions: - contents: write - pull-requests: write - -jobs: - formula-update: - name: Update Formula - runs-on: ubuntu-latest - env: - # Required for `gh` CLI authentication - GH_TOKEN: ${{ secrets.AGNTCY_BUILD_BOT_GH_TOKEN }} - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Download latest binary releases - id: release-infos - run: | - LATEST_VERSION=$(gh release list --limit 1 --json tagName --jq '.[] | .tagName') - echo "LATEST_VERSION=$LATEST_VERSION" >> $GITHUB_OUTPUT - - gh release download $LATEST_VERSION - - echo "DIRCTL_HASH_DARWIN_ARM=$(sha256sum ./dirctl-darwin-arm64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT - echo "DIRCTL_HASH_DARWIN_AMD=$(sha256sum ./dirctl-darwin-amd64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT - echo "DIRCTL_HASH_LINUX_ARM=$(sha256sum ./dirctl-linux-arm64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT - echo "DIRCTL_HASH_LINUX_AMD=$(sha256sum ./dirctl-linux-amd64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT - - - name: Update Brew formula - id: brew-formula - run: | - # Note: the following account information will not work on GHES - git config user.name "github-actions[bot]" - git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - - git remote set-url origin https://github.com/agntcy/dir.git - - git push origin --delete chore/brew-formula-update || true - - git checkout -B chore/brew-formula-update origin/main - - # Replace version - sed -i "s/version \"v*.*.*\"/version \""${{ steps.release-infos.outputs.LATEST_VERSION }}"\"/" HomebrewFormula/dirctl.rb - - # Replace hashes - sed -i "/url \".*\/dirctl-darwin-arm64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_DARWIN_ARM }}\"/}" ./HomebrewFormula/dirctl.rb - sed -i "/url \".*\/dirctl-darwin-amd64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_DARWIN_AMD }}\"/}" ./HomebrewFormula/dirctl.rb - sed -i "/url \".*\/dirctl-linux-arm64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_LINUX_ARM }}\"/}" ./HomebrewFormula/dirctl.rb - sed -i "/url \".*\/dirctl-linux-amd64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_LINUX_AMD }}\"/}" ./HomebrewFormula/dirctl.rb - - DIFF_FOUND=0 - - if ! 
git diff --exit-code; then
- DIFF_FOUND=1
- fi
-
- echo "DIFF_FOUND=$DIFF_FOUND" >> $GITHUB_OUTPUT
-
- if [ $DIFF_FOUND -eq 1 ]; then
- git commit ./HomebrewFormula/dirctl.rb -m "chore: update brew formula version" --signoff
-
- git push --set-upstream origin chore/brew-formula-update
- fi
-
- - name: Test new formula
- id: test-brew-formula
- if: ${{ steps.brew-formula.outputs.DIFF_FOUND == 1 }}
- run: |
- apt update && apt install curl git -y
-
- NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-
- eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
-
- # Make local formula for testing from the repo homebrew formula file
- brew tap-new --no-git agntcy/dir-test
-
- FORMULA_DIR="$(brew --repository)/Library/Taps/agntcy/homebrew-dir-test/Formula/"
- cp ./HomebrewFormula/dirctl.rb "$FORMULA_DIR"
-
- brew install agntcy/dir-test/dirctl --verbose
-
- dirctl --help
-
- - name: Create PR
- if: ${{ steps.brew-formula.outputs.DIFF_FOUND == 1 && steps.test-brew-formula.outcome == 'success' }}
- run: |
- gh pr create --title "chore(dirctl): update brew formula to ${{ steps.release-infos.outputs.LATEST_VERSION }}" --body "This PR is created by brew-formula-update workflow."
+# Copyright AGNTCY Contributors (https://github.com/agntcy)
+# SPDX-License-Identifier: Apache-2.0
+
+name: Brew formula update
+
+# This workflow automates the brew formula update process by replacing the version number with the latest release,
+# recalculating the hashes for the binaries, and creating a new PR with the changes.
+
+on:
+ release:
+ types: [released]
+
+ workflow_call:
+ workflow_dispatch:
+
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ formula-update:
+ name: Update Formula
+ runs-on: ubuntu-latest
+ env:
+ # Required for `gh` CLI authentication
+ GH_TOKEN: ${{ secrets.AGNTCY_BUILD_BOT_GH_TOKEN }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+ with:
+ fetch-depth: 0
+
+ - name: Download latest binary releases
+ id: release-infos
+ run: |
+ LATEST_VERSION=$(gh release list --limit 1 --json tagName --jq '.[] | .tagName')
+ echo "LATEST_VERSION=$LATEST_VERSION" >> $GITHUB_OUTPUT
+
+ gh release download $LATEST_VERSION
+
+ echo "DIRCTL_HASH_DARWIN_ARM=$(sha256sum ./dirctl-darwin-arm64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
+ echo "DIRCTL_HASH_DARWIN_AMD=$(sha256sum ./dirctl-darwin-amd64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
+ echo "DIRCTL_HASH_LINUX_ARM=$(sha256sum ./dirctl-linux-arm64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
+ echo "DIRCTL_HASH_LINUX_AMD=$(sha256sum ./dirctl-linux-amd64 | cut -d ' ' -f 1)" >> $GITHUB_OUTPUT
+
+ - name: Update Brew formula
+ id: brew-formula
+ run: |
+ # Note: the following account information will not work on GHES
+ git config user.name "github-actions[bot]"
+ git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+
+ git remote set-url origin https://github.com/agntcy/dir.git
+
+ git push origin --delete chore/brew-formula-update || true
+
+ git checkout -B chore/brew-formula-update origin/main
+
+ # Replace version
+ sed -i "s/version \"v*.*.*\"/version \""${{ steps.release-infos.outputs.LATEST_VERSION }}"\"/" HomebrewFormula/dirctl.rb
+
+ # Replace hashes
+ sed -i "/url \".*\/dirctl-darwin-arm64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_DARWIN_ARM }}\"/}" ./HomebrewFormula/dirctl.rb
+ sed -i "/url \".*\/dirctl-darwin-amd64\"/ {N;s/sha256 \".*\"/sha256 \"${{ 
steps.release-infos.outputs.DIRCTL_HASH_DARWIN_AMD }}\"/}" ./HomebrewFormula/dirctl.rb + sed -i "/url \".*\/dirctl-linux-arm64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_LINUX_ARM }}\"/}" ./HomebrewFormula/dirctl.rb + sed -i "/url \".*\/dirctl-linux-amd64\"/ {N;s/sha256 \".*\"/sha256 \"${{ steps.release-infos.outputs.DIRCTL_HASH_LINUX_AMD }}\"/}" ./HomebrewFormula/dirctl.rb + + DIFF_FOUND=0 + + if ! git diff --exit-code; then + DIFF_FOUND=1 + fi + + echo "DIFF_FOUND=$DIFF_FOUND" >> $GITHUB_OUTPUT + + if [ $DIFF_FOUND -eq 1 ]; then + git commit ./HomebrewFormula/dirctl.rb -m "chore: update brew formula version" --signoff + + git push --set-upstream origin chore/brew-formula-update + fi + + - name: Test new formula + id: test-brew-formula + if: ${{ steps.brew-formula.outputs.DIFF_FOUND == 1 }} + run: | + apt update && apt install curl git -y + + NONINTERACTIVE=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + + eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" + + # Make local formula for testing from the repo homebrew formula file + brew tap-new --no-git agntcy/dir-test + + FORMULA_DIR="$(brew --repository)/Library/Taps/agntcy/homebrew-dir-test/Formula/" + cp ./HomebrewFormula/dirctl.rb "$FORMULA_DIR" + + brew install agntcy/dir-test/dirctl --verbose + + dirctl --help + + - name: Create PR + if: ${{ steps.brew-formula.outputs.DIFF_FOUND == 1 && steps.test-brew-formula.outcome == 'success' }} + run: | + gh pr create --title "chore(dirctl): update brew formula to ${{ steps.release-infos.outputs.LATEST_VERSION }}" --body "This PR is created by brew-formula-update workflow." diff --git a/.github/workflows/reusable-build.yaml b/.github/workflows/reusable-build.yaml index e059ad85a..5231853d2 100644 --- a/.github/workflows/reusable-build.yaml +++ b/.github/workflows/reusable-build.yaml @@ -1,240 +1,240 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Build - -on: - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - image_tag: - required: true - type: string - description: "Image tag to use." - image_name_suffix: - required: false - type: string - description: "Suffix to append to image names (e.g., '-dev'). Defaults to empty." - default: "" - push: - required: false - type: boolean - default: false - description: "Whether to push the image to the registry." - build_coverage_images: - required: false - type: boolean - default: false - description: "Whether to also build coverage-instrumented images for E2E tests." 
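Before leaving the brew workflow: its download-hash-splice cycle can be exercised for a single artifact; a sketch reusing the workflow's own `gh` and `sed` invocations (`--pattern` is an assumption to limit the download — the workflow itself fetches every asset):

```bash
# Sketch: refresh one formula hash, mirroring the workflow's sed pattern.
VERSION=$(gh release list --limit 1 --json tagName --jq '.[] | .tagName')
gh release download "$VERSION" --pattern 'dirctl-darwin-arm64'
HASH=$(sha256sum ./dirctl-darwin-arm64 | cut -d ' ' -f 1)
# N appends the line after the matched url to the pattern space, so the
# substitution rewrites the sha256 on the following line.
sed -i "/url \".*\/dirctl-darwin-arm64\"/ {N;s/sha256 \".*\"/sha256 \"$HASH\"/}" \
  HomebrewFormula/dirctl.rb
```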
- -jobs: - cli: - name: CLI - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 - with: - go-version: "1.25.2" - cache-dependency-path: "**/*.sum" - cache: true # NOTE: Default value, just to be explicit - - - name: Create artifacts directory - run: | - mkdir /tmp/artifacts - - - name: Build CLI - run: | - task cli:compile - mv ./bin/dirctl /tmp/artifacts/dirctl - - - name: Upload CLI artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 - with: - name: dirctl-bin - path: /tmp/artifacts/dirctl - retention-days: 7 - - prepare: - name: Prepare - outputs: - matrix: ${{ steps.generate.outputs.matrix }} - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Generate matrix - id: generate - uses: docker/bake-action/subaction/matrix@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 - - image: - name: Image - runs-on: ubuntu-latest - needs: - - prepare - strategy: - matrix: - include: ${{ fromJson(needs.prepare.outputs.matrix) }} - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup QEMU - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - - - name: Docker metadata - id: metadata - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 - with: - images: | - ${{ inputs.image_repo }}/${{ matrix.target }}${{ inputs.image_name_suffix }},enable=true - tags: | - type=raw,value=${{ inputs.image_tag }} - type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }} - - - name: Create artifacts directory - run: | - mkdir /tmp/artifacts - - - name: Build and push - if: ${{ inputs.push }} - uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 - with: - push: true - provenance: false - files: | - ./docker-bake.hcl - cwd://${{ steps.metadata.outputs.bake-file }} - targets: ${{ matrix.target }} - set: | - *.platform=linux/amd64,linux/arm64 - *.output=type=registry - *.cache-to=type=gha,mode=max,scope=${{ matrix.target }} - *.cache-from=type=gha,scope=${{ matrix.target }} - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ inputs.image_tag }} - IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} - - - name: Build and load - if: ${{ !inputs.push }} - uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 - with: - provenance: false - files: | - ./docker-bake.hcl - cwd://${{ steps.metadata.outputs.bake-file }} - targets: ${{ matrix.target }} - set: | - *.platform=linux/amd64 - *.output=type=docker,dest=/tmp/artifacts/${{ matrix.target }}.tar - *.cache-to=type=gha,mode=max,scope=${{ matrix.target }} - *.cache-from=type=gha,scope=${{ matrix.target }} - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ 
inputs.image_tag }} - IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} - DOCKER_BUILD_RECORD_UPLOAD: false - - - name: Upload artifacts - if: ${{ !inputs.push }} - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 - with: - name: ${{ matrix.target }} - path: /tmp/artifacts/${{ matrix.target }}.tar - retention-days: 7 - - coverage-image: - name: Coverage Image - runs-on: ubuntu-latest - if: ${{ inputs.build_coverage_images && !inputs.push }} - needs: - - prepare - strategy: - matrix: - include: ${{ fromJson(needs.prepare.outputs.matrix) }} - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup QEMU - uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - - - name: Docker metadata for coverage image - id: metadata-coverage - uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 - with: - images: | - ${{ inputs.image_repo }}/${{ matrix.target }}${{ inputs.image_name_suffix }},enable=true - tags: | - type=raw,value=${{ inputs.image_tag }}-coverage - - - name: Create artifacts directory - run: | - mkdir /tmp/artifacts - - - name: Build coverage image - uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 - with: - provenance: false - files: | - ./docker-bake.hcl - cwd://${{ steps.metadata-coverage.outputs.bake-file }} - targets: coverage - set: | - *.platform=linux/amd64 - *.output=type=docker,dest=/tmp/artifacts/${{ matrix.target }}-coverage.tar - *.cache-to=type=gha,mode=max,scope=${{ matrix.target }}-coverage - *.cache-from=type=gha,scope=${{ matrix.target }}-coverage - *.args.BUILD_OPTS=-cover -covermode=atomic - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ inputs.image_tag }}-coverage - IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} - DOCKER_BUILD_RECORD_UPLOAD: false - - - name: Upload coverage artifacts - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 - with: - name: ${{ matrix.target }}-coverage - path: /tmp/artifacts/${{ matrix.target }}-coverage.tar - retention-days: 7 +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Build + +on: + workflow_call: + inputs: + image_repo: + required: true + type: string + description: "Image repo to use." + image_tag: + required: true + type: string + description: "Image tag to use." + image_name_suffix: + required: false + type: string + description: "Suffix to append to image names (e.g., '-dev'). Defaults to empty." + default: "" + push: + required: false + type: boolean + default: false + description: "Whether to push the image to the registry." + build_coverage_images: + required: false + type: boolean + default: false + description: "Whether to also build coverage-instrumented images for E2E tests." 
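The coverage images toggled by the last input above are ordinary builds with Go's coverage instrumentation compiled in (the coverage-image job passes `BUILD_OPTS=-cover -covermode=atomic` to the bake file). The effect is easiest to see outside Docker; a sketch, where the `./cli` package path is an assumption about the repository layout:

```bash
# Sketch: build and exercise a coverage-instrumented binary by hand.
go build -cover -covermode=atomic -o bin/dirctl ./cli  # package path assumed
mkdir -p /tmp/covdata
GOCOVERDIR=/tmp/covdata bin/dirctl version   # counters are written on exit
go tool covdata percent -i=/tmp/covdata      # summarize the collected coverage
```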
+ +jobs: + cli: + name: CLI + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Go + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 + with: + go-version: "1.25.2" + cache-dependency-path: "**/*.sum" + cache: true # NOTE: Default value, just to be explicit + + - name: Create artifacts directory + run: | + mkdir /tmp/artifacts + + - name: Build CLI + run: | + task cli:compile + mv ./bin/dirctl /tmp/artifacts/dirctl + + - name: Upload CLI artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: dirctl-bin + path: /tmp/artifacts/dirctl + retention-days: 7 + + prepare: + name: Prepare + outputs: + matrix: ${{ steps.generate.outputs.matrix }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Generate matrix + id: generate + uses: docker/bake-action/subaction/matrix@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 + + image: + name: Image + runs-on: ubuntu-latest + needs: + - prepare + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix) }} + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Login to ghcr.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup QEMU + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + + - name: Docker metadata + id: metadata + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + with: + images: | + ${{ inputs.image_repo }}/${{ matrix.target }}${{ inputs.image_name_suffix }},enable=true + tags: | + type=raw,value=${{ inputs.image_tag }} + type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/') }} + + - name: Create artifacts directory + run: | + mkdir /tmp/artifacts + + - name: Build and push + if: ${{ inputs.push }} + uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 + with: + push: true + provenance: false + files: | + ./docker-bake.hcl + cwd://${{ steps.metadata.outputs.bake-file }} + targets: ${{ matrix.target }} + set: | + *.platform=linux/amd64,linux/arm64 + *.output=type=registry + *.cache-to=type=gha,mode=max,scope=${{ matrix.target }} + *.cache-from=type=gha,scope=${{ matrix.target }} + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ inputs.image_tag }} + IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} + + - name: Build and load + if: ${{ !inputs.push }} + uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 + with: + provenance: false + files: | + ./docker-bake.hcl + cwd://${{ steps.metadata.outputs.bake-file }} + targets: ${{ matrix.target }} + set: | + *.platform=linux/amd64 + *.output=type=docker,dest=/tmp/artifacts/${{ matrix.target }}.tar + *.cache-to=type=gha,mode=max,scope=${{ matrix.target }} + *.cache-from=type=gha,scope=${{ matrix.target }} + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ 
inputs.image_tag }} + IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} + DOCKER_BUILD_RECORD_UPLOAD: false + + - name: Upload artifacts + if: ${{ !inputs.push }} + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: ${{ matrix.target }} + path: /tmp/artifacts/${{ matrix.target }}.tar + retention-days: 7 + + coverage-image: + name: Coverage Image + runs-on: ubuntu-latest + if: ${{ inputs.build_coverage_images && !inputs.push }} + needs: + - prepare + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix) }} + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Login to ghcr.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup QEMU + uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0 + + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 + + - name: Docker metadata for coverage image + id: metadata-coverage + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 + with: + images: | + ${{ inputs.image_repo }}/${{ matrix.target }}${{ inputs.image_name_suffix }},enable=true + tags: | + type=raw,value=${{ inputs.image_tag }}-coverage + + - name: Create artifacts directory + run: | + mkdir /tmp/artifacts + + - name: Build coverage image + uses: docker/bake-action@5be5f02ff8819ecd3092ea6b2e6261c31774f2b4 # v6.10.0 + with: + provenance: false + files: | + ./docker-bake.hcl + cwd://${{ steps.metadata-coverage.outputs.bake-file }} + targets: coverage + set: | + *.platform=linux/amd64 + *.output=type=docker,dest=/tmp/artifacts/${{ matrix.target }}-coverage.tar + *.cache-to=type=gha,mode=max,scope=${{ matrix.target }}-coverage + *.cache-from=type=gha,scope=${{ matrix.target }}-coverage + *.args.BUILD_OPTS=-cover -covermode=atomic + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ inputs.image_tag }}-coverage + IMAGE_NAME_SUFFIX: ${{ inputs.image_name_suffix }} + DOCKER_BUILD_RECORD_UPLOAD: false + + - name: Upload coverage artifacts + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: ${{ matrix.target }}-coverage + path: /tmp/artifacts/${{ matrix.target }}-coverage.tar + retention-days: 7 diff --git a/.github/workflows/reusable-release-helm.yaml b/.github/workflows/reusable-release-helm.yaml index 3c5ac49f7..572f0dfa2 100644 --- a/.github/workflows/reusable-release-helm.yaml +++ b/.github/workflows/reusable-release-helm.yaml @@ -1,80 +1,80 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Release Helm - -on: - workflow_dispatch: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - default: "ghcr.io/agntcy" - release_tag: - required: true - type: string - description: "Release tag for all components." - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - release_tag: - required: true - type: string - description: "Release tag for all components." - chart_path: - required: false - type: string - description: "Chart path component (e.g., 'dir' or 'dir-dev'). Defaults to 'dir'." 
- default: "dir" - -jobs: - chart: - name: Helm chart - runs-on: ubuntu-latest - strategy: - matrix: - chart: [dir, dirctl] - permissions: - packages: write - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Log in to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup Helm - uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 - - - name: Helm update - shell: bash - run: helm dependency update install/charts/${{ matrix.chart }} - - - name: Helm lint - shell: bash - run: helm lint install/charts/${{ matrix.chart }} --with-subcharts - - - name: Helm package - id: build - shell: bash - run: | - helm package install/charts/${{ matrix.chart }} --dependency-update --version ${{ inputs.release_tag }} - echo "package=${{ matrix.chart }}-${{ inputs.release_tag }}.tgz" >> "$GITHUB_OUTPUT" - - - name: Helm push to GHCR OCI registry - shell: bash - run: | - CHART_PATH="${{ inputs.chart_path || 'dir' }}/helm-charts" - echo "🚧 Pushing ${{ steps.build.outputs.package }} to GHCR OCI registry" - echo "Chart path: ${CHART_PATH}" - helm push ${{ steps.build.outputs.package }} oci://${{ inputs.image_repo }}/${CHART_PATH} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Release Helm + +on: + workflow_dispatch: + inputs: + image_repo: + required: true + type: string + description: "Image repo to use." + default: "ghcr.io/agntcy" + release_tag: + required: true + type: string + description: "Release tag for all components." + workflow_call: + inputs: + image_repo: + required: true + type: string + description: "Image repo to use." + release_tag: + required: true + type: string + description: "Release tag for all components." + chart_path: + required: false + type: string + description: "Chart path component (e.g., 'dir' or 'dir-dev'). Defaults to 'dir'." 
+ default: "dir" + +jobs: + chart: + name: Helm chart + runs-on: ubuntu-latest + strategy: + matrix: + chart: [dir, dirctl] + permissions: + packages: write + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Helm + uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1 + + - name: Helm update + shell: bash + run: helm dependency update install/charts/${{ matrix.chart }} + + - name: Helm lint + shell: bash + run: helm lint install/charts/${{ matrix.chart }} --with-subcharts + + - name: Helm package + id: build + shell: bash + run: | + helm package install/charts/${{ matrix.chart }} --dependency-update --version ${{ inputs.release_tag }} + echo "package=${{ matrix.chart }}-${{ inputs.release_tag }}.tgz" >> "$GITHUB_OUTPUT" + + - name: Helm push to GHCR OCI registry + shell: bash + run: | + CHART_PATH="${{ inputs.chart_path || 'dir' }}/helm-charts" + echo "🚧 Pushing ${{ steps.build.outputs.package }} to GHCR OCI registry" + echo "Chart path: ${CHART_PATH}" + helm push ${{ steps.build.outputs.package }} oci://${{ inputs.image_repo }}/${CHART_PATH} diff --git a/.github/workflows/reusable-release-sdk.yaml b/.github/workflows/reusable-release-sdk.yaml index 25b5f88fd..899a8c772 100644 --- a/.github/workflows/reusable-release-sdk.yaml +++ b/.github/workflows/reusable-release-sdk.yaml @@ -1,107 +1,107 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Release SDK - -on: - workflow_dispatch: - inputs: - javascript-release: - required: false - type: boolean - description: "Make a javascript SDK release." - default: false - python-release: - required: false - type: boolean - description: "Make a python SDK release." - default: false - workflow_call: - inputs: - javascript-release: - required: false - type: boolean - description: "Make a javascript SDK release." - default: false - python-release: - required: false - type: boolean - description: "Make a python SDK release." 
- default: false - secrets: - PYPI_API_TOKEN: - description: "PyPI API token for publishing Python SDK" - required: true - NPMJS_TOKEN: - description: "NPM.js token for publishing JavaScript SDK" - required: true - -permissions: - contents: read - -jobs: - python: - name: Python - if: ${{ inputs.python-release == true || inputs.python-release == 'true' }} - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Install deps - shell: bash - run: | - task deps - - - name: Build the Python package - run: | - task sdk:build:python - - - name: Publish the Python SDK - env: - UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} - run: | - task sdk:release:python - - javascript: - name: JavaScript - if: ${{ inputs.javascript-release == true || inputs.javascript-release == 'true' }} - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Node - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 - with: - node-version: "24.x" - registry-url: https://registry.npmjs.org/ - scope: "@agntcy" - - - name: Install deps - shell: bash - run: | - task deps - - - name: Build the Javascript package - run: | - task sdk:deps:javascript - task sdk:build:javascript - - - name: Publish the Javascript SDK - env: - NODE_AUTH_TOKEN: ${{ secrets.NPMJS_TOKEN }} - run: | - task sdk:release:javascript +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Release SDK + +on: + workflow_dispatch: + inputs: + javascript-release: + required: false + type: boolean + description: "Make a javascript SDK release." + default: false + python-release: + required: false + type: boolean + description: "Make a python SDK release." + default: false + workflow_call: + inputs: + javascript-release: + required: false + type: boolean + description: "Make a javascript SDK release." + default: false + python-release: + required: false + type: boolean + description: "Make a python SDK release." 
+ default: false + secrets: + PYPI_API_TOKEN: + description: "PyPI API token for publishing Python SDK" + required: true + NPMJS_TOKEN: + description: "NPM.js token for publishing JavaScript SDK" + required: true + +permissions: + contents: read + +jobs: + python: + name: Python + if: ${{ inputs.python-release == true || inputs.python-release == 'true' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Install deps + shell: bash + run: | + task deps + + - name: Build the Python package + run: | + task sdk:build:python + + - name: Publish the Python SDK + env: + UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + run: | + task sdk:release:python + + javascript: + name: JavaScript + if: ${{ inputs.javascript-release == true || inputs.javascript-release == 'true' }} + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Node + uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 + with: + node-version: "24.x" + registry-url: https://registry.npmjs.org/ + scope: "@agntcy" + + - name: Install deps + shell: bash + run: | + task deps + + - name: Build the Javascript package + run: | + task sdk:deps:javascript + task sdk:build:javascript + + - name: Publish the Javascript SDK + env: + NODE_AUTH_TOKEN: ${{ secrets.NPMJS_TOKEN }} + run: | + task sdk:release:javascript diff --git a/.github/workflows/reusable-release.yaml b/.github/workflows/reusable-release.yaml index eff5e4e2a..4def69232 100644 --- a/.github/workflows/reusable-release.yaml +++ b/.github/workflows/reusable-release.yaml @@ -1,143 +1,143 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Release - -on: - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - release_tag: - required: true - type: string - description: "Release tag for all components." 
- secrets: - PYPI_API_TOKEN: - description: "PyPI API token for publishing Python SDK" - required: true - NPMJS_TOKEN: - description: "NPM.js token for publishing JavaScript SDK" - required: true - -jobs: - image: - name: Image - uses: ./.github/workflows/reusable-build.yaml - with: - image_repo: ${{ inputs.image_repo }} - image_tag: ${{ inputs.release_tag }} - push: true - - chart: - name: Helm chart - uses: ./.github/workflows/reusable-release-helm.yaml - with: - image_repo: ${{ inputs.image_repo }} - release_tag: ${{ inputs.release_tag }} - - cli: - name: CLI - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 - with: - go-version: "1.25.2" - cache-dependency-path: "**/*.sum" - cache: true # NOTE: Default value, just to be explicit - - - name: Build - run: | - task cli:compile:all - - - name: Upload artifact - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 - with: - name: cli-artifacts - path: bin - if-no-files-found: error - - sdk: - name: SDK - uses: ./.github/workflows/reusable-release-sdk.yaml - with: - javascript-release: true - python-release: true - secrets: - PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} - NPMJS_TOKEN: ${{ secrets.NPMJS_TOKEN }} - - release: - name: Release - needs: - - image - - chart - - cli - - sdk - runs-on: ubuntu-latest - outputs: - upload_url: ${{ steps.create_release.outputs.upload_url }} - steps: - - name: Create Release - id: create_release - uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ inputs.release_tag }} - release_name: Release ${{ inputs.release_tag }} - draft: true - prerelease: false - - upload: - name: Upload Asset - needs: - - release - runs-on: ubuntu-latest - strategy: - matrix: - os: [linux, darwin, windows] - arch: [amd64, arm64] - steps: - - name: Checkout code - if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - with: - fetch-depth: 0 - - - name: Download CLI artifacts - if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - name: cli-artifacts - path: bin - - - name: Verify file - if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} - run: | - ls -l bin - file bin/dirctl-${{ matrix.os }}-${{ matrix.arch }} - - - name: Upload Release Asset - if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} - id: upload-release-asset - uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1.0.2 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - upload_url: ${{ needs.release.outputs.upload_url }} - asset_path: bin/dirctl-${{ matrix.os }}-${{ matrix.arch }} - asset_name: dirctl-${{ matrix.os }}-${{ matrix.arch }} - asset_content_type: application/octet-stream +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Release + +on: + workflow_call: + inputs: + image_repo: + required: true + type: string + description: "Image repo to 
use." + release_tag: + required: true + type: string + description: "Release tag for all components." + secrets: + PYPI_API_TOKEN: + description: "PyPI API token for publishing Python SDK" + required: true + NPMJS_TOKEN: + description: "NPM.js token for publishing JavaScript SDK" + required: true + +jobs: + image: + name: Image + uses: ./.github/workflows/reusable-build.yaml + with: + image_repo: ${{ inputs.image_repo }} + image_tag: ${{ inputs.release_tag }} + push: true + + chart: + name: Helm chart + uses: ./.github/workflows/reusable-release-helm.yaml + with: + image_repo: ${{ inputs.image_repo }} + release_tag: ${{ inputs.release_tag }} + + cli: + name: CLI + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Go + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 + with: + go-version: "1.25.2" + cache-dependency-path: "**/*.sum" + cache: true # NOTE: Default value, just to be explicit + + - name: Build + run: | + task cli:compile:all + + - name: Upload artifact + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: cli-artifacts + path: bin + if-no-files-found: error + + sdk: + name: SDK + uses: ./.github/workflows/reusable-release-sdk.yaml + with: + javascript-release: true + python-release: true + secrets: + PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }} + NPMJS_TOKEN: ${{ secrets.NPMJS_TOKEN }} + + release: + name: Release + needs: + - image + - chart + - cli + - sdk + runs-on: ubuntu-latest + outputs: + upload_url: ${{ steps.create_release.outputs.upload_url }} + steps: + - name: Create Release + id: create_release + uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ inputs.release_tag }} + release_name: Release ${{ inputs.release_tag }} + draft: true + prerelease: false + + upload: + name: Upload Asset + needs: + - release + runs-on: ubuntu-latest + strategy: + matrix: + os: [linux, darwin, windows] + arch: [amd64, arm64] + steps: + - name: Checkout code + if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + fetch-depth: 0 + + - name: Download CLI artifacts + if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + name: cli-artifacts + path: bin + + - name: Verify file + if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} + run: | + ls -l bin + file bin/dirctl-${{ matrix.os }}-${{ matrix.arch }} + + - name: Upload Release Asset + if: ${{ !(contains(matrix.os, 'windows') && contains(matrix.arch, 'arm64')) }} + id: upload-release-asset + uses: actions/upload-release-asset@e8f9f06c4b078e705bd2ea027f0926603fc9b4d5 # v1.0.2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ needs.release.outputs.upload_url }} + asset_path: bin/dirctl-${{ matrix.os }}-${{ matrix.arch }} + asset_name: dirctl-${{ matrix.os }}-${{ matrix.arch }} + asset_content_type: application/octet-stream diff --git a/.github/workflows/reusable-test-e2e.yaml b/.github/workflows/reusable-test-e2e.yaml index e4e4a2f12..3dfe5a9ce 
100644 --- a/.github/workflows/reusable-test-e2e.yaml +++ b/.github/workflows/reusable-test-e2e.yaml @@ -1,130 +1,130 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Test E2E - -on: - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - image_tag: - required: true - type: string - description: "Image tag to use." - enable_coverage: - required: false - type: boolean - default: false - description: "Whether to collect and upload E2E coverage to Codecov." - secrets: - CODECOV_TOKEN: - description: "Codecov token for uploading coverage reports" - required: false - -jobs: - e2e: - name: Test ${{ matrix.label }} - runs-on: ubuntu-latest - strategy: - matrix: - include: - - task: test:e2e:local - label: "Local" - coverage-files: ".coverage/e2e/local-cli.out,.coverage/e2e/local-client.out,.coverage/e2e/server.out" - - task: test:e2e:network - label: "Network" - coverage-files: ".coverage/e2e/network.out,.coverage/e2e/server.out" - - task: test:e2e:spire - label: "Federation" - coverage-files: ".coverage/e2e/server.out" - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Cosign - uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - - - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 - with: - go-version: "1.25.2" - cache-dependency-path: "**/*.sum" - cache: true # NOTE: Default value, just to be explicit - - - name: Download artifacts - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - path: tmp/artifacts - merge-multiple: true - - - name: Download coverage image artifacts - if: ${{ inputs.enable_coverage }} - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - path: tmp/coverage-artifacts - pattern: "*-coverage" - merge-multiple: true - - - name: Load images to local Docker registry - run: | - for image_archive in tmp/artifacts/*.tar; do - docker load --input "$image_archive" - done - docker images - - - name: Load coverage images to local Docker registry - if: ${{ inputs.enable_coverage }} - run: | - for image_archive in tmp/coverage-artifacts/*.tar; do - docker load --input "$image_archive" - done - docker images - - - name: Load compiled dirctl binary - run: | - mkdir ./bin - cp tmp/artifacts/dirctl ./bin/dirctl - chmod +x ./bin/dirctl - - - name: Run end-to-end tests - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ inputs.image_tag }} - E2E_COMPILE_OUTPUT_DIR: tmp - E2E_COVERAGE_ENABLED: ${{ matrix.task != 'test:e2e:spire' && inputs.enable_coverage && 'true' || 'false' }} - run: | - # Use a random cluster name to avoid conflicts in concurrent runs. - # Kind cluster names must be lowercase and can contain only letters and numbers. 
- export KIND_CLUSTER_NAME=test-$(openssl rand -base64 12 | tr -dc 'a-z0-9') - task ${{ matrix.task }} - - - name: Process E2E coverage - if: ${{ inputs.enable_coverage && matrix.task != 'test:e2e:spire' }} - run: | - task test:e2e:coverage:process - - - name: Upload E2E coverage to Codecov - if: ${{ inputs.enable_coverage && matrix.task != 'test:e2e:spire' }} - uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 - with: - files: ${{ matrix.coverage-files }} - flags: e2e-${{ matrix.label }} - fail_ci_if_error: false - verbose: true - token: ${{ secrets.CODECOV_TOKEN }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Test E2E + +on: + workflow_call: + inputs: + image_repo: + required: true + type: string + description: "Image repo to use." + image_tag: + required: true + type: string + description: "Image tag to use." + enable_coverage: + required: false + type: boolean + default: false + description: "Whether to collect and upload E2E coverage to Codecov." + secrets: + CODECOV_TOKEN: + description: "Codecov token for uploading coverage reports" + required: false + +jobs: + e2e: + name: Test ${{ matrix.label }} + runs-on: ubuntu-latest + strategy: + matrix: + include: + - task: test:e2e:local + label: "Local" + coverage-files: ".coverage/e2e/local-cli.out,.coverage/e2e/local-client.out,.coverage/e2e/server.out" + - task: test:e2e:network + label: "Network" + coverage-files: ".coverage/e2e/network.out,.coverage/e2e/server.out" + - task: test:e2e:spire + label: "Federation" + coverage-files: ".coverage/e2e/server.out" + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Login to ghcr.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Cosign + uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 + + - name: Setup Go + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 + with: + go-version: "1.25.2" + cache-dependency-path: "**/*.sum" + cache: true # NOTE: Default value, just to be explicit + + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + path: tmp/artifacts + merge-multiple: true + + - name: Download coverage image artifacts + if: ${{ inputs.enable_coverage }} + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + path: tmp/coverage-artifacts + pattern: "*-coverage" + merge-multiple: true + + - name: Load images to local Docker registry + run: | + for image_archive in tmp/artifacts/*.tar; do + docker load --input "$image_archive" + done + docker images + + - name: Load coverage images to local Docker registry + if: ${{ inputs.enable_coverage }} + run: | + for image_archive in tmp/coverage-artifacts/*.tar; do + docker load --input "$image_archive" + done + docker images + + - name: Load compiled dirctl binary + run: | + mkdir ./bin + cp tmp/artifacts/dirctl ./bin/dirctl + chmod +x ./bin/dirctl + + - name: Run end-to-end tests + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ inputs.image_tag }} + E2E_COMPILE_OUTPUT_DIR: tmp + E2E_COVERAGE_ENABLED: ${{ matrix.task != 'test:e2e:spire' 
&& inputs.enable_coverage && 'true' || 'false' }} + run: | + # Use a random cluster name to avoid conflicts in concurrent runs. + # Kind cluster names must be lowercase and can contain only letters and numbers. + export KIND_CLUSTER_NAME=test-$(openssl rand -base64 12 | tr -dc 'a-z0-9') + task ${{ matrix.task }} + + - name: Process E2E coverage + if: ${{ inputs.enable_coverage && matrix.task != 'test:e2e:spire' }} + run: | + task test:e2e:coverage:process + + - name: Upload E2E coverage to Codecov + if: ${{ inputs.enable_coverage && matrix.task != 'test:e2e:spire' }} + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 + with: + files: ${{ matrix.coverage-files }} + flags: e2e-${{ matrix.label }} + fail_ci_if_error: false + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/reusable-test-sdk.yaml b/.github/workflows/reusable-test-sdk.yaml index 6bfd5a7a6..e75ec9e49 100644 --- a/.github/workflows/reusable-test-sdk.yaml +++ b/.github/workflows/reusable-test-sdk.yaml @@ -1,142 +1,142 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Test SDK - -on: - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - image_tag: - required: true - type: string - description: "Image tag to use." - -permissions: - id-token: write - contents: read - -jobs: - python: - name: Python - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Download artifacts - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - path: tmp/artifacts - merge-multiple: true - - - name: Load images to local Docker registry - run: | - for image_archive in tmp/artifacts/*.tar; do - docker load --input "$image_archive" - done - docker images - - - name: Load compiled dirctl binary - run: | - mkdir ./bin - cp tmp/artifacts/dirctl ./bin/dirctl - chmod +x ./bin/dirctl - - - name: Add dependencies for SDKs testing - env: - CLIENT_ID: "https://github.com/${{ github.repository }}/.github/workflows/reusable-test-sdk.yaml@${{ github.ref }}" - run: | - TRY_SKIP_COMPILE=true task sdk:deps:common - task sdk:deps:cicd:iodc-token-generation >> $GITHUB_ENV - - - name: Test Python SDK - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ inputs.image_tag }} - run: | - task sdk:test:python - - javascript: - name: JavaScript - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 - with: - registry: ghcr.io - username: notused - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Node env - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 - with: - node-version: "24.x" - registry-url: https://registry.npmjs.org/ - scope: "@agntcy" - - - name: 
Download artifacts - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - with: - path: tmp/artifacts - merge-multiple: true - - - name: Load images to local Docker registry - run: | - for image_archive in tmp/artifacts/*.tar; do - docker load --input "$image_archive" - done - docker images - - - name: Load compiled dirctl binary - run: | - mkdir ./bin - cp tmp/artifacts/dirctl ./bin/dirctl - chmod +x ./bin/dirctl - - - name: Cache npm dependencies - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 - with: - path: "~/.npm" - key: node-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - node-${{ runner.os }}- - - - name: Add dependencies for SDKs testing - env: - CLIENT_ID: "https://github.com/${{ github.repository }}/.github/workflows/reusable-test-sdk.yaml@${{ github.ref }}" - run: | - TRY_SKIP_COMPILE=true task sdk:deps:common - task sdk:deps:cicd:iodc-token-generation >> $GITHUB_ENV - - - name: Test JavaScript SDK - env: - IMAGE_REPO: ${{ inputs.image_repo }} - IMAGE_TAG: ${{ inputs.image_tag }} - run: | - task sdk:test:javascript +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Test SDK + +on: + workflow_call: + inputs: + image_repo: + required: true + type: string + description: "Image repo to use." + image_tag: + required: true + type: string + description: "Image tag to use." + +permissions: + id-token: write + contents: read + +jobs: + python: + name: Python + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Login to ghcr.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + path: tmp/artifacts + merge-multiple: true + + - name: Load images to local Docker registry + run: | + for image_archive in tmp/artifacts/*.tar; do + docker load --input "$image_archive" + done + docker images + + - name: Load compiled dirctl binary + run: | + mkdir ./bin + cp tmp/artifacts/dirctl ./bin/dirctl + chmod +x ./bin/dirctl + + - name: Add dependencies for SDKs testing + env: + CLIENT_ID: "https://github.com/${{ github.repository }}/.github/workflows/reusable-test-sdk.yaml@${{ github.ref }}" + run: | + TRY_SKIP_COMPILE=true task sdk:deps:common + task sdk:deps:cicd:iodc-token-generation >> $GITHUB_ENV + + - name: Test Python SDK + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ inputs.image_tag }} + run: | + task sdk:test:python + + javascript: + name: JavaScript + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Login to ghcr.io + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0 + with: + registry: ghcr.io + username: notused + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Node env + uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0 + with: + node-version: "24.x" + 
registry-url: https://registry.npmjs.org/ + scope: "@agntcy" + + - name: Download artifacts + uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 + with: + path: tmp/artifacts + merge-multiple: true + + - name: Load images to local Docker registry + run: | + for image_archive in tmp/artifacts/*.tar; do + docker load --input "$image_archive" + done + docker images + + - name: Load compiled dirctl binary + run: | + mkdir ./bin + cp tmp/artifacts/dirctl ./bin/dirctl + chmod +x ./bin/dirctl + + - name: Cache npm dependencies + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0 + with: + path: "~/.npm" + key: node-${{ runner.os }}-${{ hashFiles('**/package-lock.json') }} + restore-keys: | + node-${{ runner.os }}- + + - name: Add dependencies for SDKs testing + env: + CLIENT_ID: "https://github.com/${{ github.repository }}/.github/workflows/reusable-test-sdk.yaml@${{ github.ref }}" + run: | + TRY_SKIP_COMPILE=true task sdk:deps:common + task sdk:deps:cicd:iodc-token-generation >> $GITHUB_ENV + + - name: Test JavaScript SDK + env: + IMAGE_REPO: ${{ inputs.image_repo }} + IMAGE_TAG: ${{ inputs.image_tag }} + run: | + task sdk:test:javascript diff --git a/.github/workflows/reusable-test.yaml b/.github/workflows/reusable-test.yaml index dfa5de70e..b88090ad3 100644 --- a/.github/workflows/reusable-test.yaml +++ b/.github/workflows/reusable-test.yaml @@ -1,78 +1,78 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -name: Test - -on: - workflow_call: - inputs: - image_repo: - required: true - type: string - description: "Image repo to use." - image_tag: - required: true - type: string - description: "Image tag to use." - secrets: - CODECOV_TOKEN: - description: "Codecov token for uploading coverage reports" - required: true - -permissions: - id-token: write - contents: read - pull-requests: write - -jobs: - unit: - name: Unit - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - with: - fetch-depth: 0 - - - name: Setup Taskfile - shell: bash - run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin - - - name: Setup Go - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 - with: - go-version: "1.25.2" - cache-dependency-path: "**/*.sum" - cache: true # NOTE: Default value, just to be explicit - - - name: Generate coverage report - run: | - task test:unit:coverage - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 - with: - files: .coverage/unit/api.out,.coverage/unit/client.out,.coverage/unit/server.out,.coverage/unit/cli.out,.coverage/unit/importer.out,.coverage/unit/mcp.out,.coverage/unit/utils.out - flags: unit - verbose: true - token: ${{ secrets.CODECOV_TOKEN }} - - - name: Run unit tests - run: | - task test:unit - sdk: - name: SDK - uses: ./.github/workflows/reusable-test-sdk.yaml - with: - image_repo: ${{ inputs.image_repo }} - image_tag: ${{ inputs.image_tag }} - - e2e: - name: E2E - uses: ./.github/workflows/reusable-test-e2e.yaml - with: - image_repo: ${{ inputs.image_repo }} - image_tag: ${{ inputs.image_tag }} - enable_coverage: true - secrets: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +name: Test + +on: + workflow_call: + inputs: + image_repo: + required: true + type: string + 
description: "Image repo to use." + image_tag: + required: true + type: string + description: "Image tag to use." + secrets: + CODECOV_TOKEN: + description: "Codecov token for uploading coverage reports" + required: true + +permissions: + id-token: write + contents: read + pull-requests: write + +jobs: + unit: + name: Unit + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + fetch-depth: 0 + + - name: Setup Taskfile + shell: bash + run: sh -c "$(curl --location https://taskfile.dev/install.sh)" -- -d -b ~/.local/bin + + - name: Setup Go + uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 + with: + go-version: "1.25.2" + cache-dependency-path: "**/*.sum" + cache: true # NOTE: Default value, just to be explicit + + - name: Generate coverage report + run: | + task test:unit:coverage + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 + with: + files: .coverage/unit/api.out,.coverage/unit/client.out,.coverage/unit/server.out,.coverage/unit/cli.out,.coverage/unit/importer.out,.coverage/unit/mcp.out,.coverage/unit/utils.out + flags: unit + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} + + - name: Run unit tests + run: | + task test:unit + sdk: + name: SDK + uses: ./.github/workflows/reusable-test-sdk.yaml + with: + image_repo: ${{ inputs.image_repo }} + image_tag: ${{ inputs.image_tag }} + + e2e: + name: E2E + uses: ./.github/workflows/reusable-test-e2e.yaml + with: + image_repo: ${{ inputs.image_repo }} + image_tag: ${{ inputs.image_tag }} + enable_coverage: true + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/scripts/security/create_critical_cve_issues.js b/.github/workflows/scripts/security/create_critical_cve_issues.js index e01de45c1..93325ad34 100644 --- a/.github/workflows/scripts/security/create_critical_cve_issues.js +++ b/.github/workflows/scripts/security/create_critical_cve_issues.js @@ -1,97 +1,97 @@ -#!/usr/bin/env node -/** - * create_critical_cve_issues.js - * Scans downloaded Trivy SARIF artifacts and creates GitHub issues for unique CRITICAL CVEs. 
- */ -const fs = require('fs'); -const glob = require('glob'); -const { Octokit } = require('@octokit/rest'); - -function main() { - const token = process.env.GITHUB_TOKEN; - if (!token) { - console.error('GITHUB_TOKEN not set'); - process.exit(1); - } - const repoSlug = process.env.GITHUB_REPOSITORY; - if (!repoSlug) { - console.error('GITHUB_REPOSITORY not set'); - process.exit(1); - } - const [owner, repo] = repoSlug.split('/'); - const octokit = new Octokit({ auth: token }); - const sarifFiles = glob.sync('trivy-artifacts/*/trivy-*.sarif'); - if (sarifFiles.length === 0) { - console.log('No SARIF files found'); - return; - } - const criticalFindings = []; - for (const file of sarifFiles) { - try { - const sarif = JSON.parse(fs.readFileSync(file, 'utf8')); - for (const run of sarif.runs || []) { - const rules = (run.tool && run.tool.driver && run.tool.driver.rules) || []; - for (const result of run.results || []) { - const rule = rules[result.ruleIndex]; - if (!rule) continue; - const tags = ((rule.properties || {}).tags) || []; - if (tags.includes('CRITICAL')) { - const cve = rule.id || result.ruleId; - const level = result.level; - const message = (result.message && result.message.text) || ''; - const pkgMatch = message.match(/Package:\s([^\n]+)/); - const fixMatch = message.match(/Fixed Version:\s([^\n]+)/); - criticalFindings.push({ cve, level, package: pkgMatch ? pkgMatch[1] : 'unknown', fixed: fixMatch ? fixMatch[1] : 'unknown', file }); - } - } - } - } catch (e) { - console.error(`Failed to parse ${file}:`, e); - } - } - if (criticalFindings.length === 0) { - console.log('No critical CVEs detected'); - return; - } - const unique = Object.values(criticalFindings.reduce((acc, f) => { acc[f.cve] = acc[f.cve] || f; return acc; }, {})); - console.log(`Unique critical CVEs: ${unique.map(u => u.cve).join(', ')}`); - - octokit.paginate(octokit.issues.listForRepo, { owner, repo, state: 'open', per_page: 100 }) - .then(existing => { - const existingTitles = new Set(existing.map(i => i.title)); - return Promise.all(unique.map(finding => { - const title = `CRITICAL CVE ${finding.cve} in image scan`; - if (existingTitles.has(title)) { - console.log(`Issue already exists for ${finding.cve}`); - return null; - } - const body = [ - 'Automated security scan detected a CRITICAL vulnerability.', - '', - `CVE: ${finding.cve}`, - `Package: ${finding.package}`, - `Fixed Version: ${finding.fixed}`, - `Severity Level (SARIF level): ${finding.level}`, - `Source SARIF file: ${finding.file}`, - 'Generated by nightly container security scan workflow.', - '', - 'Action items:', - '- [ ] Assess exploitability for our deployment context', - '- [ ] Upgrade to fixed version or apply mitigation', - '- [ ] Verify remediation and close issue', - '', - 'This issue was created automatically. Edit as needed.' - ].join('\n'); - return octokit.issues.create({ owner, repo, title, body, labels: ['security', 'critical', 'cve'] }) - .then(r => console.log(`Created issue: ${r.data.html_url}`)); - })); - }) - .catch(err => { - console.error('Failed to list existing issues:', err); - process.exit(1); - }); -} - -if (require.main === module) { - main(); -} +#!/usr/bin/env node +/** + * create_critical_cve_issues.js + * Scans downloaded Trivy SARIF artifacts and creates GitHub issues for unique CRITICAL CVEs. 
+ */ +const fs = require('fs'); +const glob = require('glob'); +const { Octokit } = require('@octokit/rest'); + +function main() { + const token = process.env.GITHUB_TOKEN; + if (!token) { + console.error('GITHUB_TOKEN not set'); + process.exit(1); + } + const repoSlug = process.env.GITHUB_REPOSITORY; + if (!repoSlug) { + console.error('GITHUB_REPOSITORY not set'); + process.exit(1); + } + const [owner, repo] = repoSlug.split('/'); + const octokit = new Octokit({ auth: token }); + const sarifFiles = glob.sync('trivy-artifacts/*/trivy-*.sarif'); + if (sarifFiles.length === 0) { + console.log('No SARIF files found'); + return; + } + const criticalFindings = []; + for (const file of sarifFiles) { + try { + const sarif = JSON.parse(fs.readFileSync(file, 'utf8')); + for (const run of sarif.runs || []) { + const rules = (run.tool && run.tool.driver && run.tool.driver.rules) || []; + for (const result of run.results || []) { + const rule = rules[result.ruleIndex]; + if (!rule) continue; + const tags = ((rule.properties || {}).tags) || []; + if (tags.includes('CRITICAL')) { + const cve = rule.id || result.ruleId; + const level = result.level; + const message = (result.message && result.message.text) || ''; + const pkgMatch = message.match(/Package:\s([^\n]+)/); + const fixMatch = message.match(/Fixed Version:\s([^\n]+)/); + criticalFindings.push({ cve, level, package: pkgMatch ? pkgMatch[1] : 'unknown', fixed: fixMatch ? fixMatch[1] : 'unknown', file }); + } + } + } + } catch (e) { + console.error(`Failed to parse ${file}:`, e); + } + } + if (criticalFindings.length === 0) { + console.log('No critical CVEs detected'); + return; + } + const unique = Object.values(criticalFindings.reduce((acc, f) => { acc[f.cve] = acc[f.cve] || f; return acc; }, {})); + console.log(`Unique critical CVEs: ${unique.map(u => u.cve).join(', ')}`); + + octokit.paginate(octokit.issues.listForRepo, { owner, repo, state: 'open', per_page: 100 }) + .then(existing => { + const existingTitles = new Set(existing.map(i => i.title)); + return Promise.all(unique.map(finding => { + const title = `CRITICAL CVE ${finding.cve} in image scan`; + if (existingTitles.has(title)) { + console.log(`Issue already exists for ${finding.cve}`); + return null; + } + const body = [ + 'Automated security scan detected a CRITICAL vulnerability.', + '', + `CVE: ${finding.cve}`, + `Package: ${finding.package}`, + `Fixed Version: ${finding.fixed}`, + `Severity Level (SARIF level): ${finding.level}`, + `Source SARIF file: ${finding.file}`, + 'Generated by nightly container security scan workflow.', + '', + 'Action items:', + '- [ ] Assess exploitability for our deployment context', + '- [ ] Upgrade to fixed version or apply mitigation', + '- [ ] Verify remediation and close issue', + '', + 'This issue was created automatically. Edit as needed.' 
+ ].join('\n'); + return octokit.issues.create({ owner, repo, title, body, labels: ['security', 'critical', 'cve'] }) + .then(r => console.log(`Created issue: ${r.data.html_url}`)); + })); + }) + .catch(err => { + console.error('Failed to list existing issues:', err); + process.exit(1); + }); +} + +if (require.main === module) { + main(); +} diff --git a/.github/workflows/scripts/security/generate_trivy_summary.sh b/.github/workflows/scripts/security/generate_trivy_summary.sh index 5b81b5c9e..39ae6d943 100644 --- a/.github/workflows/scripts/security/generate_trivy_summary.sh +++ b/.github/workflows/scripts/security/generate_trivy_summary.sh @@ -1,33 +1,33 @@ -#!/usr/bin/env bash -set -euo pipefail -# generate_trivy_summary.sh -# Reads Trivy SARIF artifacts and appends a Markdown table to $GITHUB_STEP_SUMMARY. - -if [[ -z "${GITHUB_STEP_SUMMARY:-}" ]]; then - echo "GITHUB_STEP_SUMMARY not set; aborting." >&2 - exit 1 -fi - -echo '## Container Security Scan Summary' >> "$GITHUB_STEP_SUMMARY" -echo '' >> "$GITHUB_STEP_SUMMARY" - -files=$(ls trivy-artifacts/*/trivy-*.sarif 2>/dev/null || true) -if [[ -z "$files" ]]; then - echo 'No SARIF files found in artifacts (check previous job logs).' >> "$GITHUB_STEP_SUMMARY" - exit 0 -fi - -echo '| Image | Critical | High | Medium | Total | File |' >> "$GITHUB_STEP_SUMMARY" -echo '|-------|----------|------|--------|-------|------|' >> "$GITHUB_STEP_SUMMARY" - -for f in $files; do - img=$(basename "$f" .sarif | sed 's/^trivy-//') - critical=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("CRITICAL")) ] | length' "$f" 2>/dev/null || echo 0) - high=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("HIGH")) ] | length' "$f" 2>/dev/null || echo 0) - medium=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("MEDIUM")) ] | length' "$f" 2>/dev/null || echo 0) - total=$(jq -r '[.runs[].results[]] | length' "$f" 2>/dev/null || echo 0) - echo "| $img | $critical | $high | $medium | $total | $(basename "$f") |" >> "$GITHUB_STEP_SUMMARY" -done - -echo '' >> "$GITHUB_STEP_SUMMARY" -echo 'Severity counts derived from rule tags (CRITICAL/HIGH/MEDIUM) mapped via result.ruleIndex.' >> "$GITHUB_STEP_SUMMARY" +#!/usr/bin/env bash +set -euo pipefail +# generate_trivy_summary.sh +# Reads Trivy SARIF artifacts and appends a Markdown table to $GITHUB_STEP_SUMMARY. + +if [[ -z "${GITHUB_STEP_SUMMARY:-}" ]]; then + echo "GITHUB_STEP_SUMMARY not set; aborting." >&2 + exit 1 +fi + +echo '## Container Security Scan Summary' >> "$GITHUB_STEP_SUMMARY" +echo '' >> "$GITHUB_STEP_SUMMARY" + +files=$(ls trivy-artifacts/*/trivy-*.sarif 2>/dev/null || true) +if [[ -z "$files" ]]; then + echo 'No SARIF files found in artifacts (check previous job logs).' 
>> "$GITHUB_STEP_SUMMARY" + exit 0 +fi + +echo '| Image | Critical | High | Medium | Total | File |' >> "$GITHUB_STEP_SUMMARY" +echo '|-------|----------|------|--------|-------|------|' >> "$GITHUB_STEP_SUMMARY" + +for f in $files; do + img=$(basename "$f" .sarif | sed 's/^trivy-//') + critical=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("CRITICAL")) ] | length' "$f" 2>/dev/null || echo 0) + high=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("HIGH")) ] | length' "$f" 2>/dev/null || echo 0) + medium=$(jq -r '.runs[] as $run | [ $run.results[] | select(($run.tool.driver.rules[.ruleIndex].properties.tags // []) | index("MEDIUM")) ] | length' "$f" 2>/dev/null || echo 0) + total=$(jq -r '[.runs[].results[]] | length' "$f" 2>/dev/null || echo 0) + echo "| $img | $critical | $high | $medium | $total | $(basename "$f") |" >> "$GITHUB_STEP_SUMMARY" +done + +echo '' >> "$GITHUB_STEP_SUMMARY" +echo 'Severity counts derived from rule tags (CRITICAL/HIGH/MEDIUM) mapped via result.ruleIndex.' >> "$GITHUB_STEP_SUMMARY" diff --git a/.gitignore b/.gitignore index 2c7661167..25a17024d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,46 +1,46 @@ -# IDEs -.vscode/ -.idea/ -.continue/ - -# Secrets -.env -.envrc - -# Builds -bin/ - -# Python -__pycache__/ -*.egg-info - -# JS -node_modules - -# Helm -install/charts/dir/charts/ -install/charts/dirctl/charts/ - -# MacOS -.DS_Store - -# Test output -e2e/tmp/* - -# Coverage output -.coverage/ - -# Licensei -.licensei.cache - -tmp/ - -# AI -.cursor/ - -# Go workspace files -go.work -go.work.sum - -# MCP server binary -mcp/mcp-server +# IDEs +.vscode/ +.idea/ +.continue/ + +# Secrets +.env +.envrc + +# Builds +bin/ + +# Python +__pycache__/ +*.egg-info + +# JS +node_modules + +# Helm +install/charts/dir/charts/ +install/charts/dirctl/charts/ + +# MacOS +.DS_Store + +# Test output +e2e/tmp/* + +# Coverage output +.coverage/ + +# Licensei +.licensei.cache + +tmp/ + +# AI +.cursor/ + +# Go workspace files +go.work +go.work.sum + +# MCP server binary +mcp/mcp-server diff --git a/.golangci.yml b/.golangci.yml index c47c17aaa..27735b0da 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,65 +1,65 @@ -version: "2" -linters: - default: all - disable: - - depguard # Allow all package imports - - embeddedstructfieldcheck # Allow embedded fields in any position - - err113 - - exhaustruct # Allow structures with uninitialized fields - - funcorder # Allow flexible function ordering within structs - - funlen # Allow long functions - - godoclint # Allow flexible godoc comment styles - - gochecknoglobals # Allow global variables - - gochecknoinits # Allow init function - - godox # Allow TODOs - - inamedparam # Allow unnamed args - - ireturn # Allow returning with interfaces - - lll # Allow long lines - - noinlineerr # Allow inline error handling - - nolintlint # Allow nolint - - paralleltest # Allow missing t.Parallel() in tests - - revive # Allow flexible code style conventions - - tagliatelle # Allow json(camel) - - testpackage # Allow not having a test package - - varnamelen # Allow short var names - - wsl # Deprecated - settings: - cyclop: - max-complexity: 15 - goheader: - values: - regexp: - YEAR: 202[4-5] - template: |- - Copyright AGNTCY Contributors (https://github.com/agntcy) - SPDX-License-Identifier: Apache-2.0 - gomoddirectives: - replace-allow-list: [] - replace-local: true - exclusions: - generated: lax - presets: - - 
comments - - common-false-positives - - legacy - - std-error-handling - rules: - - linters: - - dupl - path: types/adapters/record_oasfv.*\.go - paths: - - third_party$ - - builtin$ - - examples$ -formatters: - enable: - - gci - - gofmt - - gofumpt - - goimports - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$
+version: "2" +linters: + default: all + disable: + - depguard # Allow all package imports + - embeddedstructfieldcheck # Allow embedded fields in any position + - err113 + - exhaustruct # Allow structures with uninitialized fields + - funcorder # Allow flexible function ordering within structs + - funlen # Allow long functions + - godoclint # Allow flexible godoc comment styles + - gochecknoglobals # Allow global variables + - gochecknoinits # Allow init function + - godox # Allow TODOs + - inamedparam # Allow unnamed args + - ireturn # Allow returning with interfaces + - lll # Allow long lines + - noinlineerr # Allow inline error handling + - nolintlint # Allow nolint + - paralleltest # Allow missing t.Parallel() in tests + - revive # Allow flexible code style conventions + - tagliatelle # Allow json(camel) + - testpackage # Allow not having a test package + - varnamelen # Allow short var names + - wsl # Deprecated + settings: + cyclop: + max-complexity: 15 + goheader: + values: + regexp: + YEAR: 202[4-5] + template: |- + Copyright AGNTCY Contributors (https://github.com/agntcy) + SPDX-License-Identifier: Apache-2.0 + gomoddirectives: + replace-allow-list: [] + replace-local: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - dupl + path: types/adapters/record_oasfv.*\.go + paths: + - third_party$ + - builtin$ + - examples$ +formatters: + enable: + - gci + - gofmt + - gofumpt + - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$
diff --git a/.licensei.toml b/.licensei.toml index c9c0901ca..527c324d3 100644 --- a/.licensei.toml +++ b/.licensei.toml @@ -1,38 +1,38 @@ -approved = [ - "mit", - "apache-2.0", - "bsd-3-clause", - "bsd-2-clause", - "mpl-2.0", - "Apache-2.0", - "Apache-2.0 WITH LLVM-exception", - "BSD-2-Clause", - "BSD-3-Clause", - "ISC", - "MIT", - "OpenSSL", - "CC0-1.0", -] - -ignored = [ - "github.com/agntcy/dir/api", - "buf.build/gen/go/agntcy/oasf/protocolbuffers/go", - - "google.golang.org/protobuf", # BSD-3-Clause - "github.com/xi2/xz", # Public Domain https://github.com/xi2/xz/blob/master/LICENSE - "github.com/libp2p/go-libp2p-gorpc", # MIT https://github.com/libp2p/go-libp2p-gorpc/blob/master/LICENSE-MIT - "github.com/libp2p/go-libp2p-pubsub", # MIT https://github.com/libp2p/go-libp2p-pubsub/blob/master/LICENSE-MIT - "go.mongodb.org/mongo-driver", # Apache License 2.0 https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE - "modernc.org/sqlite", # BSD-3-Clause https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE - "modernc.org/libc", # BSD-3-Clause https://gitlab.com/cznic/libc/-/blob/master/LICENSE - "modernc.org/mathutil", # BSD-3-Clause https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE - "modernc.org/memory", # BSD-3-Clause https://gitlab.com/cznic/memory/-/blob/master/LICENSE - "gitlab.com/gitlab-org/api/client-go", # Apache License 2.0 https://gitlab.com/gitlab-org/api/client-go/-/blob/main/LICENSE - "github.com/xeipuuv/gojsonpointer", # Apache License 2.0 https://github.com/xeipuuv/gojsonpointer/blob/master/LICENSE-APACHE-2.0.txt - "github.com/xeipuuv/gojsonreference", # Apache License 2.0 https://github.com/xeipuuv/gojsonreference/blob/master/LICENSE-APACHE-2.0.txt - "github.com/xeipuuv/gojsonschema", # Apache License 2.0 https://github.com/xeipuuv/gojsonschema/blob/master/LICENSE-APACHE-2.0.txt - "buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go", # Apache License 2.0 https://github.com/agntcy/oasf-sdk/blob/main/LICENSE.md - "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go", # Generated protobuf code, no license file - "github.com/grpc-ecosystem/go-grpc-middleware/v2", # Apache License 2.0 https://github.com/grpc-ecosystem/go-grpc-middleware/blob/main/LICENSE - "gonum.org/v1/gonum", # BSD-3-Clause https://github.com/gonum/gonum/blob/master/LICENSE -]
+approved = [ + "mit", + "apache-2.0", + "bsd-3-clause", + "bsd-2-clause", + "mpl-2.0", + "Apache-2.0", + "Apache-2.0 WITH LLVM-exception", + "BSD-2-Clause", + "BSD-3-Clause", + "ISC", + "MIT", + "OpenSSL", + "CC0-1.0", +] + +ignored = [ + "github.com/agntcy/dir/api", + "buf.build/gen/go/agntcy/oasf/protocolbuffers/go", + + "google.golang.org/protobuf", # BSD-3-Clause + "github.com/xi2/xz", # Public Domain https://github.com/xi2/xz/blob/master/LICENSE + "github.com/libp2p/go-libp2p-gorpc", # MIT https://github.com/libp2p/go-libp2p-gorpc/blob/master/LICENSE-MIT + "github.com/libp2p/go-libp2p-pubsub", # MIT https://github.com/libp2p/go-libp2p-pubsub/blob/master/LICENSE-MIT + "go.mongodb.org/mongo-driver", # Apache License 2.0 https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE + "modernc.org/sqlite", # BSD-3-Clause https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE + "modernc.org/libc", # BSD-3-Clause https://gitlab.com/cznic/libc/-/blob/master/LICENSE + "modernc.org/mathutil", # BSD-3-Clause https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE + "modernc.org/memory", # BSD-3-Clause https://gitlab.com/cznic/memory/-/blob/master/LICENSE + "gitlab.com/gitlab-org/api/client-go", # Apache License 2.0 https://gitlab.com/gitlab-org/api/client-go/-/blob/main/LICENSE + "github.com/xeipuuv/gojsonpointer", # Apache License 2.0 https://github.com/xeipuuv/gojsonpointer/blob/master/LICENSE-APACHE-2.0.txt + "github.com/xeipuuv/gojsonreference", # Apache License 2.0 https://github.com/xeipuuv/gojsonreference/blob/master/LICENSE-APACHE-2.0.txt + "github.com/xeipuuv/gojsonschema", # Apache License 2.0 https://github.com/xeipuuv/gojsonschema/blob/master/LICENSE-APACHE-2.0.txt + "buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go", # Apache License 2.0 https://github.com/agntcy/oasf-sdk/blob/main/LICENSE.md + "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go", # Generated protobuf code, no license file + "github.com/grpc-ecosystem/go-grpc-middleware/v2", # Apache License 2.0 https://github.com/grpc-ecosystem/go-grpc-middleware/blob/main/LICENSE + "gonum.org/v1/gonum", # BSD-3-Clause https://github.com/gonum/gonum/blob/master/LICENSE +]
diff --git a/API_SPEC.md b/API_SPEC.md index e98ec959d..0fcd2dd80 100644 --- a/API_SPEC.md +++ b/API_SPEC.md @@ -1,23 +1,23 @@ -# API Specification - -This document describes Directory API interfaces and usage scenarios. -The API specification is defined and exposed via gRPC services. -All code snippets below are tested against the Directory `v0.2.0` release. - -## Models - -Defines all objects used to define schema and API specification. - -It is defined in [api/proto/core/v1](api/proto/core/v1). - -## Storage API - -This API is responsible for managing content-addressable object storage operations.
-
-It is defined in [api/proto/store/v1/store_service.proto](api/proto/store/v1/store_service.proto).
-
-## Routing API
-
-This API is responsible for managing peer and content routing data.
-
-It is defined in [api/proto/routing/v1/routing_service.proto](api/proto/routing/v1/routing_service.proto).
+# API Specification
+
+This document describes Directory API interfaces and usage scenarios.
+The API specification is defined and exposed via gRPC services.
+All code snippets below are tested against the Directory `v0.2.0` release.
+
+## Models
+
+Defines all objects used to define schema and API specification.
+
+It is defined in [api/proto/core/v1](api/proto/core/v1).
+
+## Storage API
+
+This API is responsible for managing content-addressable object storage operations.
+
+It is defined in [api/proto/store/v1/store_service.proto](api/proto/store/v1/store_service.proto).
+
+## Routing API
+
+This API is responsible for managing peer and content routing data.
+
+It is defined in [api/proto/routing/v1/routing_service.proto](api/proto/routing/v1/routing_service.proto).
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3f51cc3f1..427fc2377 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,856 +1,856 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [v0.6.0] - 2025-12-19
-
-### Key Highlights
-
-This release consolidates improvements from v0.5.1 through v0.5.7, focusing on operational
-reliability, integration enhancements, and cross-registry support, including:
-
-**Tooling & Integration**
-- Enhanced local search implementation with wildcard support
-- Configurable server-side OASF validation with auto-deployment support
-- Extended MCP tools for record enrichment and import/export workflows
-- Domain-based enrichment capabilities for importer service
-- Support across different OCI Registry storage backends
-
-**Observability & Operations**
-- Enhanced SPIRE support for reliability and multi-SPIRE deployments
-- Prometheus metrics with ServiceMonitor and gRPC interceptors
-
-### Compatibility Matrix
-
-| Component              | Version | Compatible With             |
-| ---------------------- | ------- | --------------------------- |
-| **dir-apiserver**      | v0.6.0  | oasf v0.3.x, v0.7.x, v0.8.x |
-| **dirctl**             | v0.6.0  | dir-apiserver >= v0.5.0     |
-| **dir-go**             | v0.6.0  | dir-apiserver >= v0.5.0     |
-| **dir-py**             | v0.6.0  | dir-apiserver >= v0.5.0     |
-| **dir-js**             | v0.6.0  | dir-apiserver >= v0.5.0     |
-| **helm-charts/dir**    | v0.6.0  | dir-apiserver >= v0.5.0     |
-| **helm-charts/dirctl** | v0.6.0  | dirctl >= v0.5.0            |
-
-### Added
-- **SPIRE**: SPIFFE CSI driver support for reliable identity injection (#724)
-- **SPIRE**: Automatic writable home directory when `readOnlyRootFilesystem` is enabled (#724)
-- **SPIRE**: ClusterSPIFFEID className field for proper workload registration (#774)
-- **Helm**: External Secrets Operator integration for credential management (#691)
-- **Helm**: DNS name templates for SPIRE ClusterSPIFFEID (#681)
-- **Helm**: SQLite PVC configuration support (#713)
-- **Helm**: OASF configuration for API validation (#769)
-- **Helm**: Recreate deployment strategy to prevent PVC lock conflicts (#720)
-- **Observability**: Prometheus metrics with ServiceMonitor and gRPC interceptors (#757)
-- **Security**: Container security scan workflow with automated version detection (#746)
-- **SDK**:
Additional gRPC service methods in Python and JavaScript SDKs (#709) -- **MCP**: Import/export tools and prompts for workflow automation (#705) -- **MCP**: OASF schema exploration tools for enricher workflow (#680) -- **Importer**: Domain-based record enrichment (#696) -- **Importer**: Deduplication checker prioritization (#743) -- **Validation**: Server-side OASF API validation (#711) -- **CI/CD**: Feature branch build workflows for images and charts (#739) - -### Changed -- **Search**: Refactored local search implementation (#747) -- **Search**: Removed search subcommands for simplified CLI (#759) -- **Importer**: Migration to oasf-sdk/translator (#624) -- **Configuration**: Updated OASF validation environment variables (#754) -- **Toolchain**: Disabled Go workspace mode (#732) -- **Dependencies**: Bump golang.org/x/crypto to v0.45.0 (#744) -- **Dependencies**: Bump js-yaml to v4.1.1 (#745) -- **Dependencies**: Update zot and fix CodeQL warnings (#761) -- **Dependencies**: Bump github.com/sigstore/cosign (#773) - -### Fixed -- **SPIRE**: X.509-SVID retry logic for timing issues in short-lived workloads (#735, #741) -- **SPIRE**: "certificate contains no URI SAN" errors in CronJobs (#724) -- **Helm**: Add `/tmp` volume when rootfs is read-only (#718) -- **CI**: Prevent disk space issues in runners (#749) -- **CI**: Avoid PR label duplication (#755) -- **CI**: Fix unit test execution (#733) -- **CI**: Fix reusable build workflow component tag handling (#742) -- **CI**: Add SPIRE task cleanup with sudo (#734) -- **CI**: Helm linting integration (#780) -- **Brew**: Formula updater process after public release (#686) - -### Removed -- **Cleanup**: Outdated components and unused code (#783) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.0...v0.6.0) - -## [v0.5.7] - 2025-12-12 - -### Key Highlights - -This patch release fixes a critical SPIRE integration bug and adds OASF configuration support: -- **Critical Fix**: Added mandatory `className` field to ClusterSPIFFEID resources to ensure proper SPIRE workload registration -- **New Feature**: OASF configuration support in Helm chart for API validation -- **Dependencies**: Updated cosign and zot dependencies - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.7 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.7 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.7 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.7 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.7 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.7 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.7 | dirctl >= v0.5.0 | - -### Added -- **Helm**: OASF configuration support for API validation settings (#769) -- **Helm**: OASF server deployment option with directory (#769) - -### Changed -- **Dependencies**: Bump github.com/sigstore/cosign dependency (#773) -- **Dependencies**: Update zot dependency (#761) - -### Fixed -- **Helm**: Add mandatory `className` field to ClusterSPIFFEID resources in both `apiserver` and `dirctl` charts (#774) -- **CI**: Pin CodeQL analyze action version to match init version (#774) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.6...v0.5.7) - ---- - -## [v0.5.6] - 2025-12-05 - -### Key Highlights - -This patch release adds observability improvements, refactors search functionality, and updates configuration validation. 
- -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.6 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.6 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.6 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.6 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.6 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.6 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.6 | dirctl >= v0.5.0 | - -### Added -- **Observability**: Prometheus metrics support with ServiceMonitor and gRPC interceptors (#757) -- **CLI**: Add `--format` flag to search command for output control (#747, #759) - -### Changed -- **Refactor**: Improve local search implementation and testing (#747) -- **Configuration**: Update OASF API validation environment variables (#754) - -### Fixed -- **CI**: Avoid PR label duplication (#755) -- **CI**: Prevent disk space issues in CI runners (#749) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.5...v0.5.6) - ---- - -## [v0.5.5] - 2025-12-01 - -### Key Highlights - -This patch release refactors SPIFFE X.509-SVID retry logic into a shared package for better code organization and maintainability. - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.5 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.5 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.5 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.5 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.5 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.5 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.5 | dirctl >= v0.5.0 | - -### Changed -- **Refactor**: Centralize X.509-SVID retry logic in `utils/spiffe` package (#741) - -### Fixed -- **Code Quality**: Remove duplicate retry constants and improve code organization (#741) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.4...v0.5.5) - ---- - -## [v0.5.4] - 2025-11-26 - -### Key Highlights - -This patch release improves SPIFFE authentication reliability for short-lived workloads: -- Client-side retry logic with exponential backoff for X509-SVID fetching -- Handles SPIRE agent sync delays in CronJobs and ephemeral pods -- Prevents "certificate contains no URI SAN" authentication failures - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.4 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.4 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.4 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.4 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.4 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.4 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.4 | dirctl >= v0.5.0 | - -### Fixed -- **Client**: X509-SVID retry logic with exponential backoff to handle SPIRE agent sync delays (#725) -- **Client**: "certificate contains no URI SAN" errors in CronJobs and short-lived workloads (#725) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.3...v0.5.4) - ---- - -## [v0.5.3] - 2025-11-25 - -### Key Highlights - -This patch release improves SPIFFE identity injection reliability and chart security: -- SPIFFE CSI driver support eliminates authentication race conditions -- Automatic writable home directory for security-hardened deployments -- Read-only SPIRE socket 
mounts for enhanced security - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.3 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.3 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.3 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.3 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.3 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.3 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.3 | dirctl >= v0.5.0 | - -### Added -- **Helm**: SPIFFE CSI driver support with `spire.useCSIDriver` flag for both charts (#724) -- **Helm**: Automatic `home-dir` emptyDir when `readOnlyRootFilesystem` is enabled (#724) - -### Changed -- **Helm**: Default to SPIFFE CSI driver for production reliability (#724) - -### Fixed -- **Helm**: "certificate contains no URI SAN" authentication failures in CronJobs (#724) -- **Helm**: SPIRE socket mounts now use `readOnly: true` for security (#724) -- **Helm**: "read-only file system" warnings when creating config files (#724) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.2...v0.5.3) - ---- - -## [v0.5.2] - 2025-11-23 - -### Key Highlights - -This release focuses on storage improvements, SDK enhancements, MCP tooling, and operational stability, including: - -**Storage & Deployment Improvements** -- SQLite PVC configuration support for persistent storage in Kubernetes deployments -- Recreate deployment strategy to prevent database lock conflicts during rolling updates -- Automatic `/tmp` emptyDir mount when `readOnlyRootFilesystem` is enabled for security hardening -- Fixes compatibility issue between SQLite temp files and read-only root filesystem -- Enhanced unit test coverage and stability - -**SDK Enhancements** -- Added Events (listen) gRPC client to Python and JavaScript SDKs -- Added Publication gRPC client to Python and JavaScript SDKs -- Comprehensive test coverage for new SDK methods - -**MCP Enhancements** -- Import/export tools for record management workflows -- Domain enrichment capabilities for importer - -**CI/CD Improvements** -- Brew formula updater process improvements -- Better release automation and publication workflow - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.2 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.2 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.2 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.2 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.2 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.2 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.2 | dirctl >= v0.5.0 | - -### Added -- **Storage**: PVC configuration support for SQLite (#713) -- **Helm**: Deployment strategy configuration to prevent PVC lock conflicts (#720) -- **SDK**: Events (listen) gRPC client for Python and JavaScript SDKs (#709) -- **SDK**: Publication gRPC client for Python and JavaScript SDKs (#709) -- **MCP**: Import/export tools and prompts for record workflows (#705) -- **Importer**: Domain enrichment capabilities (#696) - -### Changed -- **CI**: Update brew formula version (#702) - -### Fixed -- **Helm**: Add `/tmp` emptyDir mount when `readOnlyRootFilesystem` is enabled (#718) -- **CI**: Brew formula updater to work after release is public (#686) -- **Testing**: Fix unit tests for storage components (#713) - -[Full 
Changelog](https://github.com/agntcy/dir/compare/v0.5.1...v0.5.2) - ---- - -## [v0.5.1] - 2025-11-17 - -### Key Highlights - -This release focuses on operational improvements and deployment enhancements, including: - -**Helm & Deployment Improvements** -- External Secrets Operator integration for secure credential management -- SPIRE ClusterSPIFFEID DNS name templates support for external access -- Improved TLS certificate SAN configuration for production deployments - -**MCP Enhancements** -- OASF schema exploration tools for AI-assisted record enrichment -- Hierarchical domain and skill navigation capabilities - -**Dependencies & Stability** -- OASF SDK upgrade to v0.0.11 with latest schema improvements -- SDK testing fixes for X.509 authentication mode - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.1 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.1 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.1 | dir-apiserver >= v0.5.0 | -| **dir-py** | v0.5.1 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.1 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.1 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.1 | dirctl >= v0.5.0 | - -### Added -- **Helm**: External Secrets Operator integration for credential management (#691) -- **Helm**: DNS name templates support for SPIRE ClusterSPIFFEID (#681) -- **MCP**: OASF schema domain and skill exploration tools for enricher workflow (#680) -- **Importer**: OASF SDK translator for MCP Registry conversion with deduplication and debug diagnostics (#624) - -### Changed -- **Dependencies**: Bump OASF SDK to v0.0.11 (#679) -- **CI**: Update upload-artifacts version (#682) -- **CI**: Update brew formula version (#684) -- **Docs**: Update readme versions (#685) - -### Fixed -- **SDK**: Use X.509 auth mode for testing (#678) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.0...v0.5.1) - ---- - -## [v0.5.0] - 2025-11-12 - -### Key Highlights - -This release focuses on extending API functionalities, improving -operational reliability, strengthening security capabilities, and -adding MCP (Model Context Protocol) integrations support, including: - -**MCP Integrations** -- MCP registry importer for automated OASF record ingestion -- MCP server implementation with OASF and Directory tools -- Added support for MCP server announce and discovery via DHT - -**API & Client Improvements** -- Event listener RPC for real-time updates across services -- gRPC connection management and streaming enhancements -- Rate limiting at application layer for improved stability -- Health checks migrated from HTTP to gRPC - -**Security & Reliability** -- Simplified TLS-based authentication support for SDKs -- Panic recovery middleware and structured logging for gRPC -- Critical resource leak fixes and improved context handling -- Enhanced security scanning with CodeQL workflows - -**Developer Experience** -- MCP tooling for easy record management and API access -- LLM-based enrichment for OASF records -- Simplified SDK integration in secure environments -- Unified CLI output formats with --output flag and JSONL support - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | --------------------------- | -| **dir-apiserver** | v0.5.0 | oasf v0.3.x, v0.7.x, v0.8.x | -| **dirctl** | v0.5.0 | dir-apiserver >= v0.5.0 | -| **dir-go** | v0.5.0 | dir-apiserver >= v0.5.0 | -| 
**dir-py** | v0.5.0 | dir-apiserver >= v0.5.0 | -| **dir-js** | v0.5.0 | dir-apiserver >= v0.5.0 | -| **helm-charts/dir** | v0.5.0 | dir-apiserver >= v0.5.0 | -| **helm-charts/dirctl** | v0.5.0 | dirctl >= v0.5.0 | - -### Added -- **MCP**: Add tools for record management and API operations (#465, #574, #619, #611, #650, #660) -- **MCP**: Registry importer for automated record ingestion (#544, #568) -- **MCP**: LLM-based record enrichment for OASF records (#646) -- **API**: Event listener RPC exposure (#537) -- **API**: Structured gRPC request/response logging interceptor (#566) -- **API**: Panic recovery middleware for gRPC handlers (#573) -- **API**: Application-layer rate limiting for gRPC (#593) -- **API**: Readiness checks for apiserver services (#582) -- **API**: gRPC connection management (#647) -- **Security**: Container security scanning workflow (#547) -- **Security**: CodeQL security workflows (#584) -- **Security**: TLS token-based authentication for Go client/CLI (#606) -- **Helm**: Routing service deployment configuration (#599) -- **Helm**: Extra environment variables support in dir chart (#605) -- **Helm**: Zot configuration and authentication (#576) -- **CLI**: Unified output formats with --output flag and JSONL support (#587) -- **CLI**: MCP dirctl subcommand (#660) -- **Testing**: Unit test coverage for all Go modules (#555) -- **Testing**: E2E test coverage (#591) - -### Changed -- **API**: Push/pull API refactoring and improvements (#585, #595) -- **Storage**: Store Capability Interfaces refactoring (#562) -- **SDK**: Migration to local generated proto stubs (#569, #588) -- **CLI**: Hub sign/verify commands restoration (#612) -- **Helm**: Ingress deployment fixes (#600, #601) -- **Security**: Security fixes and dependency updates (#602) -- **Health**: Health checks migrated from HTTP to gRPC (#597) -- **Dependencies**: Bump OASF SDK to v0.0.8 and v0.0.9 (#603, #640) -- **Dependencies**: Bump Zot to latest version (#578, #579) -- **Dependencies**: Set SPIRE version in taskfile (#583) -- **CI**: Update container tags for security scans (#558) -- **CI**: Tag for SDK/JS package releases (#617, #621) - -### Fixed -- **Client**: Critical resource leaks and context handling issues (#577) -- **Client**: Push stream hanging with multiple errors (#644) -- **Security**: JWT auth test and authentication fixes (#545) -- **API**: Rate limit E2E tests (#598) -- **API**: Hub API updates (#595) -- **API**: MCP search limit handling (#623) -- **Storage**: OCI E2E concurrent issues and healthcheck service (#620) -- **SDK**: SPIFFE sign test (#592) -- **SDK**: Add proto stubs to repository (#588) -- **SDK**: JS package release prefix for RC tags (#621, #625) -- **CLI**: Sign and verify options cleanup (#673) -- **CLI**: API key methods improvements (#659) - -### Removed -- **MCP**: Remove dedicated MCP artifacts (#663) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.4.0...v0.5.0) - ---- - -## [v0.4.0] - 2025-10-15 - -### Key Highlights - -This release delivers improvements to application layer via generic referrers, -security schema with JWT/X.509 support, network capabilities, operational stability, -and developer experience, with a focus on: - -**Security & Authentication** -- Added JWT/X.509 over TLS versus mTLS for flexible authentication -- Authenticated PeerID integration with libp2p transport -- Secure credential management for zot sync operations -- OCI 1.1 referrers specification migration for signature attachments - -**Networking & Connectivity** -- GossipSub 
implementation for efficient label announcements -- Connection Manager implementation with removal of custom peer discovery -- Improved routing search with better peer address handling -- Locator-based record search capabilities across the network - -**Generic Record Referrers** -- Support for storage and handling of generic record referrers -- Referrer object encoding across server and SDK components -- Application support via referrer (e.g., signatures, attestations) - -**Developer Experience & Tooling** -- Streaming package for Golang gRPC client functionality -- Standardized CLI output formats and command improvements -- Reusable setup-dirctl GitHub Action for CI/CD workflows - -**Quality & Stability Improvements** -- Comprehensive test suite enhancements including SPIRE tests -- E2E network test stability with environment warm-up phases -- Bug fixes for API key formatting, file permissions, and documentation - -### Compatibility Matrix - -| Component | Version | Compatible With | -| ---------------------- | ------- | -------------------------------------------- | -| **dir-apiserver** | v0.4.0 | oasf v0.3.x, v0.7.x | -| **dirctl** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | -| **dir-go** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | -| **dir-py** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | -| **dir-js** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | -| **helm-charts/dir** | v0.4.0 | dir-apiserver >= v0.3.0 | -| **helm-charts/dirctl** | v0.4.0 | dirctl >= v0.3.0 | - -### Added -- **Networking**: GossipSub for efficient label announcements (#472) -- **Networking**: AutoRelay, Hole Punching, and mDNS for better peer connectivity (#503) -- **Networking**: Connection Manager implementation (#495) -- **Security**: JWT authentication and TLS communication support (#492) -- **Security**: Authenticated PeerID from libp2p transport (#502) -- **Storage**: Generic record referrers support (#451, #480) -- **Storage**: Referrer encoding capabilities (#491, #499) -- **API**: Server validator functionality (#456) -- **SDK**: Streaming package for gRPC client (#527) -- **CI**: Zot dependency for server config (#444) -- **CI**: Reusable setup-dirctl GitHub Action (#441) -- **CI**: SPIRE tests action (#488) -- **CI**: Local tap for testing homebrew formulae (#437) - -### Changed -- **Security**: Rename mTLS to x.509 for clarity (#508) -- **Storage**: Secure credential management for zot sync operations (#457) -- **API**: Unify extensions to modules architecture (#463) -- **CLI**: Standardize CLI output formats (#509) -- **CLI**: Update search command to use routing search command flags (#521) -- **Docs**: Update SDK source and release links (#434) -- **CI**: Improvements to pipeline and taskfile setup (#438, #442) -- **CI**: Bump Go, golangci-lint and libp2p (#534) -- **CI**: Bump OASF SDK to versions 0.0.6 and 0.0.7 (#481, #489) -- **CI**: Bump brew tap to v0.3.0 (#435) - -### Fixed -- **Networking**: Empty peer addresses in routing search results (#513) -- **Networking**: Locator-based remote record search (#469) -- **Networking**: E2E network test flakiness with environment warm-up (#516) -- **Security**: Auth mode mTLS issues (#517) -- **Security**: Cosign signature attachment migration to OCI 1.1 referrers spec (#464) -- **Security**: Signature verification against expected payload (#493) -- **Storage**: Demo testdata and flaky test removal (#504) -- **SDK**: JavaScript package release issues (#432) -- **CLI**: API key list format (#494) -- 
**Docs**: Remove broken links from README (#458) -- **Docs**: Fix push and pull documentation (#436) -- **CI**: Support multiple federation artifacts via charts (#519) -- **CI**: Enable push to buf registry on merge (#484) -- **CI**: Bug report template to allow version input (#460) -- **CI**: File permissions for hub/api/v1alpha1/* (#518) - -### Removed -- **Networking**: Custom peer discovery (replaced with Connection Manager) (#495) -- **CI**: Useless files in hub/ directory (#522) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.3.0...v0.4.0) - ---- - -## [v0.3.0] - 2025-09-19 - -### Key Highlights - -This release delivers foundational improvements to security schema, -storage services, network capabilities, and user/developer experience, -with a focus on: - -**Zero-Trust Security Architecture** -- X.509-based SPIFFE/SPIRE identity framework with federation support -- Policy-based authorization framework with fine-grained access controls -- Secure mTLS communication across all services -- OCI-native PKI with client- and server-side verification capabilities - -**Content Standardization** -- Unified Core v1 API with multi-version support for OASF objects -- Deterministic CID generation using canonical JSON marshaling -- Cross-language and service consistency with CIDv1 record addressing -- OCI-native object storage and relationship management - -**Search & Discovery** -- Local search with wildcard and pattern matching support -- Network-wide record discovery with prefix-based search capabilities -- DHT-based routing for distributed service announcement and discovery - -**Data Synchronization** -- Full and partial index synchronization with CID selection -- Automated sync workflows for simple data migration and replication -- Post-sync verification checks and search capabilities across records - -**Developer Experience** -- Native Client SDKs for Golang, JavaScript, TypeScript, and Python -- Standardized CLI and SDK tooling with consistent interfaces -- Decoupled signing workflows for easier usage and integration -- Kubernetes deployment with SPIRE and Federation support - -### Compatibility Matrix - -The following matrix shows compatibility between different component versions: - -| Core Component | Version | Compatible With | -| ----------------- | ------- | ---------------------------------------------- | -| **dir-apiserver** | v0.3.0 | oasf v0.3.x, oasf v0.7.x | -| **dirctl** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x | -| **dir-go** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x | -| **dir-py** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x | -| **dir-js** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x | - -#### Helm Chart Compatibility - -| Helm Chart | Version | Deploys Component | Minimum Requirements | -| -------------------------- | ------- | -------------------- | -------------------- | -| **dir/helm-charts/dir** | v0.3.0 | dir-apiserver v0.3.0 | Kubernetes 1.20+ | -| **dir/helm-charts/dirctl** | v0.3.0 | dirctl v0.3.0 | Kubernetes 1.20+ | - -#### Compatibility Notes - -- **Full OASF support** is available across all core components -- **dir-apiserver v0.3.0** introduces breaking changes to the API layer -- **dirctl v0.3.0** introduces breaking changes to the CLI usage -- **dir-go v0.3.0** introduces breaking changes to the SDK usage -- Older versions of **dir-apiserver** are **not compatible** with **dir-apiserver v0.3.0** -- Older versions of client components are **not compatible** with **dir-apiserver v0.3.0** -- 
Older versions of helm charts are **not compatible** with **dir-apiserver v0.3.0** -- Data must be manually migrated from older **dir-apiserver** versions to **dir-apiserver v0.3.0** - -#### Migration Guide - -Data from the OCI storage layer in the Directory can be migrated by repushing via new API endpoints. -For example: - -```bash -repo=localhost:5000/dir -for tag in $(oras repo tags $repo); do - digest=$(oras resolve $repo:$tag) - oras blob fetch --output - $repo@$digest | dirctl push --stdin -done -``` - -### Added -- **API**: Implement Search API for network-wide record discovery using RecordQuery interface (#362) -- **API**: Add initial authorization framework (#330) -- **API**: Add distributed label-based announce/discovery via DHT (#285) -- **API**: Add wildcard search support with pattern matching (#355) -- **API**: Add max replicasets to keep in deployment (#207) -- **API**: Add sync API (#199) -- **CI**: Add Codecov workflow & docs (#380) -- **CI**: Introduce BSR (#212) -- **SDK**: Add SDK release process (#216) -- **SDK**: Add more gRPC services (#294) -- **SDK**: Add gRPC client code and example for JavaScript SDK (#248) -- **SDK**: Add sync support (#361) -- **SDK**: Add sign and verification (#337) -- **SDK**: Add testing solution for CI (#269) -- **SDK**: Standardize Python SDK tooling for Directory (#371) -- **SDK**: Add TypeScript/JavaScript DIR Client SDK (#407) -- **Security**: Implement server-side verification with zot (#286) -- **Security**: Use SPIFFE/SPIRE to enable security schema (#210) -- **Security**: Add spire federation support (#295) -- **Storage**: Add storage layer full-index record synchronisation (#227) -- **Storage**: Add post sync verification (#324) -- **Storage**: Enable search on synced records (#310) -- **Storage**: Add fallback to client-side verification (#373) -- **Storage**: Add policy-based publish (#333) -- **Storage**: Add custom type for error handling (#189) -- **Storage**: Add sign and verify gRPC service (#201) -- **Storage**: Add new hub https://hub.agntcy.org/directory (#202) -- **Storage**: Add cid-based synchronisation support (#401) -- **Storage**: Add rich manifest annotations (#236) - -### Changed -- **API**: Switch to generic OASF objects across codebase (#381) -- **API**: Version upgrade of API services (#225) -- **API**: Update sync API and add credential RPC (#217) -- **API**: Refactor domain interfaces to align with OASF schema (#397) -- **API**: Rename v1alpha2 to v1 (#258) -- **CI**: Find better place for proto APIs (#384) -- **CI**: Reduce flaky jobs for SDK (#339) -- **CI**: Update codebase with proto namespace changes (#398) -- **CI**: Update CI task gen to ignore buf lock file changes (#275) -- **CI**: Update brew formula version (#372, #263, #257, #247) -- **CI**: Bump Go (#221) -- **CI**: Update Directory proto imports for SDKs (#421) -- **CI**: Bump OASF SDK version to v0.0.5 (#424) -- **Documentation**: Update usage documentation for record generation (#287) -- **Documentation**: Add and update README for refactored SDKs (#273) -- **Documentation**: Update README to reflect new usage documentation link and remove USAGE.md file (#332) -- **Documentation**: Update documentation setup (#394) -- **SDK**: Move and refactor Python SDK code (#229) -- **SDK**: Bump package versions for release (#274) -- **SDK**: Bump versions for release (#249) -- **SDK**: Support streams & update docs (#284) -- **SDK**: Update API code and add example code for Python SDK (#237) -- **Storage**: Migrate record signature to OCI native 
signature (#250) -- **Storage**: Store implementations and digest/CID calculation (#238) -- **Storage**: Standardize and cleanup store providers (#385) -- **Storage**: Improve logging to suppress misleading errors in database and routing layers (#289) -- **Storage**: Refactor E2E Test Suite & Utilities Enhancement (#268) -- **Storage**: Refactor e2e tests multiple OASF versions (#278) -- **Storage**: Refactor: remove semantic tags keep only CID tag (#265) -- **Storage**: Refactor: remove generated OASF objects (#356) -- **Storage**: Refactor: remove builder artifacts and build cmd usages (#329) -- **Storage**: Refactor: remove agent refs (#331) -- **Storage**: Refactor: remove redundant proto files (#219) -- **Storage**: Refactor: remove Legacy List API and Migrate to RecordQuery-Based System (#342) -- **Storage**: Refactor: remove Python code generation (#215) - -### Fixed -- **API**: Resolve buf proto API namespacing issues (#393) -- **API**: Add sync testdata (#396) -- **API**: Update apiserver.env to use new config values (#406) -- **API**: Suppress command usage display on runtime errors (#290) -- **API**: Quick-fix for e2e CLI cmd state handling (#270) -- **API**: Fix/CI task gen (#271) -- **CI**: Allow dir-hub-maintainers release (#402) -- **SDK**: Fix Python SDK imports and tests (#403) -- **SDK**: Fix codeowners file (#404) -- **SDK**: Flaky SDK CICD tests (#422) -- **Storage**: Add separate maintainers for hub CLI directory (#375) -- **Storage**: Update agent directory default location (#226) -- **Storage**: Flaky e2e test and restructure test suites (#416) -- **Storage**: E2E sync test cleanup (#423) - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.13...v0.3.0) - ---- - -## [v0.2.13] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.12...v0.2.13) - ---- - -## [v0.2.12] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.11...v0.2.12) - ---- - -## [v0.2.11] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.10...v0.2.11) - ---- - -## [v0.2.10] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.9...v0.2.10) - ---- - -## [v0.2.9] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.8...v0.2.9) - ---- - -## [v0.2.8] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.7...v0.2.8) - ---- - -## [v0.2.7] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.6...v0.2.7) - ---- - -## [v0.2.6] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.5...v0.2.6) - ---- - -## [v0.2.5] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.4...v0.2.5) - ---- - -## [v0.2.4] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.3...v0.2.4) - ---- - -## [v0.2.3] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.2...v0.2.3) - ---- - -## [v0.2.2] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.1...v0.2.2) - ---- - -## [v0.2.1] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.0...v0.2.1) - ---- - -## [v0.2.0] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.6...v0.2.0) - ---- - -## [v0.1.6] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.5...v0.1.6) - ---- - -## [v0.1.5] - Previous Release - -[Full 
Changelog](https://github.com/agntcy/dir/compare/v0.1.4...v0.1.5) - ---- - -## [v0.1.4] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.3...v0.1.4) - ---- - -## [v0.1.3] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.2...v0.1.3) - ---- - -## [v0.1.2] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.1...v0.1.2) - ---- - -## [v0.1.1] - Previous Release - -[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.0...v0.1.1) - ---- - -## [v0.1.0] - Initial Release - -[Full Changelog](https://github.com/agntcy/dir/releases/tag/v0.1.0) - ---- - -## Legend - -- **Added** for new features -- **Changed** for changes in existing functionality -- **Deprecated** for soon-to-be removed features -- **Removed** for now removed features -- **Fixed** for any bug fixes -- **Security** for vulnerability fixes +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [v0.6.0] - 2025-12-19 + +### Key Highlights + +This release consolidates improvements from v0.5.1 through v0.5.7, focusing on operational +reliability, integration enhancements, and cross-registry support, including: + +**Tooling & Integration** +- Enhanced local search implementation with wildcard support +- Configurable server-side OASF validation with auto-deployment support +- Extended MCP tools for record enrichment and import/export workflows +- Domain-based enrichment capabilities for importer service +- Support across different OCI Registry storage backends + +**Observability & Operations** +- Enhanced SPIRE support for reliability and multi-SPIRE deployments +- Prometheus metrics with ServiceMonitor and gRPC interceptors + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.6.0 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.6.0 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.6.0 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.6.0 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.6.0 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.6.0 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.6.0 | dirctl >= v0.5.0 | + +### Added +- **SPIRE**: SPIFFE CSI driver support for reliable identity injection (#724) +- **SPIRE**: Automatic writable home directory when `readOnlyRootFilesystem` is enabled (#724) +- **SPIRE**: ClusterSPIFFEID className field for proper workload registration (#774) +- **Helm**: External Secrets Operator integration for credential management (#691) +- **Helm**: DNS name templates for SPIRE ClusterSPIFFEID (#681) +- **Helm**: SQLite PVC configuration support (#713) +- **Helm**: OASF configuration for API validation (#769) +- **Helm**: Recreate deployment strategy to prevent PVC lock conflicts (#720) +- **Observability**: Prometheus metrics with ServiceMonitor and gRPC interceptors (#757) +- **Security**: Container security scan workflow with automated version detection (#746) +- **SDK**: Additional gRPC service methods in Python and JavaScript SDKs (#709) +- **MCP**: Import/export tools and prompts for workflow automation (#705) +- **MCP**: OASF schema exploration tools for enricher workflow (#680) +- **Importer**: Domain-based record enrichment (#696) +- **Importer**: Deduplication checker 
prioritization (#743) +- **Validation**: Server-side OASF API validation (#711) +- **CI/CD**: Feature branch build workflows for images and charts (#739) + +### Changed +- **Search**: Refactored local search implementation (#747) +- **Search**: Removed search subcommands for simplified CLI (#759) +- **Importer**: Migration to oasf-sdk/translator (#624) +- **Configuration**: Updated OASF validation environment variables (#754) +- **Toolchain**: Disabled Go workspace mode (#732) +- **Dependencies**: Bump golang.org/x/crypto to v0.45.0 (#744) +- **Dependencies**: Bump js-yaml to v4.1.1 (#745) +- **Dependencies**: Update zot and fix CodeQL warnings (#761) +- **Dependencies**: Bump github.com/sigstore/cosign (#773) + +### Fixed +- **SPIRE**: X.509-SVID retry logic for timing issues in short-lived workloads (#735, #741) +- **SPIRE**: "certificate contains no URI SAN" errors in CronJobs (#724) +- **Helm**: Add `/tmp` volume when rootfs is read-only (#718) +- **CI**: Prevent disk space issues in runners (#749) +- **CI**: Avoid PR label duplication (#755) +- **CI**: Fix unit test execution (#733) +- **CI**: Fix reusable build workflow component tag handling (#742) +- **CI**: Add SPIRE task cleanup with sudo (#734) +- **CI**: Helm linting integration (#780) +- **Brew**: Formula updater process after public release (#686) + +### Removed +- **Cleanup**: Outdated components and unused code (#783) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.0...v0.6.0) + +## [v0.5.7] - 2025-12-12 + +### Key Highlights + +This patch release fixes a critical SPIRE integration bug and adds OASF configuration support: +- **Critical Fix**: Added mandatory `className` field to ClusterSPIFFEID resources to ensure proper SPIRE workload registration +- **New Feature**: OASF configuration support in Helm chart for API validation +- **Dependencies**: Updated cosign and zot dependencies + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.7 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.7 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.7 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.7 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.7 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.7 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.7 | dirctl >= v0.5.0 | + +### Added +- **Helm**: OASF configuration support for API validation settings (#769) +- **Helm**: OASF server deployment option with directory (#769) + +### Changed +- **Dependencies**: Bump github.com/sigstore/cosign dependency (#773) +- **Dependencies**: Update zot dependency (#761) + +### Fixed +- **Helm**: Add mandatory `className` field to ClusterSPIFFEID resources in both `apiserver` and `dirctl` charts (#774) +- **CI**: Pin CodeQL analyze action version to match init version (#774) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.6...v0.5.7) + +--- + +## [v0.5.6] - 2025-12-05 + +### Key Highlights + +This patch release adds observability improvements, refactors search functionality, and updates configuration validation. 
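+
+As a sketch of the refactored search output control (see the Added entries below), the new `--format` flag can be exercised as follows; the `json` value is an assumption, so check `dirctl search --help` for the formats your build supports:
+
+```bash
+# Hypothetical invocation: run a local search and print results as JSON,
+# assuming a reachable dir-apiserver and a dirctl build that includes #747/#759.
+dirctl search --format json
+```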
+ +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.6 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.6 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.6 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.6 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.6 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.6 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.6 | dirctl >= v0.5.0 | + +### Added +- **Observability**: Prometheus metrics support with ServiceMonitor and gRPC interceptors (#757) +- **CLI**: Add `--format` flag to search command for output control (#747, #759) + +### Changed +- **Refactor**: Improve local search implementation and testing (#747) +- **Configuration**: Update OASF API validation environment variables (#754) + +### Fixed +- **CI**: Avoid PR label duplication (#755) +- **CI**: Prevent disk space issues in CI runners (#749) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.5...v0.5.6) + +--- + +## [v0.5.5] - 2025-12-01 + +### Key Highlights + +This patch release refactors SPIFFE X.509-SVID retry logic into a shared package for better code organization and maintainability. + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.5 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.5 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.5 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.5 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.5 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.5 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.5 | dirctl >= v0.5.0 | + +### Changed +- **Refactor**: Centralize X.509-SVID retry logic in `utils/spiffe` package (#741) + +### Fixed +- **Code Quality**: Remove duplicate retry constants and improve code organization (#741) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.4...v0.5.5) + +--- + +## [v0.5.4] - 2025-11-26 + +### Key Highlights + +This patch release improves SPIFFE authentication reliability for short-lived workloads: +- Client-side retry logic with exponential backoff for X509-SVID fetching +- Handles SPIRE agent sync delays in CronJobs and ephemeral pods +- Prevents "certificate contains no URI SAN" authentication failures + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.4 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.4 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.4 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.4 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.4 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.4 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.4 | dirctl >= v0.5.0 | + +### Fixed +- **Client**: X509-SVID retry logic with exponential backoff to handle SPIRE agent sync delays (#725) +- **Client**: "certificate contains no URI SAN" errors in CronJobs and short-lived workloads (#725) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.3...v0.5.4) + +--- + +## [v0.5.3] - 2025-11-25 + +### Key Highlights + +This patch release improves SPIFFE identity injection reliability and chart security: +- SPIFFE CSI driver support eliminates authentication race conditions +- Automatic writable home directory for security-hardened deployments +- Read-only SPIRE socket 
mounts for enhanced security + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.3 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.3 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.3 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.3 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.3 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.3 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.3 | dirctl >= v0.5.0 | + +### Added +- **Helm**: SPIFFE CSI driver support with `spire.useCSIDriver` flag for both charts (#724) +- **Helm**: Automatic `home-dir` emptyDir when `readOnlyRootFilesystem` is enabled (#724) + +### Changed +- **Helm**: Default to SPIFFE CSI driver for production reliability (#724) + +### Fixed +- **Helm**: "certificate contains no URI SAN" authentication failures in CronJobs (#724) +- **Helm**: SPIRE socket mounts now use `readOnly: true` for security (#724) +- **Helm**: "read-only file system" warnings when creating config files (#724) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.2...v0.5.3) + +--- + +## [v0.5.2] - 2025-11-23 + +### Key Highlights + +This release focuses on storage improvements, SDK enhancements, MCP tooling, and operational stability, including: + +**Storage & Deployment Improvements** +- SQLite PVC configuration support for persistent storage in Kubernetes deployments +- Recreate deployment strategy to prevent database lock conflicts during rolling updates +- Automatic `/tmp` emptyDir mount when `readOnlyRootFilesystem` is enabled for security hardening +- Fixes compatibility issue between SQLite temp files and read-only root filesystem +- Enhanced unit test coverage and stability + +**SDK Enhancements** +- Added Events (listen) gRPC client to Python and JavaScript SDKs +- Added Publication gRPC client to Python and JavaScript SDKs +- Comprehensive test coverage for new SDK methods + +**MCP Enhancements** +- Import/export tools for record management workflows +- Domain enrichment capabilities for importer + +**CI/CD Improvements** +- Brew formula updater process improvements +- Better release automation and publication workflow + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.2 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.2 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.2 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.2 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.2 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.2 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.2 | dirctl >= v0.5.0 | + +### Added +- **Storage**: PVC configuration support for SQLite (#713) +- **Helm**: Deployment strategy configuration to prevent PVC lock conflicts (#720) +- **SDK**: Events (listen) gRPC client for Python and JavaScript SDKs (#709) +- **SDK**: Publication gRPC client for Python and JavaScript SDKs (#709) +- **MCP**: Import/export tools and prompts for record workflows (#705) +- **Importer**: Domain enrichment capabilities (#696) + +### Changed +- **CI**: Update brew formula version (#702) + +### Fixed +- **Helm**: Add `/tmp` emptyDir mount when `readOnlyRootFilesystem` is enabled (#718) +- **CI**: Brew formula updater to work after release is public (#686) +- **Testing**: Fix unit tests for storage components (#713) + +[Full 
Changelog](https://github.com/agntcy/dir/compare/v0.5.1...v0.5.2) + +--- + +## [v0.5.1] - 2025-11-17 + +### Key Highlights + +This release focuses on operational improvements and deployment enhancements, including: + +**Helm & Deployment Improvements** +- External Secrets Operator integration for secure credential management +- SPIRE ClusterSPIFFEID DNS name templates support for external access +- Improved TLS certificate SAN configuration for production deployments + +**MCP Enhancements** +- OASF schema exploration tools for AI-assisted record enrichment +- Hierarchical domain and skill navigation capabilities + +**Dependencies & Stability** +- OASF SDK upgrade to v0.0.11 with latest schema improvements +- SDK testing fixes for X.509 authentication mode + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.1 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.1 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.1 | dir-apiserver >= v0.5.0 | +| **dir-py** | v0.5.1 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.1 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.1 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.1 | dirctl >= v0.5.0 | + +### Added +- **Helm**: External Secrets Operator integration for credential management (#691) +- **Helm**: DNS name templates support for SPIRE ClusterSPIFFEID (#681) +- **MCP**: OASF schema domain and skill exploration tools for enricher workflow (#680) +- **Importer**: OASF SDK translator for MCP Registry conversion with deduplication and debug diagnostics (#624) + +### Changed +- **Dependencies**: Bump OASF SDK to v0.0.11 (#679) +- **CI**: Update upload-artifacts version (#682) +- **CI**: Update brew formula version (#684) +- **Docs**: Update readme versions (#685) + +### Fixed +- **SDK**: Use X.509 auth mode for testing (#678) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.5.0...v0.5.1) + +--- + +## [v0.5.0] - 2025-11-12 + +### Key Highlights + +This release focuses on extending API functionalities, improving +operational reliability, strengthening security capabilities, and +adding MCP (Model Context Protocol) integrations support, including: + +**MCP Integrations** +- MCP registry importer for automated OASF record ingestion +- MCP server implementation with OASF and Directory tools +- Added support for MCP server announce and discovery via DHT + +**API & Client Improvements** +- Event listener RPC for real-time updates across services +- gRPC connection management and streaming enhancements +- Rate limiting at application layer for improved stability +- Health checks migrated from HTTP to gRPC + +**Security & Reliability** +- Simplified TLS-based authentication support for SDKs +- Panic recovery middleware and structured logging for gRPC +- Critical resource leak fixes and improved context handling +- Enhanced security scanning with CodeQL workflows + +**Developer Experience** +- MCP tooling for easy record management and API access +- LLM-based enrichment for OASF records +- Simplified SDK integration in secure environments +- Unified CLI output formats with --output flag and JSONL support + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | --------------------------- | +| **dir-apiserver** | v0.5.0 | oasf v0.3.x, v0.7.x, v0.8.x | +| **dirctl** | v0.5.0 | dir-apiserver >= v0.5.0 | +| **dir-go** | v0.5.0 | dir-apiserver >= v0.5.0 | +| 
**dir-py** | v0.5.0 | dir-apiserver >= v0.5.0 | +| **dir-js** | v0.5.0 | dir-apiserver >= v0.5.0 | +| **helm-charts/dir** | v0.5.0 | dir-apiserver >= v0.5.0 | +| **helm-charts/dirctl** | v0.5.0 | dirctl >= v0.5.0 | + +### Added +- **MCP**: Add tools for record management and API operations (#465, #574, #619, #611, #650, #660) +- **MCP**: Registry importer for automated record ingestion (#544, #568) +- **MCP**: LLM-based record enrichment for OASF records (#646) +- **API**: Event listener RPC exposure (#537) +- **API**: Structured gRPC request/response logging interceptor (#566) +- **API**: Panic recovery middleware for gRPC handlers (#573) +- **API**: Application-layer rate limiting for gRPC (#593) +- **API**: Readiness checks for apiserver services (#582) +- **API**: gRPC connection management (#647) +- **Security**: Container security scanning workflow (#547) +- **Security**: CodeQL security workflows (#584) +- **Security**: TLS token-based authentication for Go client/CLI (#606) +- **Helm**: Routing service deployment configuration (#599) +- **Helm**: Extra environment variables support in dir chart (#605) +- **Helm**: Zot configuration and authentication (#576) +- **CLI**: Unified output formats with --output flag and JSONL support (#587) +- **CLI**: MCP dirctl subcommand (#660) +- **Testing**: Unit test coverage for all Go modules (#555) +- **Testing**: E2E test coverage (#591) + +### Changed +- **API**: Push/pull API refactoring and improvements (#585, #595) +- **Storage**: Store Capability Interfaces refactoring (#562) +- **SDK**: Migration to local generated proto stubs (#569, #588) +- **CLI**: Hub sign/verify commands restoration (#612) +- **Helm**: Ingress deployment fixes (#600, #601) +- **Security**: Security fixes and dependency updates (#602) +- **Health**: Health checks migrated from HTTP to gRPC (#597) +- **Dependencies**: Bump OASF SDK to v0.0.8 and v0.0.9 (#603, #640) +- **Dependencies**: Bump Zot to latest version (#578, #579) +- **Dependencies**: Set SPIRE version in taskfile (#583) +- **CI**: Update container tags for security scans (#558) +- **CI**: Tag for SDK/JS package releases (#617, #621) + +### Fixed +- **Client**: Critical resource leaks and context handling issues (#577) +- **Client**: Push stream hanging with multiple errors (#644) +- **Security**: JWT auth test and authentication fixes (#545) +- **API**: Rate limit E2E tests (#598) +- **API**: Hub API updates (#595) +- **API**: MCP search limit handling (#623) +- **Storage**: OCI E2E concurrent issues and healthcheck service (#620) +- **SDK**: SPIFFE sign test (#592) +- **SDK**: Add proto stubs to repository (#588) +- **SDK**: JS package release prefix for RC tags (#621, #625) +- **CLI**: Sign and verify options cleanup (#673) +- **CLI**: API key methods improvements (#659) + +### Removed +- **MCP**: Remove dedicated MCP artifacts (#663) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.4.0...v0.5.0) + +--- + +## [v0.4.0] - 2025-10-15 + +### Key Highlights + +This release delivers improvements to application layer via generic referrers, +security schema with JWT/X.509 support, network capabilities, operational stability, +and developer experience, with a focus on: + +**Security & Authentication** +- Added JWT/X.509 over TLS versus mTLS for flexible authentication +- Authenticated PeerID integration with libp2p transport +- Secure credential management for zot sync operations +- OCI 1.1 referrers specification migration for signature attachments + +**Networking & Connectivity** +- GossipSub 
implementation for efficient label announcements +- Connection Manager implementation with removal of custom peer discovery +- Improved routing search with better peer address handling +- Locator-based record search capabilities across the network + +**Generic Record Referrers** +- Support for storage and handling of generic record referrers +- Referrer object encoding across server and SDK components +- Application support via referrer (e.g., signatures, attestations) + +**Developer Experience & Tooling** +- Streaming package for Golang gRPC client functionality +- Standardized CLI output formats and command improvements +- Reusable setup-dirctl GitHub Action for CI/CD workflows + +**Quality & Stability Improvements** +- Comprehensive test suite enhancements including SPIRE tests +- E2E network test stability with environment warm-up phases +- Bug fixes for API key formatting, file permissions, and documentation + +### Compatibility Matrix + +| Component | Version | Compatible With | +| ---------------------- | ------- | -------------------------------------------- | +| **dir-apiserver** | v0.4.0 | oasf v0.3.x, v0.7.x | +| **dirctl** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | +| **dir-go** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | +| **dir-py** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | +| **dir-js** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x | +| **helm-charts/dir** | v0.4.0 | dir-apiserver >= v0.3.0 | +| **helm-charts/dirctl** | v0.4.0 | dirctl >= v0.3.0 | + +### Added +- **Networking**: GossipSub for efficient label announcements (#472) +- **Networking**: AutoRelay, Hole Punching, and mDNS for better peer connectivity (#503) +- **Networking**: Connection Manager implementation (#495) +- **Security**: JWT authentication and TLS communication support (#492) +- **Security**: Authenticated PeerID from libp2p transport (#502) +- **Storage**: Generic record referrers support (#451, #480) +- **Storage**: Referrer encoding capabilities (#491, #499) +- **API**: Server validator functionality (#456) +- **SDK**: Streaming package for gRPC client (#527) +- **CI**: Zot dependency for server config (#444) +- **CI**: Reusable setup-dirctl GitHub Action (#441) +- **CI**: SPIRE tests action (#488) +- **CI**: Local tap for testing homebrew formulae (#437) + +### Changed +- **Security**: Rename mTLS to x.509 for clarity (#508) +- **Storage**: Secure credential management for zot sync operations (#457) +- **API**: Unify extensions to modules architecture (#463) +- **CLI**: Standardize CLI output formats (#509) +- **CLI**: Update search command to use routing search command flags (#521) +- **Docs**: Update SDK source and release links (#434) +- **CI**: Improvements to pipeline and taskfile setup (#438, #442) +- **CI**: Bump Go, golangci-lint and libp2p (#534) +- **CI**: Bump OASF SDK to versions 0.0.6 and 0.0.7 (#481, #489) +- **CI**: Bump brew tap to v0.3.0 (#435) + +### Fixed +- **Networking**: Empty peer addresses in routing search results (#513) +- **Networking**: Locator-based remote record search (#469) +- **Networking**: E2E network test flakiness with environment warm-up (#516) +- **Security**: Auth mode mTLS issues (#517) +- **Security**: Cosign signature attachment migration to OCI 1.1 referrers spec (#464) +- **Security**: Signature verification against expected payload (#493) +- **Storage**: Demo testdata and flaky test removal (#504) +- **SDK**: JavaScript package release issues (#432) +- **CLI**: API key list format (#494) +- 
+
+### Fixed
+- **Client**: Critical resource leaks and context handling issues (#577)
+- **Client**: Push stream hanging with multiple errors (#644)
+- **Security**: JWT auth test and authentication fixes (#545)
+- **API**: Rate limit E2E tests (#598)
+- **API**: Hub API updates (#595)
+- **API**: MCP search limit handling (#623)
+- **Storage**: OCI E2E concurrent issues and healthcheck service (#620)
+- **SDK**: SPIFFE sign test (#592)
+- **SDK**: Add proto stubs to repository (#588)
+- **SDK**: JS package release prefix for RC tags (#621, #625)
+- **CLI**: Sign and verify options cleanup (#673)
+- **CLI**: API key methods improvements (#659)
+
+### Removed
+- **MCP**: Remove dedicated MCP artifacts (#663)
+
+[Full Changelog](https://github.com/agntcy/dir/compare/v0.4.0...v0.5.0)
+
+---
+
+## [v0.4.0] - 2025-10-15
+
+### Key Highlights
+
+This release delivers improvements to the application layer via generic record
+referrers, to the security schema with JWT/X.509 support, and to network
+capabilities, operational stability, and developer experience, with a focus on:
+
+**Security & Authentication**
+- Added JWT/X.509 authentication over TLS as an alternative to mTLS for flexible authentication
+- Authenticated PeerID integration with libp2p transport
+- Secure credential management for zot sync operations
+- OCI 1.1 referrers specification migration for signature attachments
+
+**Networking & Connectivity**
+- GossipSub implementation for efficient label announcements
+- Connection Manager implementation with removal of custom peer discovery
+- Improved routing search with better peer address handling
+- Locator-based record search capabilities across the network
+
+**Generic Record Referrers**
+- Support for storage and handling of generic record referrers
+- Referrer object encoding across server and SDK components
+- Application support via referrers (e.g., signatures, attestations)
+
+**Developer Experience & Tooling**
+- Streaming package for Golang gRPC client functionality
+- Standardized CLI output formats and command improvements
+- Reusable setup-dirctl GitHub Action for CI/CD workflows
+
+**Quality & Stability Improvements**
+- Comprehensive test suite enhancements including SPIRE tests
+- E2E network test stability with environment warm-up phases
+- Bug fixes for API key formatting, file permissions, and documentation
+
+### Compatibility Matrix
+
+| Component | Version | Compatible With |
+| ---------------------- | ------- | -------------------------------------------- |
+| **dir-apiserver** | v0.4.0 | oasf v0.3.x, v0.7.x |
+| **dirctl** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x |
+| **dir-go** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x |
+| **dir-py** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x |
+| **dir-js** | v0.4.0 | dir-apiserver >= v0.3.0, oasf v0.3.x, v0.7.x |
+| **helm-charts/dir** | v0.4.0 | dir-apiserver >= v0.3.0 |
+| **helm-charts/dirctl** | v0.4.0 | dirctl >= v0.3.0 |
+
+### Added
+- **Networking**: GossipSub for efficient label announcements (#472)
+- **Networking**: AutoRelay, Hole Punching, and mDNS for better peer connectivity (#503)
+- **Networking**: Connection Manager implementation (#495)
+- **Security**: JWT authentication and TLS communication support (#492)
+- **Security**: Authenticated PeerID from libp2p transport (#502)
+- **Storage**: Generic record referrers support (#451, #480)
+- **Storage**: Referrer encoding capabilities (#491, #499)
+- **API**: Server validator functionality (#456)
+- **SDK**: Streaming package for gRPC client (#527)
+- **CI**: Zot dependency for server config (#444)
+- **CI**: Reusable setup-dirctl GitHub Action (#441)
+- **CI**: SPIRE tests action (#488)
+- **CI**: Local tap for testing homebrew formulae (#437)
+
+### Changed
+- **Security**: Rename mTLS to x.509 for clarity (#508)
+- **Storage**: Secure credential management for zot sync operations (#457)
+- **API**: Unify extensions to modules architecture (#463)
+- **CLI**: Standardize CLI output formats (#509)
+- **CLI**: Update search command to use routing search command flags (#521)
+- **Docs**: Update SDK source and release links (#434)
+- **CI**: Improvements to pipeline and taskfile setup (#438, #442)
+- **CI**: Bump Go, golangci-lint and libp2p (#534)
+- **CI**: Bump OASF SDK to versions 0.0.6 and 0.0.7 (#481, #489)
+- **CI**: Bump brew tap to v0.3.0 (#435)
+
+### Fixed
+- **Networking**: Empty peer addresses in routing search results (#513)
+- **Networking**: Locator-based remote record search (#469)
+- **Networking**: E2E network test flakiness with environment warm-up (#516)
+- **Security**: Auth mode mTLS issues (#517)
+- **Security**: Cosign signature attachment migration to OCI 1.1 referrers spec (#464)
+- **Security**: Signature verification against expected payload (#493)
+- **Storage**: Demo testdata and flaky test removal (#504)
+- **SDK**: JavaScript package release issues (#432)
+- **CLI**: API key list format (#494)
+- **Docs**: Remove broken links from README (#458)
+- **Docs**: Fix push and pull documentation (#436)
+- **CI**: Support multiple federation artifacts via charts (#519)
+- **CI**: Enable push to buf registry on merge (#484)
+- **CI**: Bug report template to allow version input (#460)
+- **CI**: File permissions for hub/api/v1alpha1/* (#518)
+
+### Removed
+- **Networking**: Custom peer discovery (replaced with Connection Manager) (#495)
+- **CI**: Useless files in hub/ directory (#522)
+
+[Full Changelog](https://github.com/agntcy/dir/compare/v0.3.0...v0.4.0)
+
+---
+
+## [v0.3.0] - 2025-09-19
+
+### Key Highlights
+
+This release delivers foundational improvements to the security schema,
+storage services, network capabilities, and user/developer experience,
+with a focus on:
+
+**Zero-Trust Security Architecture**
+- X.509-based SPIFFE/SPIRE identity framework with federation support
+- Policy-based authorization framework with fine-grained access controls
+- Secure mTLS communication across all services
+- OCI-native PKI with client- and server-side verification capabilities
+
+**Content Standardization**
+- Unified Core v1 API with multi-version support for OASF objects
+- Deterministic CID generation using canonical JSON marshaling
+- Cross-language and service consistency with CIDv1 record addressing
+- OCI-native object storage and relationship management
+
+**Search & Discovery**
+- Local search with wildcard and pattern matching support
+- Network-wide record discovery with prefix-based search capabilities
+- DHT-based routing for distributed service announcement and discovery
+
+**Data Synchronization**
+- Full and partial index synchronization with CID selection
+- Automated sync workflows for simple data migration and replication
+- Post-sync verification checks and search capabilities across records
+
+**Developer Experience**
+- Native Client SDKs for Golang, JavaScript, TypeScript, and Python
+- Standardized CLI and SDK tooling with consistent interfaces
+- Decoupled signing workflows for easier usage and integration
+- Kubernetes deployment with SPIRE and Federation support
+
+### Compatibility Matrix
+
+The following matrix shows compatibility between different component versions:
+
+| Core Component | Version | Compatible With |
+| ----------------- | ------- | ---------------------------------------------- |
+| **dir-apiserver** | v0.3.0 | oasf v0.3.x, oasf v0.7.x |
+| **dirctl** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x |
+| **dir-go** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x |
+| **dir-py** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x |
+| **dir-js** | v0.3.0 | dir-apiserver v0.3.0, oasf v0.3.x, oasf v0.7.x |
+
+#### Helm Chart Compatibility
+
+| Helm Chart | Version | Deploys Component | Minimum Requirements |
+| -------------------------- | ------- | -------------------- | -------------------- |
+| **dir/helm-charts/dir** | v0.3.0 | dir-apiserver v0.3.0 | Kubernetes 1.20+ |
+| **dir/helm-charts/dirctl** | v0.3.0 | dirctl v0.3.0 | Kubernetes 1.20+ |
+
+#### Compatibility Notes
+
+- **Full OASF support** is available across all core components
+- **dir-apiserver v0.3.0** introduces breaking changes to the API layer
+- **dirctl v0.3.0** introduces breaking changes to the CLI usage
+- **dir-go v0.3.0** introduces breaking changes to the SDK usage
+- Older versions of **dir-apiserver** are **not compatible** with **dir-apiserver v0.3.0**
+- Older versions of client components are **not compatible** with **dir-apiserver v0.3.0**
+- Older versions of helm charts are **not compatible** with **dir-apiserver v0.3.0**
+- Data must be manually migrated from older **dir-apiserver** versions to **dir-apiserver v0.3.0**
+
+#### Migration Guide
+
+Data from the OCI storage layer in the Directory can be migrated by re-pushing it
+via the new API endpoints. For example:
+
+```bash
+repo=localhost:5000/dir
+for tag in $(oras repo tags "$repo"); do
+  digest=$(oras resolve "$repo:$tag")
+  oras blob fetch --output - "$repo@$digest" | dirctl push --stdin
+done
+```
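+
+As a variant of the loop above, the same migration can run against an authenticated remote registry; the registry host below is a placeholder, and only the `oras login` step is new:
+
+```bash
+# Authenticate against the source registry first (placeholder host)
+oras login registry.example.com
+
+repo=registry.example.com/dir
+for tag in $(oras repo tags "$repo"); do
+  digest=$(oras resolve "$repo:$tag")
+  oras blob fetch --output - "$repo@$digest" | dirctl push --stdin
+done
+```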
+
+### Added
+- **API**: Implement Search API for network-wide record discovery using RecordQuery interface (#362)
+- **API**: Add initial authorization framework (#330)
+- **API**: Add distributed label-based announce/discovery via DHT (#285)
+- **API**: Add wildcard search support with pattern matching (#355)
+- **API**: Add max replicasets to keep in deployment (#207)
+- **API**: Add sync API (#199)
+- **CI**: Add Codecov workflow & docs (#380)
+- **CI**: Introduce BSR (#212)
+- **SDK**: Add SDK release process (#216)
+- **SDK**: Add more gRPC services (#294)
+- **SDK**: Add gRPC client code and example for JavaScript SDK (#248)
+- **SDK**: Add sync support (#361)
+- **SDK**: Add sign and verification (#337)
+- **SDK**: Add testing solution for CI (#269)
+- **SDK**: Standardize Python SDK tooling for Directory (#371)
+- **SDK**: Add TypeScript/JavaScript DIR Client SDK (#407)
+- **Security**: Implement server-side verification with zot (#286)
+- **Security**: Use SPIFFE/SPIRE to enable security schema (#210)
+- **Security**: Add spire federation support (#295)
+- **Storage**: Add storage layer full-index record synchronisation (#227)
+- **Storage**: Add post sync verification (#324)
+- **Storage**: Enable search on synced records (#310)
+- **Storage**: Add fallback to client-side verification (#373)
+- **Storage**: Add policy-based publish (#333)
+- **Storage**: Add custom type for error handling (#189)
+- **Storage**: Add sign and verify gRPC service (#201)
+- **Storage**: Add new hub https://hub.agntcy.org/directory (#202)
+- **Storage**: Add cid-based synchronisation support (#401)
+- **Storage**: Add rich manifest annotations (#236)
+
+### Changed
+- **API**: Switch to generic OASF objects across codebase (#381)
+- **API**: Version upgrade of API services (#225)
+- **API**: Update sync API and add credential RPC (#217)
+- **API**: Refactor domain interfaces to align with OASF schema (#397)
+- **API**: Rename v1alpha2 to v1 (#258)
+- **CI**: Find better place for proto APIs (#384)
+- **CI**: Reduce flaky jobs for SDK (#339)
+- **CI**: Update codebase with proto namespace changes (#398)
+- **CI**: Update CI task gen to ignore buf lock file changes (#275)
+- **CI**: Update brew formula version (#372, #263, #257, #247)
+- **CI**: Bump Go (#221)
+- **CI**: Update Directory proto imports for SDKs (#421)
+- **CI**: Bump OASF SDK version to v0.0.5 (#424)
+- **Documentation**: Update usage documentation for record generation (#287)
+- **Documentation**: Add and update README for refactored SDKs (#273)
+- **Documentation**: Update README to reflect new usage documentation link and remove USAGE.md file (#332)
+- **Documentation**: Update documentation setup (#394)
+- **SDK**: Move and refactor Python SDK code (#229)
+- **SDK**: Bump package versions for release (#274)
+- **SDK**: Bump versions for release (#249)
+- **SDK**: Support streams & update docs (#284)
+- **SDK**: Update API code and add example code for Python SDK (#237)
+- **Storage**: Migrate record signature to OCI native
signature (#250) +- **Storage**: Store implementations and digest/CID calculation (#238) +- **Storage**: Standardize and cleanup store providers (#385) +- **Storage**: Improve logging to suppress misleading errors in database and routing layers (#289) +- **Storage**: Refactor E2E Test Suite & Utilities Enhancement (#268) +- **Storage**: Refactor e2e tests multiple OASF versions (#278) +- **Storage**: Refactor: remove semantic tags keep only CID tag (#265) +- **Storage**: Refactor: remove generated OASF objects (#356) +- **Storage**: Refactor: remove builder artifacts and build cmd usages (#329) +- **Storage**: Refactor: remove agent refs (#331) +- **Storage**: Refactor: remove redundant proto files (#219) +- **Storage**: Refactor: remove Legacy List API and Migrate to RecordQuery-Based System (#342) +- **Storage**: Refactor: remove Python code generation (#215) + +### Fixed +- **API**: Resolve buf proto API namespacing issues (#393) +- **API**: Add sync testdata (#396) +- **API**: Update apiserver.env to use new config values (#406) +- **API**: Suppress command usage display on runtime errors (#290) +- **API**: Quick-fix for e2e CLI cmd state handling (#270) +- **API**: Fix/CI task gen (#271) +- **CI**: Allow dir-hub-maintainers release (#402) +- **SDK**: Fix Python SDK imports and tests (#403) +- **SDK**: Fix codeowners file (#404) +- **SDK**: Flaky SDK CICD tests (#422) +- **Storage**: Add separate maintainers for hub CLI directory (#375) +- **Storage**: Update agent directory default location (#226) +- **Storage**: Flaky e2e test and restructure test suites (#416) +- **Storage**: E2E sync test cleanup (#423) + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.13...v0.3.0) + +--- + +## [v0.2.13] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.12...v0.2.13) + +--- + +## [v0.2.12] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.11...v0.2.12) + +--- + +## [v0.2.11] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.10...v0.2.11) + +--- + +## [v0.2.10] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.9...v0.2.10) + +--- + +## [v0.2.9] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.8...v0.2.9) + +--- + +## [v0.2.8] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.7...v0.2.8) + +--- + +## [v0.2.7] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.6...v0.2.7) + +--- + +## [v0.2.6] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.5...v0.2.6) + +--- + +## [v0.2.5] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.4...v0.2.5) + +--- + +## [v0.2.4] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.3...v0.2.4) + +--- + +## [v0.2.3] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.2...v0.2.3) + +--- + +## [v0.2.2] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.1...v0.2.2) + +--- + +## [v0.2.1] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.2.0...v0.2.1) + +--- + +## [v0.2.0] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.6...v0.2.0) + +--- + +## [v0.1.6] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.5...v0.1.6) + +--- + +## [v0.1.5] - Previous Release + +[Full 
Changelog](https://github.com/agntcy/dir/compare/v0.1.4...v0.1.5) + +--- + +## [v0.1.4] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.3...v0.1.4) + +--- + +## [v0.1.3] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.2...v0.1.3) + +--- + +## [v0.1.2] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.1...v0.1.2) + +--- + +## [v0.1.1] - Previous Release + +[Full Changelog](https://github.com/agntcy/dir/compare/v0.1.0...v0.1.1) + +--- + +## [v0.1.0] - Initial Release + +[Full Changelog](https://github.com/agntcy/dir/releases/tag/v0.1.0) + +--- + +## Legend + +- **Added** for new features +- **Changed** for changes in existing functionality +- **Deprecated** for soon-to-be removed features +- **Removed** for now removed features +- **Fixed** for any bug fixes +- **Security** for vulnerability fixes diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 94d5687b4..35cad2637 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,132 +1,132 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. 
-Examples of representing our community include using an official email address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -[moderation@agntcy.org](mailto:moderation@agntcy.org). All complaints will be -reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.1, available at -[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder][Mozilla CoC]. - -For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at -[https://www.contributor-covenant.org/translations][translations]. 
- -[homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq -[translations]: https://www.contributor-covenant.org/translations +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[moderation@agntcy.org](mailto:moderation@agntcy.org). All complaints will be +reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. 
+ +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bdc9c64d0..88ec3b3ce 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,81 +1,81 @@ -# How to Contribute - -Thanks for your interest in contributing to Directory! Here are a few -general guidelines on contributing and reporting bugs that we ask you to review. -Following these guidelines helps to communicate that you respect the time of the -contributors managing and developing this open source project. In return, they -should reciprocate that respect in addressing your issue, assessing changes, and -helping you finalize your pull requests. In that spirit of mutual respect, we -endeavor to review incoming issues and pull requests within 10 days, and will -close any lingering issues or pull requests after 60 days of inactivity. - -Please note that all of your interactions in the project are subject to our -[Code of Conduct](/CODE_OF_CONDUCT.md). This includes creation of issues or pull -requests, commenting on issues or pull requests, and extends to all interactions -in any real-time space e.g., Slack, Discord, etc. 
- -## Reporting Issues - -Before reporting a new issue, please ensure that the issue was not already -reported or fixed by searching through our [issues -list](https://github.com/agntcy/dir/issues). - -When creating a new issue, please be sure to include a **title and clear -description**, as much relevant information as possible, and, if possible, a -test case. - -**If you discover a security bug, please do not report it through GitHub. -Instead, please see security procedures in [SECURITY.md](/SECURITY.md).** - -## Sending Pull Requests - -Before sending a new pull request, take a look at existing pull requests and -issues to see if the proposed change or fix has been discussed in the past, or -if the change was already implemented but not yet released. - -We expect new pull requests to include tests for any affected behavior, and, as -we follow semantic versioning, we may reserve breaking changes until the next -major version release. - - -## Developer’s Certificate of Origin - -To improve tracking of who did what, we have introduced a “sign-off” procedure. -The sign-off is a line at the end of the explanation for the commit, which -certifies that you wrote it or otherwise have the right to pass it on as open -source work. We use the Developer Certificate of Origin (see -https://developercertificate.org/) for our sign-off procedure. You must include -a sign-off in the commit message of your pull request for it to be accepted. The -format for a sign-off is: - -``` -Signed-off-by: Random J Developer - -``` - -You can use the -s when you do a git commit to simplify including a properly -formatted sign-off in your commits. If you need to add your sign-off to a commit -you have already made, you will need to amend: -``` -git commit --amend --signoff -``` - -## Other Ways to Contribute - -We welcome anyone that wants to contribute to DIR to triage and -reply to open issues to help troubleshoot and fix existing bugs. Here is what -you can do: - -- Help ensure that existing issues follows the recommendations from the - _[Reporting Issues](#reporting-issues)_ section, providing feedback to the - issue's author on what might be missing. -- Review and update the existing content of our - [Wiki](https://github.com/agntcy/dir/wiki) with up-to-date - instructions and code samples. -- Review existing pull requests, and testing patches against real existing - applications that use `dir`. -- Write a test, or add a missing test case to an existing test. - -Thanks again for your interest on contributing to `dir`! - -:heart: +# How to Contribute + +Thanks for your interest in contributing to Directory! Here are a few +general guidelines on contributing and reporting bugs that we ask you to review. +Following these guidelines helps to communicate that you respect the time of the +contributors managing and developing this open source project. In return, they +should reciprocate that respect in addressing your issue, assessing changes, and +helping you finalize your pull requests. In that spirit of mutual respect, we +endeavor to review incoming issues and pull requests within 10 days, and will +close any lingering issues or pull requests after 60 days of inactivity. + +Please note that all of your interactions in the project are subject to our +[Code of Conduct](/CODE_OF_CONDUCT.md). This includes creation of issues or pull +requests, commenting on issues or pull requests, and extends to all interactions +in any real-time space e.g., Slack, Discord, etc. 
+
+## Reporting Issues
+
+Before reporting a new issue, please ensure that the issue was not already
+reported or fixed by searching through our [issues
+list](https://github.com/agntcy/dir/issues).
+
+When creating a new issue, please be sure to include a **title and clear
+description**, as much relevant information as possible, and, if possible, a
+test case.
+
+**If you discover a security bug, please do not report it through GitHub.
+Instead, please see security procedures in [SECURITY.md](/SECURITY.md).**
+
+## Sending Pull Requests
+
+Before sending a new pull request, take a look at existing pull requests and
+issues to see if the proposed change or fix has been discussed in the past, or
+if the change was already implemented but not yet released.
+
+We expect new pull requests to include tests for any affected behavior, and, as
+we follow semantic versioning, we may reserve breaking changes until the next
+major version release.
+
+## Developer’s Certificate of Origin
+
+To improve tracking of who did what, we have introduced a “sign-off” procedure.
+The sign-off is a line at the end of the explanation for the commit, which
+certifies that you wrote it or otherwise have the right to pass it on as open
+source work. We use the Developer Certificate of Origin (see
+https://developercertificate.org/) for our sign-off procedure. You must include
+a sign-off in the commit message of your pull request for it to be accepted. The
+format for a sign-off is:
+
+```
+Signed-off-by: Random J Developer
+
+```
+
+You can use the `-s` flag with `git commit` to simplify including a properly
+formatted sign-off in your commits. If you need to add your sign-off to a commit
+you have already made, you will need to amend it:
+```
+git commit --amend --signoff
+```
+
+## Other Ways to Contribute
+
+We welcome anyone who wants to contribute to DIR by triaging and
+replying to open issues to help troubleshoot and fix existing bugs. Here is what
+you can do:
+
+- Help ensure that existing issues follow the recommendations from the
+  _[Reporting Issues](#reporting-issues)_ section, providing feedback to the
+  issue's author on what might be missing.
+- Review and update the existing content of our
+  [Wiki](https://github.com/agntcy/dir/wiki) with up-to-date
+  instructions and code samples.
+- Review existing pull requests, and test patches against real existing
+  applications that use `dir`.
+- Write a test, or add a missing test case to an existing test.
+
+Thanks again for your interest in contributing to `dir`!
+
+:heart:
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index dc68fb99c..a3df8154c 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -1,10 +1,10 @@
-# Contributors to Directory
-
-CONTRIBUTOR file should only contain list of copyright holder (i.e. employers of
-maintainers). All files that support comments should include standard header for
-the project. AGNTCY uses the following file header:
-
-Copyright AGNTCY Contributors (https://github.com/agntcy)
-SPDX-License-Identifier: Apache-2.0
-
+# Contributors to Directory
+
+The CONTRIBUTORS file should only contain the list of copyright holders (i.e., employers of
+maintainers). All files that support comments should include the standard header for
+the project. AGNTCY uses the following file header:
+
+Copyright AGNTCY Contributors (https://github.com/agntcy)
+SPDX-License-Identifier: Apache-2.0
+
+1. Cisco Systems Inc.
\ No newline at end of file diff --git a/HomebrewFormula/README.md b/HomebrewFormula/README.md index 686cf8ce4..618fe506d 100644 --- a/HomebrewFormula/README.md +++ b/HomebrewFormula/README.md @@ -1,21 +1,21 @@ -# Directory homebrew tap - -## How to add this homebrew tap to your system -```bash - brew tap agntcy/dir https://github.com/agntcy/dir/ -``` - -## How to install a formula -```bash - brew install dirctl -``` - -## How to remove an installed formula -```bash - brew remove dirctl -``` - -## How to remove tap from your system -```bash - brew untap agntcy/dir +# Directory homebrew tap + +## How to add this homebrew tap to your system +```bash + brew tap agntcy/dir https://github.com/agntcy/dir/ +``` + +## How to install a formula +```bash + brew install dirctl +``` + +## How to remove an installed formula +```bash + brew remove dirctl +``` + +## How to remove tap from your system +```bash + brew untap agntcy/dir ``` \ No newline at end of file diff --git a/HomebrewFormula/dirctl.rb b/HomebrewFormula/dirctl.rb index 4dca83c73..0a8de2f32 100644 --- a/HomebrewFormula/dirctl.rb +++ b/HomebrewFormula/dirctl.rb @@ -1,63 +1,63 @@ -class Dirctl < Formula - desc "Command-line interface for AGNTCY directory" - homepage "https://github.com/agntcy/dir" - version "v0.6.0" - license "Apache-2.0" - version_scheme 1 - - url "https://github.com/agntcy/dir/releases/download/#{version}" # NOTE: It is abused to reduce redundancy - - # TODO: Livecheck can be used to brew bump later - - on_macos do - if Hardware::CPU.arm? && Hardware::CPU.is_64_bit? - url "#{url}/dirctl-darwin-arm64" - sha256 "d69abfab5f55042331237a1ba5966b53efad366032c3bd9dc21f43745f1a8f37" - - def install - bin.install "dirctl-darwin-arm64" => "dirctl" - - system "chmod", "+x", bin/"dirctl" - generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) - end - end - - if Hardware::CPU.intel? && Hardware::CPU.is_64_bit? - url "#{url}/dirctl-darwin-amd64" - sha256 "26bf0098f34a4c4247b267a76ab230e625cd972b6275ac9305719f1fbdd93657" - - def install - bin.install "dirctl-darwin-amd64" => "dirctl" - - system "chmod", "+x", bin/"dirctl" - generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) - end - end - end - - on_linux do - if Hardware::CPU.arm? && Hardware::CPU.is_64_bit? - url "#{url}/dirctl-linux-arm64" - sha256 "9c3590f69bc5b7abffafdded39b339af6beba6148b1a4fe4676f8633e63c6a0d" - - def install - bin.install "dirctl-linux-arm64" => "dirctl" - - system "chmod", "+x", bin/"dirctl" - generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) - end - end - - if Hardware::CPU.intel? && Hardware::CPU.is_64_bit? - url "#{url}/dirctl-linux-amd64" - sha256 "090c220d6e74de2455854a4dab9795dec2f551a11fb4f83b72c5656085b35585" - - def install - bin.install "dirctl-linux-amd64" => "dirctl" - - system "chmod", "+x", bin/"dirctl" - generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) - end - end - end -end +class Dirctl < Formula + desc "Command-line interface for AGNTCY directory" + homepage "https://github.com/agntcy/dir" + version "v0.6.0" + license "Apache-2.0" + version_scheme 1 + + url "https://github.com/agntcy/dir/releases/download/#{version}" # NOTE: It is abused to reduce redundancy + + # TODO: Livecheck can be used to brew bump later + + on_macos do + if Hardware::CPU.arm? && Hardware::CPU.is_64_bit? 
+ url "#{url}/dirctl-darwin-arm64" + sha256 "d69abfab5f55042331237a1ba5966b53efad366032c3bd9dc21f43745f1a8f37" + + def install + bin.install "dirctl-darwin-arm64" => "dirctl" + + system "chmod", "+x", bin/"dirctl" + generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) + end + end + + if Hardware::CPU.intel? && Hardware::CPU.is_64_bit? + url "#{url}/dirctl-darwin-amd64" + sha256 "26bf0098f34a4c4247b267a76ab230e625cd972b6275ac9305719f1fbdd93657" + + def install + bin.install "dirctl-darwin-amd64" => "dirctl" + + system "chmod", "+x", bin/"dirctl" + generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) + end + end + end + + on_linux do + if Hardware::CPU.arm? && Hardware::CPU.is_64_bit? + url "#{url}/dirctl-linux-arm64" + sha256 "9c3590f69bc5b7abffafdded39b339af6beba6148b1a4fe4676f8633e63c6a0d" + + def install + bin.install "dirctl-linux-arm64" => "dirctl" + + system "chmod", "+x", bin/"dirctl" + generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) + end + end + + if Hardware::CPU.intel? && Hardware::CPU.is_64_bit? + url "#{url}/dirctl-linux-amd64" + sha256 "090c220d6e74de2455854a4dab9795dec2f551a11fb4f83b72c5656085b35585" + + def install + bin.install "dirctl-linux-amd64" => "dirctl" + + system "chmod", "+x", bin/"dirctl" + generate_completions_from_executable(bin/"dirctl", "completion", shells: [:bash, :zsh, :fish]) + end + end + end +end diff --git a/LICENSE.md b/LICENSE.md index d9a10c0d8..7cd40e552 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,176 +1,176 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 0fb5280f7..735774b75 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,7 +1,7 @@ -# Maintainers - -- [ramizpolic](https://github.com/ramizpolic), Ramiz Polic -- [muscariello](https://github.com/muscariello), Luca Muscariello -- [paralta](https://github.com/paralta), Catarina Paralta -- [adamtagscherer](https://github.com/adamtagscherer), Adam Tagscherer -- [pbalogh-sa](https://github.com/pbalogh-sa), Peter Balogh +# Maintainers + +- [ramizpolic](https://github.com/ramizpolic), Ramiz Polic +- [muscariello](https://github.com/muscariello), Luca Muscariello +- [paralta](https://github.com/paralta), Catarina Paralta +- [adamtagscherer](https://github.com/adamtagscherer), Adam Tagscherer +- [pbalogh-sa](https://github.com/pbalogh-sa), Peter Balogh diff --git a/README.md b/README.md index 5ea35126d..378701481 100644 --- a/README.md +++ b/README.md @@ -1,174 +1,220 @@ -# Directory - -![GitHub Release (latest by date)](https://img.shields.io/github/v/release/agntcy/dir) -[![CI](https://github.com/agntcy/dir/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/agntcy/dir/actions/workflows/ci.yaml) -[![Coverage](https://codecov.io/gh/agntcy/dir/branch/main/graph/badge.svg)](https://codecov.io/gh/agntcy/dir) -[![License](https://img.shields.io/github/license/agntcy/dir)](./LICENSE.md) - -[Buf Registry](https://buf.build/agntcy/dir) | -[MCP Server](./mcp) | -[Go SDK](https://pkg.go.dev/github.com/agntcy/dir/client) | -[Python SDK](https://pypi.org/project/agntcy-dir/) | -[JavaScript SDK](https://www.npmjs.com/package/agntcy-dir) | -[GitHub Actions](https://github.com/agntcy/dir/tree/main/.github/actions/setup-dirctl) | - -The Directory (dir) allows publication, exchange, and discovery of information about records over a distributed peer-to-peer network. -It leverages [OASF](https://github.com/agntcy/oasf) to describe AI agents and provides a set of APIs and tools to store, publish, and discover records across the network by their attributes and constraints. 
-Directory also leverages [CSIT](https://github.com/agntcy/csit) for continuous system integration and testing across different versions, environments, and features. - -## Features - -ADS enables several key capabilities for the agentic AI ecosystem: - -- **Capability-Based Discovery**: Agents publish structured metadata describing their -functional characteristics as described by the [OASF](https://github.com/agntcy/oasf). -The system organizes this information using hierarchical taxonomies, -enabling efficient matching of capabilities to requirements. -- **Verifiable Claims**: While agent capabilities are often subjectively evaluated, -ADS provides cryptographic mechanisms for data integrity and provenance tracking. -This allows users to make informed decisions about agent selection. -- **Semantic Linkage**: Components can be securely linked to create various relationships -like version histories for evolutionary development, collaborative partnerships where -complementary skills solve complex problems, and dependency chains for composite agent workflows. -- **Distributed Architecture**: Built on proven distributed systems principles, -ADS uses content-addressing for global uniqueness and implements distributed hash tables (DHT) -for scalable content discovery and synchronization across decentralized networks. -- **Tooling and Integration**: Provides a suite of command-line tools, SDKs, and APIs -to facilitate interaction with the system, enabling developers to manage Directory -records and node operations programmatically. -- **Security and Trust**: Incorporates robust security measures including -cryptographic signing, verification of claims, secure communication protocols, and access controls -to ensure the integrity and authenticity of Directory records and nodes. - -## Documentation - -Check the [Documentation](https://docs.agntcy.org/dir/overview/) for a full walkthrough of all the Directory features. - -## Source tree - -- [proto](./proto) - gRPC specification for data models and services -- [api](./api) - API models for tools and packages -- [cli](./cli) - command line client for interacting with system components -- [client](./client) - client SDK for development and API workflows -- [e2e](./e2e) - end-to-end testing framework -- [docs](./docs) - research details and documentation around the project -- [server](./server) - API services to manage storage, routing, and networking operations -- [sdk](./sdk) - client SDK implementations in different languages for development - -## Prerequisites - -To build the project and work with the code, you will need the following installed in your system - -- [Taskfile](https://taskfile.dev/) -- [Docker](https://www.docker.com/) -- [Golang](https://go.dev/doc/devel/release#go1.24.0) - -Make sure Docker is installed with Buildx. - -## Development - -Use `Taskfile` for all related development operations such as testing, validating, deploying, and working with the project. - -### Clone the repository - -```bash -git clone https://github.com/agntcy/dir -cd dir -``` - -### Initialize the project - -This step will fetch all project dependencies and prepare the environment for development. - -```bash -task deps -``` - -### Make changes - -Make the changes to the source code and rebuild for later testing. - -```bash -task build -``` - -### Test changes - -The local testing pipeline relies on Golang to perform unit tests, and -Docker to perform E2E tests in an isolated Kubernetes environment using Kind. 
- -```bash -task test:unit -task test:e2e -``` - -## Artifacts distribution - -All artifacts are tagged using the [Semantic Versioning](https://semver.org/) and follow the checked-out source code tags. -It is not advised to use artifacts with mismatched versions. - -### Container images - -All container images are distributed via [GitHub Packages](https://github.com/orgs/agntcy/packages?repo_name=dir). - -```bash -docker pull ghcr.io/agntcy/dir-ctl:v0.5.7 -docker pull ghcr.io/agntcy/dir-apiserver:v0.5.7 -``` - -### Helm charts - -All helm charts are distributed as OCI artifacts via [GitHub Packages](https://github.com/agntcy/dir/pkgs/container/dir%2Fhelm-charts%2Fdir). - -```bash -helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 -``` - -### Binaries - -All release binaries are distributed via [GitHub Releases](https://github.com/agntcy/dir/releases) and [Homebrew](./HomebrewFormula/) `agntcy/dir` tap. - -### SDKs - -- **Golang** - [pkg.go.dev/github.com/agntcy/dir/client](https://pkg.go.dev/github.com/agntcy/dir/client) - [github.com/agntcy/dir/client](https://github.com/agntcy/dir/tree/main/client) - -- **Python** - [pypi.org/agntcy-dir](https://pypi.org/project/agntcy-dir/) - [github.com/agntcy/dir/sdk/dir-py](https://github.com/agntcy/dir/tree/main/sdk/dir-py) - -- **JavaScript** - [npmjs.com/agntcy-dir](https://www.npmjs.com/package/agntcy-dir) - [github.com/agntcy/dir/sdk/dir-js](https://github.com/agntcy/dir/tree/main/sdk/dir-js) - -## Deployment - -Directory API services can be deployed either using the `Taskfile` or directly via the released Helm chart. - -### Using Taskfile - -This will start the necessary components such as storage and API services. - -```bash -task server:start -``` - -### Using Helm chart - -This will deploy Directory services into an existing Kubernetes cluster. - -```bash -helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 -helm upgrade --install dir oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 -``` - -### Using Docker Compose - -This will deploy Directory services using Docker Compose: - -```bash -cd install/docker -docker compose up -d -``` - -## Copyright Notice - -[Copyright Notice and License](./LICENSE.md) - -Distributed under Apache 2.0 License. See LICENSE for more information. -Copyright AGNTCY Contributors (https://github.com/agntcy) +# Directory + +![GitHub Release (latest by date)](https://img.shields.io/github/v/release/agntcy/dir) +[![CI](https://github.com/agntcy/dir/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/agntcy/dir/actions/workflows/ci.yaml) +[![Coverage](https://codecov.io/gh/agntcy/dir/branch/main/graph/badge.svg)](https://codecov.io/gh/agntcy/dir) +[![License](https://img.shields.io/github/license/agntcy/dir)](./LICENSE.md) + +[Buf Registry](https://buf.build/agntcy/dir) | +[MCP Server](./mcp) | +[Go SDK](https://pkg.go.dev/github.com/agntcy/dir/client) | +[Python SDK](https://pypi.org/project/agntcy-dir/) | +[JavaScript SDK](https://www.npmjs.com/package/agntcy-dir) | +[GitHub Actions](https://github.com/agntcy/dir/tree/main/.github/actions/setup-dirctl) | + +The Directory (dir) allows publication, exchange, and discovery of information about records over a distributed peer-to-peer network. +It leverages [OASF](https://github.com/agntcy/oasf) to describe AI agents and provides a set of APIs and tools to store, publish, and discover records across the network by their attributes and constraints. 
+Directory also leverages [CSIT](https://github.com/agntcy/csit) for continuous system integration and testing across different versions, environments, and features. + +## Trust Ranking (Reference PoC) + +This repository includes a **reference-only trust ranking extension** for directory results, implemented as an optional add-on under `extensions/trust_ranking/`. + +The goal is to demonstrate: + +- how trust signals *could* influence discovery and ranking +- how rankings can be explainable (human-readable reasons) +- how this logic can remain optional and pluggable + +**Details:** `extensions/trust_ranking/REFERENCE.md` + +### What this is + +- A minimal, additive extension +- A toy scoring model with clear limitations +- A runnable demo for discussion and experimentation + +### What this is not + +- Not a security system +- Not a standard +- Not a production recommendation + +The reference implementation intentionally makes no attempt at real-world enforcement, +identity guarantees, or adversarial robustness. + +### Try it + +```bash +python scripts/run_trust_ranking.py --top 10 +``` + +The demo ranks sample directory entries and annotates them with: + +- `trust.score` (0–100) +- `trust.band` (`green | yellow | red`) +- `trust.reasons` (top 3 human-readable explanations) + +An illustrative sketch of this annotation shape appears under the Documentation section below. + +### Why include this + +As agent directories grow, returning an unordered list of candidates becomes insufficient. +This PoC exists to explore *where* trust-based ranking might live and *how* it could be exposed without centralizing control or mandating policy. + +--- + +## Features + +The Agent Directory Service (ADS) enables several key capabilities for the agentic AI ecosystem: + +- **Capability-Based Discovery**: Agents publish structured metadata describing their + functional characteristics as defined by the [OASF](https://github.com/agntcy/oasf). + The system organizes this information using hierarchical taxonomies, + enabling efficient matching of capabilities to requirements. +- **Verifiable Claims**: While agent capabilities are often subjectively evaluated, + ADS provides cryptographic mechanisms for data integrity and provenance tracking. + This allows users to make informed decisions about agent selection. +- **Semantic Linkage**: Components can be securely linked to create various relationships + like version histories for evolutionary development, collaborative partnerships where + complementary skills solve complex problems, and dependency chains for composite agent workflows. +- **Distributed Architecture**: Built on proven distributed systems principles, + ADS uses content-addressing for global uniqueness and implements distributed hash tables (DHT) + for scalable content discovery and synchronization across decentralized networks. +- **Tooling and Integration**: Provides a suite of command-line tools, SDKs, and APIs + to facilitate interaction with the system, enabling developers to manage Directory + records and node operations programmatically. +- **Security and Trust**: Incorporates robust security measures including + cryptographic signing, verification of claims, secure communication protocols, and access controls + to ensure the integrity and authenticity of Directory records and nodes. + +## Documentation + +Check the [Documentation](https://docs.agntcy.org/dir/overview/) for a full walkthrough of all the Directory features.
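+### Illustrative scoring sketch
+
+As a companion to the Trust Ranking PoC described above, the sketch below shows one *possible* shape for a toy scorer. It is a minimal illustration, not the extension's actual code or API: the signal names, weights, and band thresholds are hypothetical, and the real reference implementation lives under `extensions/trust_ranking/` (see its `REFERENCE.md`).
+
+```python
+# Illustrative sketch only: signal names, weights, and thresholds below are
+# hypothetical and are NOT the trust_ranking extension's actual implementation.
+from dataclasses import dataclass
+
+
+@dataclass
+class TrustAnnotation:
+    score: int          # trust.score, clamped to 0-100
+    band: str           # trust.band: "green" | "yellow" | "red"
+    reasons: list[str]  # trust.reasons: top 3 human-readable explanations
+
+
+def score_entry(entry: dict) -> TrustAnnotation:
+    """Derive a score, a band, and explainable reasons from one entry's signals."""
+    score, reasons = 0, []
+    if entry.get("signature_verified"):      # hypothetical signal
+        score += 40
+        reasons.append("record signature verified")
+    if entry.get("publisher_known"):         # hypothetical signal
+        score += 30
+        reasons.append("publisher previously seen on this network")
+    if entry.get("age_days", 0) >= 30:       # hypothetical signal
+        score += 20
+        reasons.append("record stable for 30+ days")
+    if not entry.get("schema_valid", True):  # hypothetical signal
+        score -= 25
+        reasons.append("schema validation failed")
+    score = max(0, min(100, score))
+    band = "green" if score >= 70 else "yellow" if score >= 40 else "red"
+    return TrustAnnotation(score, band, reasons[:3])
+
+
+if __name__ == "__main__":
+    entry = {"signature_verified": True, "publisher_known": True, "age_days": 45}
+    print(score_entry(entry))
+    # TrustAnnotation(score=90, band='green', reasons=['record signature
+    # verified', 'publisher previously seen on this network', 'record stable
+    # for 30+ days'])
+```
+
+Keeping the scorer a pure function over an entry's signals is what makes this kind of logic easy to keep optional and pluggable: a client can apply it, swap it, or ignore it without changing the underlying query results.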
+ +## Source tree + +- [proto](./proto) - gRPC specification for data models and services +- [api](./api) - API models for tools and packages +- [cli](./cli) - command line client for interacting with system components +- [client](./client) - client SDK for development and API workflows +- [e2e](./e2e) - end-to-end testing framework +- [docs](./docs) - research details and documentation around the project +- [server](./server) - API services to manage storage, routing, and networking operations +- [sdk](./sdk) - client SDK implementations in different languages for development + +## Prerequisites + +To build the project and work with the code, you will need the following installed on your system: + +- [Taskfile](https://taskfile.dev/) +- [Docker](https://www.docker.com/) +- [Golang](https://go.dev/doc/devel/release#go1.24.0) + +Make sure Docker is installed with Buildx. + +## Development + +Use `Taskfile` for all development operations, such as testing, validating, and deploying the project. + +### Clone the repository + +```bash +git clone https://github.com/agntcy/dir +cd dir +``` + +### Initialize the project + +This step will fetch all project dependencies and prepare the environment for development. + +```bash +task deps +``` + +### Make changes + +Make your changes to the source code and rebuild for later testing. + +```bash +task build +``` + +### Test changes + +The local testing pipeline relies on Golang to perform unit tests, and +Docker to perform E2E tests in an isolated Kubernetes environment using Kind. + +```bash +task test:unit +task test:e2e +``` + +## Artifacts distribution + +All artifacts are tagged using [Semantic Versioning](https://semver.org/) and follow the checked-out source code tags. +It is not advised to use artifacts with mismatched versions. + +### Container images + +All container images are distributed via [GitHub Packages](https://github.com/orgs/agntcy/packages?repo_name=dir). + +```bash +docker pull ghcr.io/agntcy/dir-ctl:v0.5.7 +docker pull ghcr.io/agntcy/dir-apiserver:v0.5.7 +``` + +### Helm charts + +All Helm charts are distributed as OCI artifacts via [GitHub Packages](https://github.com/agntcy/dir/pkgs/container/dir%2Fhelm-charts%2Fdir). + +```bash +helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 +``` + +### Binaries + +All release binaries are distributed via [GitHub Releases](https://github.com/agntcy/dir/releases) and the [Homebrew](./HomebrewFormula/) `agntcy/dir` tap. + +### SDKs + +- **Golang** - [pkg.go.dev/github.com/agntcy/dir/client](https://pkg.go.dev/github.com/agntcy/dir/client) - [github.com/agntcy/dir/client](https://github.com/agntcy/dir/tree/main/client) + +- **Python** - [pypi.org/agntcy-dir](https://pypi.org/project/agntcy-dir/) - [github.com/agntcy/dir/sdk/dir-py](https://github.com/agntcy/dir/tree/main/sdk/dir-py) + +- **JavaScript** - [npmjs.com/agntcy-dir](https://www.npmjs.com/package/agntcy-dir) - [github.com/agntcy/dir/sdk/dir-js](https://github.com/agntcy/dir/tree/main/sdk/dir-js) + +## Deployment + +Directory API services can be deployed either using the `Taskfile` or directly via the released Helm chart. + +### Using Taskfile + +This will start the necessary components such as storage and API services. + +```bash +task server:start +``` + +### Using Helm chart + +This will deploy Directory services into an existing Kubernetes cluster.
+ +```bash +helm pull oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 +helm upgrade --install dir oci://ghcr.io/agntcy/dir/helm-charts/dir --version v0.5.7 +``` + +### Using Docker Compose + +This will deploy Directory services using Docker Compose: + +```bash +cd install/docker +docker compose up -d +``` + +## Copyright Notice + +[Copyright Notice and License](./LICENSE.md) + +Distributed under the Apache 2.0 License. See LICENSE for more information. +Copyright AGNTCY Contributors (https://github.com/agntcy) diff --git a/RELEASE.md b/RELEASE.md index 5b1b24fe2..ee55e14f4 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,40 +1,40 @@ -# Release - -This document outlines the process for creating a new release for Directory packages. -All code block examples provided below correspond to an update to version `v1.0.0`, please update accordingly. - -## 1. Create Release branch - -Prepare a new release for the desired version by running the following command: - -```sh -task release:create RELEASE_VERSION=v1.0.0 -``` - -> [!NOTE] -> For SDK release candidates, versions like `1.0.0-rc.1` becomes `1.0.0-rc.1` in JavaScript package.json -> and `1.0.0rc1` in Python pyproject.toml. - -## 2. Create and Push Tags - -* After the pull request is approved and merged, update your local main branch. -```sh -git checkout main -git pull origin main -``` - -* To trigger the release workflow, create and push to the repository a release tag for the last commit. -```sh -git tag -a v1.0.0 -git push origin v1.0.0 -``` - -Please note that the release tag is not necessarily associated with the "release: prepare version v1.0.0" commit. For example, if any bug fixes were required after this commit, they can be merged and included in the release. - -## 3. Publish release - -* Wait until the release workflow is completed successfully. - -* Navigate to the [Releases page](https://github.com/agntcy/dir/releases) and verify the draft release description as well as the assets listed. - -* Once the draft release has been verified, click on `Edit` release and then on `Publish Release`. +# Release + +This document outlines the process for creating a new release for Directory packages. +All code block examples provided below correspond to an update to version `v1.0.0`; adjust the version accordingly. + +## 1. Create Release branch + +Prepare a new release for the desired version by running the following command: + +```sh +task release:create RELEASE_VERSION=v1.0.0 +``` + +> [!NOTE] +> For SDK release candidates, a version like `1.0.0-rc.1` becomes `1.0.0-rc.1` in the JavaScript package.json +> and `1.0.0rc1` in the Python pyproject.toml. + +## 2. Create and Push Tags + +* After the pull request is approved and merged, update your local main branch. +```sh +git checkout main +git pull origin main +``` + +* To trigger the release workflow, create a release tag for the last commit and push it to the repository. +```sh +git tag -a v1.0.0 +git push origin v1.0.0 +``` + +Please note that the release tag is not necessarily associated with the "release: prepare version v1.0.0" commit. For example, if any bug fixes were required after this commit, they can be merged and included in the release. + +## 3. Publish release + +* Wait until the release workflow is completed successfully. + +* Navigate to the [Releases page](https://github.com/agntcy/dir/releases) and verify the draft release description as well as the assets listed. + +* Once the draft release has been verified, click on `Edit` release and then on `Publish Release`.
diff --git a/SECURITY.md b/SECURITY.md index b85f389e3..a7d97184b 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,24 +1,24 @@ -## Report a security issue - -The DIR project team welcomes security reports and is committed to -providing prompt attention to security issues. Security issues should be -reported privately via [security@agntcy.org](mailto:security@agntcy.org). -Security issues should not be reported via the public GitHub Issue tracker. - - -## Vulnerability coordination - -Remediation of security vulnerabilities is prioritized by the project team. The -project team coordinates remediation with third-party project stakeholders via -[GitHub Security Advisories](https://help.github.com/en/github/managing-security-vulnerabilities/about-github-security-advisories). Third-party stakeholders may include the reporter of the issue, affected direct or indirect -users of DIR, and maintainers of upstream dependencies if applicable. - -Downstream project maintainers and DIR users can request participation in -coordination of applicable security issues by sending your contact email address, -GitHub username(s) and any other salient information to [security@agntcy.org](mailto:security@agntcy.org). -Participation in security issue coordination processes is at the discretion of the DIR team. - -## Security advisories - -The project team is committed to transparency in the security issue disclosure +## Report a security issue + +The DIR project team welcomes security reports and is committed to +providing prompt attention to security issues. Security issues should be +reported privately via [security@agntcy.org](mailto:security@agntcy.org). +Security issues should not be reported via the public GitHub Issue tracker. + + +## Vulnerability coordination + +Remediation of security vulnerabilities is prioritized by the project team. The +project team coordinates remediation with third-party project stakeholders via +[GitHub Security Advisories](https://help.github.com/en/github/managing-security-vulnerabilities/about-github-security-advisories). Third-party stakeholders may include the reporter of the issue, affected direct or indirect +users of DIR, and maintainers of upstream dependencies if applicable. + +Downstream project maintainers and DIR users can request participation in +coordination of applicable security issues by sending your contact email address, +GitHub username(s), and any other salient information to [security@agntcy.org](mailto:security@agntcy.org). +Participation in security issue coordination processes is at the discretion of the DIR team. + +## Security advisories + +The project team is committed to transparency in the security issue disclosure process. The DIR team announces security issues via [project GitHub Release notes](https://github.com/agntcy/dir/releases).
\ No newline at end of file diff --git a/Taskfile.yml b/Taskfile.yml index 2129b7916..f29043b3a 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -1,1906 +1,1906 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -version: "3" - -env: - GOWORK: off - -vars: - ## Version - RELEASE_VERSION: - sh: grep 'version:' versions.yaml | awk '{print $2}' - RELEASE_VERSION_LDFLAG: "-X 'github.com/agntcy/dir/api/version.Version={{ .RELEASE_VERSION }}'" - COMMIT_SHA: - sh: git rev-parse --short HEAD - COMMIT_SHA_LDFLAG: "-X 'github.com/agntcy/dir/api/version.CommitHash={{ .COMMIT_SHA }}'" - VERSION_LDFLAGS: "{{ .RELEASE_VERSION_LDFLAG }} {{ .COMMIT_SHA_LDFLAG }}" - - ## Image config - IMAGE_REPO: '{{ .IMAGE_REPO | default "ghcr.io/agntcy" }}' - IMAGE_TAG: "{{ .IMAGE_TAG | default .COMMIT_SHA }}" - IMAGE_BAKE_ENV: "IMAGE_REPO={{.IMAGE_REPO}} IMAGE_TAG={{.IMAGE_TAG}}" - IMAGE_BAKE_OPTS: '{{ .IMAGE_BAKE_OPTS | default "" }}' - BAKE_ENV: '{{ .IMAGE_BAKE_ENV }} EXTRA_LDFLAGS="{{.VERSION_LDFLAGS}}"' - COVERAGE_IMAGE_TAG: "{{ .IMAGE_TAG | default .COMMIT_SHA }}-coverage" - COVERAGE_IMAGE_BAKE_ENV: "IMAGE_REPO={{.IMAGE_REPO}} IMAGE_TAG={{.COVERAGE_IMAGE_TAG}}" - COVERAGE_BAKE_ENV: '{{ .COVERAGE_IMAGE_BAKE_ENV }} EXTRA_LDFLAGS="{{.VERSION_LDFLAGS}}"' - COVERAGE_PKGS: '{{ .COVERAGE_PKGS | default "github.com/agntcy/dir/api/...,github.com/agntcy/dir/cli/...,github.com/agntcy/dir/client/...,github.com/agntcy/dir/importer/...,github.com/agntcy/dir/utils/..." }}' - - ## Dependency config - BIN_DIR: "{{ .ROOT_DIR }}/bin" - DIRCTL_BIN: "{{ .BIN_DIR}}/dirctl" - HELM_VERSION: "3.16.3" - HELM_BIN: "{{ .BIN_DIR }}/helm-{{.HELM_VERSION}}" - KUBECTL_VERSION: "1.31.3" - KUBECTL_BIN: "{{ .BIN_DIR }}/kubectl-{{.KUBECTL_VERSION}}" - KIND_VERSION: "0.25.0" - KIND_BIN: "{{ .BIN_DIR }}/kind-{{.KIND_VERSION}}" - PROTOC_VERSION: "27.1" - PROTOC_BIN: "{{ .BIN_DIR }}/protoc-{{.PROTOC_VERSION}}" - BUFBUILD_VERSION: "1.50.1" - BUFBUILD_BIN: "{{ .BIN_DIR }}/bufbuild-{{.BUFBUILD_VERSION}}" - GO_VERSION: "1.25.2" - MULTIMOD_VERSION: "0.17.0" - MULTIMOD_BIN: "{{ .BIN_DIR }}/multimod-{{.MULTIMOD_VERSION}}" - GOLANGCI_LINT_VERSION: "2.5.0" - GOLANGCI_LINT_BIN: "{{ .BIN_DIR }}/golangci-lint-{{.GOLANGCI_LINT_VERSION}}" - LICENSEI_VERSION: "0.9.0" - LICENSEI_BIN: "{{ .BIN_DIR }}/licensei-{{.LICENSEI_VERSION}}" - UV_VERSION: "0.8.23" - UV_BIN: "{{ .BIN_DIR }}/uv-{{.UV_VERSION}}" - UV_PUBLISH_TOKEN: '{{ .UV_PUBLISH_TOKEN | default "" }}' - COSIGN_VERSION: "2.5.3" - COSIGN_BIN: "{{ .BIN_DIR }}/cosign-{{.COSIGN_VERSION}}" - HUB_API_VERSION: "main" - ZOT_VERSION: "2.1.11" - SPIRE_VERSION: "1.13.3" - - ## Go module discovery - GO_MOD_DIR: - sh: find . -name go.mod -not -path "./tmp*" -exec dirname {} \; - GO_MOD_DIR_UNIT_TEST: - sh: find . 
-name go.mod -not -path "./e2e*" -not -path "./tmp*" -exec dirname {} \; - -tasks: - ## - ## General - ## - default: - cmds: - - task -l - - gen: - desc: Generate code for all components - cmds: - - task: api:gen - - task: helm:gen - - check: - desc: Checks for all code violations - cmds: - - task: lint - - task: license - - build: - desc: Build images for all components - deps: - - task: deps:tidy - - task: gen - vars: - GOARCH: "{{ .GOARCH | default ARCH }}" - EXTRA_FLAGS: '{{ .EXTRA_FLAGS | default "" }}' - cmds: - - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set *.platform=linux/{{.GOARCH}} {{.EXTRA_FLAGS}}" - - build:coverage: - desc: Build images for all components with coverage instrumentation - cmds: - - task: build - vars: - BAKE_ENV: "{{ .COVERAGE_BAKE_ENV }}" - # TODO: -coverpkg should be set to include all packages (server, api, utils) in the coverage report - # but it's not working as expected, so we're using the default coverage package for now - EXTRA_FLAGS: 'coverage --set *.args.BUILD_OPTS="-cover -covermode=atomic"' - - build:all: - desc: Build images for all components for multiple platforms - cmds: - - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set *.platform=linux/amd64,linux/arm64" - - pull: - desc: Pull images for all components - cmds: - - | - images=$({{.BAKE_ENV}} docker buildx bake default --print | jq -r '.target | with_entries(.value |= .tags[0]) | to_entries[] | .value') - echo "$images" | while read image; do - echo "Pulling image: $image" - docker pull $image - done - - push: - desc: Build and push images for all components - prompt: - - Are you sure you want to push the images to remote registry? - cmds: - - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set=*.output=type=registry" - - release:create: - desc: Prepare release - deps: - - task: deps:multimod-bin - - task: sdk:deps:python - - task: sdk:deps:javascript - vars: - RELEASE_VERSION: "{{ .RELEASE_VERSION }}" - cmds: - # Switch to new branch - - 'if [ "$(git rev-parse --abbrev-ref HEAD)" != "release/{{.RELEASE_VERSION}}" ]; then git checkout -b release/{{.RELEASE_VERSION}}; fi' - # Update versions.yaml with the new version - - 'awk ''{gsub(/version: .*/,"version: {{.RELEASE_VERSION}}")}1'' versions.yaml > versions.yaml.tmp' - - "mv versions.yaml.tmp versions.yaml" - # Update SDK packages with the new version - - "cd sdk/dir-py && {{ .UV_BIN }} version {{.RELEASE_VERSION}} && cd -" - - "cd sdk/dir-js && npm version {{.RELEASE_VERSION}} --allow-same-version --no-git-tag-version && cd -" - # Add release changes - - | - git add . - git commit -S -m "release(dir): prepare release {{.RELEASE_VERSION}}" - # Verify Go release - - | - {{ .MULTIMOD_BIN }} verify - {{ .MULTIMOD_BIN }} prerelease --all-module-sets --skip-go-mod-tidy=true --commit-to-different-branch=false - # Push prepared release - - task: release:push - - release:push: - internal: true - vars: - RELEASE_VERSION: "{{ .RELEASE_VERSION }}" - prompt: - - "Are you sure you want to push the release branch release/{{.RELEASE_VERSION}} to remote repository?" - cmds: - - | - git push --set-upstream origin release/{{.RELEASE_VERSION}} || true - - ## - ## API - ## - api:gen: - desc: Generates API stubs - dir: ./proto - deps: - - task: deps:protoc - - task: deps:bufbuild - # NOTE(ramizpolic): This allows Taskfile YAML parsing to accept '{' as a starting command token. - # In translation, this is interpreted as a regular multi-line shell script. 
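- # The two commands below use the pinned buf binary: 'dep update' refreshes the proto dependency lockfile, and 'generate' regenerates the language stubs from the proto definitions.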
- cmds: - "{{.BUFBUILD_BIN}} dep update" - "{{.BUFBUILD_BIN}} generate" - - api:clean: - desc: Clean generated API stubs - deps: - api:clean:go - api:clean:python - api:clean:javascript - - api:clean:go: - desc: Clean generated golang API stubs - dir: ./api - cmds: - find . \( -name "*.pb.go" \) -type f -delete - - api:clean:python: - desc: Clean generated Python API stubs - dir: ./sdk/dir-py/agntcy - cmd: rm -drf ./dir - - api:clean:javascript: - desc: Clean generated JS/TS API stubs - dir: ./sdk/dir-js/ - cmd: rm -drf ./api - - ## - ## CLI - ## - cli:compile: - desc: Compile CLI binaries - dir: ./cli - vars: - GOOS: "{{ .GOOS | default OS }}" - GOARCH: "{{ .GOARCH | default ARCH }}" - BINARY_NAME: '{{ .BINARY_NAME | default "dirctl" }}' - OUT_BINARY: '{{ if eq OS "windows" }}{{ .ROOT_DIR }}\\bin\\{{ .BINARY_NAME }}.exe{{ else }}{{ .ROOT_DIR }}/bin/{{ .BINARY_NAME }}{{ end }}' - LDFLAGS: "-s -w -extldflags -static {{ .VERSION_LDFLAGS }}" - TRY_SKIP_COMPILE: '{{ .TRY_SKIP_COMPILE | default "false" }}' - cmds: - | - if [ "{{.TRY_SKIP_COMPILE}}" = "true" ]; then - if [ -f "{{.OUT_BINARY}}" ]; then - echo "Binary {{.OUT_BINARY}} already exists, skipping compilation." - exit 0 - else - echo "Binary {{.OUT_BINARY}} does not exist, proceeding with compilation." - fi - fi - - CGO_ENABLED=0 GOOS={{.GOOS}} GOARCH={{.GOARCH}} go build -ldflags="{{ .LDFLAGS }}" -o "{{.OUT_BINARY}}" cli.go - - cli:compile:all: - desc: Compile CLI client binaries for multiple platforms - aliases: [compile] - cmds: - for: - matrix: - OS: ["linux", "darwin", "windows"] - ARCH: ["amd64", "arm64"] - cmd: | - # Skip unsupported combinations (e.g., Windows ARM64) - if [ "{{.ITEM.OS}}" = "windows" ] && [ "{{.ITEM.ARCH}}" = "arm64" ]; then - echo "Skipping unsupported platform: {{.ITEM.OS}}/{{.ITEM.ARCH}}" - else - GOOS={{.ITEM.OS}} GOARCH={{.ITEM.ARCH}} BINARY_NAME=dirctl-{{.ITEM.OS}}-{{.ITEM.ARCH}} task cli:compile - fi - - ## - ## Client SDK - ## - sdk:deps:common: - desc: Common dependencies for SDKs - vars: - TRY_SKIP_COMPILE: "{{ .TRY_SKIP_COMPILE }}" - cmds: - task: deps:cosign - task: cli:compile - vars: - TRY_SKIP_COMPILE: "{{.TRY_SKIP_COMPILE}}" - task: sdk:deps:javascript - task: sdk:deps:python - - sdk:deps:cicd:iodc-token-generation: - desc: Get Fulcio OIDC token for CICD - requires: - vars: [CLIENT_ID] - cmds: - | - OIDC_TOKEN=$(curl -s -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" \ - "$ACTIONS_ID_TOKEN_REQUEST_URL&audience=sigstore" | jq -r '.value') - - CLIENT_ID="{{.CLIENT_ID}}" - PROVIDER_URL="https://token.actions.githubusercontent.com" - - echo "OIDC_PROVIDER_URL=${PROVIDER_URL}" - echo "CLIENT_ID=${CLIENT_ID}" - echo "OIDC_TOKEN=${OIDC_TOKEN}" - - sdk:build:all: - desc: Build all client SDK packages - cmds: - task: sdk:build:javascript - task: sdk:build:python - - sdk:build:python: - desc: Build python client SDK package - dir: ./sdk/dir-py - deps: - task: sdk:deps:python - cmds: - "{{.UV_BIN}} build" - - sdk:build:javascript: - desc: Build javascript client SDK package - dir: ./sdk/dir-js - deps: - task: sdk:deps:javascript - cmds: - npm run build - - sdk:test-env:create: - desc: Create Kubernetes cluster test environment - cmds: - task: deploy:kubernetes:local - vars: - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s - task: deploy:kubernetes:local:port-forward - - sdk:test-env:delete: - desc: Delete Kubernetes cluster test environment - cmds: - task: deploy:kubernetes:local:port-forward:cleanup - task: deploy:kubernetes:local:cleanup - 
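- # Bakes the sdks-test image locally and side-loads it into the KinD cluster, so the in-cluster SDK test job can run without pushing the image to a remote registry.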
sdk:test-env:spiffe:load-test-image: - desc: Load the SDK test image into KinD - deps: - task: api:gen - vars: - GOARCH: "{{ .GOARCH | default ARCH }}" - cmds: - "{{.BAKE_ENV}} docker buildx bake sdks-test --set *.platform=linux/{{.GOARCH}} {{.IMAGE_BAKE_OPTS}}" - "{{.KUBECTL_BIN}} config use-context kind-dir.example" - "{{.KIND_BIN}} load docker-image ghcr.io/agntcy/sdks-test:{{.IMAGE_TAG}} --name dir.example" - - sdk:test:all:spiffe: - desc: Test all client SDK packages with SPIFFE - dir: ./e2e/sdk - cmds: - "{{.KUBECTL_BIN}} config use-context kind-dir.example" - "{{.HELM_BIN}} uninstall sdk-tests --wait --keep-history --ignore-not-found > /dev/null || true" - "{{.HELM_BIN}} install --replace --timeout 3m --wait --wait-for-jobs sdk-tests ./chart -f chart/values.yaml --set image.tag={{.IMAGE_TAG}} > /dev/null || true" - | - status=$({{.KUBECTL_BIN}} get job sdks-test -o jsonpath='{.status.conditions[0].type}') - status_value=$({{.KUBECTL_BIN}} get job sdks-test -o jsonpath='{.status.conditions[0].status}') - - if [[ "$status" == "SuccessCriteriaMet" ]] && [[ "$status_value" == "True" ]]; then - echo "SDK tests finished successfully! ✅" - exit 0 - fi - - if [[ "$status" == "FailureTarget" ]] && [[ "$status_value" == "True" ]]; then - {{.KUBECTL_BIN}} logs jobs/sdks-test - echo "SDK tests failed! ❎" - exit 1 - fi - - echo "Unknown error happened, check logs! ⚠️" - exit 1 - - sdk:test:all: - desc: Test all client SDK packages - cmds: - task: sdk:test:javascript - task: sdk:test:python - - sdk:test:python: - desc: Test python client SDK package - dir: ./sdk/dir-py - deps: - task: sdk:deps:python - env: - KIND_CLUSTER_NAME: "sdk-py-test" - cmds: - task: sdk:test-env:create - defer: { task: sdk:test-env:delete } - | - export DIRCTL_PATH="$(printf "%s" "${DIRCTL_PATH:-{{ .DIRCTL_BIN }}}")" - export COSIGN_PATH="$(printf "%s" "${COSIGN_PATH:-{{ .COSIGN_BIN }}}")" - - '{{.UV_BIN}}' run pytest - - sdk:test:javascript: - desc: Test javascript client SDK package - dir: ./sdk/dir-js - deps: - task: sdk:deps:javascript - env: - KIND_CLUSTER_NAME: "sdk-js-test" - cmds: - task: sdk:test-env:create - defer: { task: sdk:test-env:delete } - | - export DIRCTL_PATH="$(printf "%s" "${DIRCTL_PATH:-{{ .DIRCTL_BIN }}}")" - export COSIGN_PATH="$(printf "%s" "${COSIGN_PATH:-{{ .COSIGN_BIN }}}")" - - npm run test - - sdk:deps:python: - desc: Install deps for python SDK package - dir: ./sdk/dir-py - deps: - task: deps:bufbuild - task: deps:uv - cmds: - task: api:gen - "{{.UV_BIN}} sync --all-packages" - - sdk:deps:javascript: - desc: Install deps for javascript SDK package - dir: ./sdk/dir-js - cmds: - npm install - task: api:gen - - sdk:release:all: - desc: Release all client SDK packages - env: - UV_PUBLISH_TOKEN: "{{ .UV_PUBLISH_TOKEN }}" - NODE_AUTH_TOKEN: "{{ .NODE_AUTH_TOKEN }}" - cmds: - task: sdk:release:javascript - task: sdk:release:python - - sdk:release:python: - ignore_error: true # FIXME: Need to check if package version already exists - desc: Release python client SDK package - dir: ./sdk/dir-py - env: - UV_PUBLISH_TOKEN: "{{ .UV_PUBLISH_TOKEN }}" - deps: - task: deps:uv - cmds: - "{{.UV_BIN}} publish" - - sdk:release:javascript: - ignore_error: true # FIXME: Need to check if package version already exists - desc: Release javascript client SDK package - dir: ./sdk/dir-js - env: - NODE_AUTH_TOKEN: "{{ .NODE_AUTH_TOKEN }}" - cmd: | - version=$(npm pkg get version) - - if [[ $version == *"rc"* ]]; then - npm publish --scope=@agntcy --access public
--tag rc-{{.COMMIT_SHA}} - else - npm publish --scope=@agntcy --access public - fi - - ## - ## Server - ## - server:build: - desc: Build Directory server image - cmds: - - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} dir-apiserver" - - server:start: - desc: Start Directory server - dir: server/cmd - env: - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s - cmds: - - defer: { task: server:store:stop } - - task: server:store:start - - go run main.go - - server:store:pull: - desc: Pull local OCI registry server docker image - vars: - IMAGE: "{{.IMAGE}}" - cmds: - - | - docker pull {{.IMAGE}} - - server:store:start: - desc: Start local OCI registry server for storage - internal: true - vars: - IMAGE: ghcr.io/project-zot/zot-linux-{{ARCH}}:v{{.ZOT_VERSION}} - deps: - - task: server:store:pull - vars: - IMAGE: "{{.IMAGE}}" - cmds: - - | - # mount config - cat > /tmp/config.json < {{ .CREDS_FILE }} << EOF - export HTPASSWD_USERNAME="{{ .HTPASSWD_USERNAME }}" - export HTPASSWD_PASSWORD="${HTPASSWD_PASSWORD}" - export HTPASSWD_AUTH_HEADER="${HTPASSWD_AUTH_HEADER}" - export HTPASSWD_SYNC_USERNAME="{{ .HTPASSWD_SYNC_USERNAME }}" - export HTPASSWD_SYNC_PASSWORD="${HTPASSWD_SYNC_PASSWORD}" - EOF - - # Write htpasswd file (for helm --set-file commands) - cat > {{ .HTPASSWD_FILE }} << EOF - {{ .HTPASSWD_USERNAME }}:${HTPASSWD_ADMIN} - {{ .HTPASSWD_SYNC_USERNAME }}:${HTPASSWD_SYNC} - EOF - - deploy:kubernetes:cleanup-htpasswd-creds: - desc: Cleanup htpasswd credentials and files - vars: - CREDS_FILE: '{{ .CREDS_FILE | default "/tmp/dir-htpasswd-creds.env" }}' - HTPASSWD_FILE: '{{ .HTPASSWD_FILE | default "/tmp/dir-htpasswd" }}' - cmds: - - rm -f {{ .CREDS_FILE }} - - rm -f {{ .HTPASSWD_FILE }} - - deploy:kubernetes:local: - aliases: [deploy:local] - desc: Deploy a local Directory server in Kubernetes - deps: - - deploy:kubernetes:setup-cluster - vars: - # Kind args - KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' - KIND_CREATE_OPTS: '{{ .KIND_CREATE_OPTS | default "" }}' - # Helm args - HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' - HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" - HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" - # Coverage config - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' - cmds: - # TODO: make logic idempotent so that running functional tests does not change previous contexts - - # Generate credentials and htpasswd file (using defaults) - - task: deploy:kubernetes:gen-htpasswd-creds - - # Cleanup credentials on exit (using defaults) - - defer: - task: deploy:kubernetes:cleanup-htpasswd-creds - - # Deploy chart - - | - # Load credentials - source /tmp/dir-htpasswd-creds.env - - {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} - - {{ .HELM_BIN }} upgrade dir \ - {{ .HELM_CHART_PATH }} \ - -f {{ .HELM_VALUES_PATH }} \ - --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ - {{ if .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ - --set apiserver.config.routing.refresh_interval="1s" \ - --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ - --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ - --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ - --set-file 
apiserver.zot.secretFiles.htpasswd="/tmp/dir-htpasswd" \ - --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ - --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ - {{ if .DIRECTORY_SERVER_RATELIMIT_ENABLED }}--set apiserver.config.ratelimit.enabled="{{ .DIRECTORY_SERVER_RATELIMIT_ENABLED }}"{{ end }} \ - {{ if .DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS }}--set apiserver.config.ratelimit.global_rps="{{ .DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS }}"{{ end }} \ - {{ if .DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST }}--set apiserver.config.ratelimit.global_burst="{{ .DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST }}"{{ end }} \ - {{ if .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS }}--set apiserver.config.ratelimit.per_client_rps="{{ .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS }}"{{ end }} \ - {{ if .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST }}--set apiserver.config.ratelimit.per_client_burst="{{ .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST }}"{{ end }} \ - {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ - {{ if .DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL }}--set apiserver.config.oasf_api_validation.schema_url="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL }}"{{ end }} \ - {{ if .DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE }}--set apiserver.config.oasf_api_validation.disable="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE }}"{{ end }} \ - {{ if ne .DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE nil }}--set apiserver.config.oasf_api_validation.strict_mode="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE }}"{{ end }} \ - --namespace {{ .HELM_NAMESPACE }} \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - deploy:kubernetes:context: - desc: Switch context to given Kubernetes cluster - vars: - KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' - cmds: - - "{{ .KIND_BIN }} export kubeconfig --name {{ .KIND_CLUSTER_NAME }}" - - deploy:kubernetes:dir: - desc: Deploy DIR helm chart - vars: - HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' - HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" - HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" - HELM_EXTRA_ARGS: '{{ .HELM_EXTRA_ARGS | default "" }}' - cmds: - # Generate credentials and htpasswd file (using defaults) - - task: deploy:kubernetes:gen-htpasswd-creds - - # Cleanup credentials on exit (using defaults) - - defer: - task: deploy:kubernetes:cleanup-htpasswd-creds - - # Deploy chart - - | - # Load credentials - source /tmp/dir-htpasswd-creds.env - - {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} - - {{ .HELM_BIN }} upgrade --install dir \ - {{ .HELM_CHART_PATH }} \ - -f {{ .HELM_VALUES_PATH }} \ - --set apiserver.image.tag="{{ .IMAGE_TAG }}" \ - {{ if .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ - --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ - --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ - --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ - --set-file apiserver.zot.secretFiles.htpasswd="/tmp/dir-htpasswd" \ - --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ - --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ - --set apiserver.log_level="DEBUG" \ - {{ 
.HELM_EXTRA_ARGS }} \ - --namespace {{ .HELM_NAMESPACE }} \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - deploy:kubernetes:dirctl: - desc: Deploy DIRCTL helm chart - vars: - HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-client" }}' - HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dirctl" - HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dirctl/values.yaml" - HELM_EXTRA_ARGS: '{{ .HELM_EXTRA_ARGS | default "" }}' - cmds: - - | - {{ .HELM_BIN }} upgrade --install dirctl \ - {{ .HELM_CHART_PATH }} \ - -f {{ .HELM_VALUES_PATH }} \ - --set image.tag="{{ .IMAGE_TAG }}" \ - {{ .HELM_EXTRA_ARGS }} \ - --namespace {{ .HELM_NAMESPACE }} \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - deploy:kubernetes:spire: - desc: Deploy SPIRE helm chart - vars: - TRUST_DOMAIN: '{{ .TRUST_DOMAIN | default "example.org" }}' - SERVICE_TYPE: '{{ .SERVICE_TYPE | default "LoadBalancer" }}' - BUNDLE_PATH: '{{ .BUNDLE_PATH | default "/tmp/spire-bundle.spiffe" }}' - cmds: - - | - {{ .HELM_BIN }} upgrade --install spire-crds spire-crds \ - --repo https://spiffe.github.io/helm-charts-hardened/ \ - --namespace spire-crds \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - - | - {{ .HELM_BIN }} upgrade --install spire spire \ - --repo https://spiffe.github.io/helm-charts-hardened/ \ - --set global.spire.trustDomain="{{ .TRUST_DOMAIN }}" \ - --set spire-server.image.tag="{{ .SPIRE_VERSION }}" \ - --set spire-agent.image.tag="{{ .SPIRE_VERSION }}" \ - --set spire-server.service.type="{{ .SERVICE_TYPE }}" \ - --set spire-server.federation.enabled="true" \ - --set spire-server.controllerManager.watchClassless="true" \ - --set spire-server.controllerManager.className="dir-spire" \ - --namespace spire \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - - | - {{ .KUBECTL_BIN }} get configmap -n spire spire-bundle -o json | jq '.data."bundle.spiffe"' -r > {{ .BUNDLE_PATH }} - - deploy:kubernetes:local:port-forward: - aliases: [deploy:local:port-forward] - desc: Set up port-forwarding for the local deployment - vars: - # Helm args - HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' - cmds: - # Port-forward dependency services - - | - {{ .KUBECTL_BIN }} port-forward service/dir-apiserver 8888:8888 -n {{ .HELM_NAMESPACE }} & - {{ .KUBECTL_BIN }} port-forward service/dir-apiserver 9090:9090 -n {{ .HELM_NAMESPACE }} & - {{ .KUBECTL_BIN }} port-forward service/dir-ingress-controller 8080:80 -n {{ .HELM_NAMESPACE }} & - - # Delay to ensure services are online - - sleep 10 - - deploy:kubernetes:local:port-forward:cleanup: - aliases: [deploy:local:port-forward:cleanup] - desc: Cleanup port-forwarding processes - cmds: - # Kill any existing port-forward processes for the dir-apiserver and dir-ingress-controller services - - kill -9 $(ps aux | grep port-forward | grep -E "(dir-apiserver|dir-ingress-controller)" | awk '{print $2}') || true - - deploy:kubernetes:local:cleanup: - aliases: [deploy:local:cleanup, deploy:kubernetes:cleanup] - desc: Cleanup Kubernetes environment for local deployment - deps: - - deps:kind - vars: - # Kind args - KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' - cmds: - - "{{ .KIND_BIN }} delete cluster --name {{ .KIND_CLUSTER_NAME }}" - - deploy:kubernetes:network:bootstrap: - internal: true - desc: Deploy a bootstrap Directory server in Kubernetes - deps: - - task: deploy:kubernetes:setup-cluster - vars: 
- E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - deps:dirctl-bin - vars: - # Helm args - HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" - HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" - # Coverage config - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' - cmds: - # Generate private key if it doesn't exist - - | - test -f /tmp/node.privkey || {{ .BIN_DIR }}/dirctl network init --output /tmp/node.privkey - - # Generate the bootstrap peer ID and export it to the environment file - - | - bootstrap_peerid=$({{ .BIN_DIR }}/dirctl network info /tmp/node.privkey) - echo "PEER ID: ${bootstrap_peerid}" - echo BOOTSTRAP_PEER_ID="${bootstrap_peerid}" > .env - - # Generate credentials and htpasswd file - - task: deploy:kubernetes:gen-htpasswd-creds - vars: - CREDS_FILE: "/tmp/dir-htpasswd-creds-bootstrap.env" - HTPASSWD_FILE: "/tmp/zot-htpasswd-bootstrap" - - # Cleanup credentials on exit - - defer: - task: deploy:kubernetes:cleanup-htpasswd-creds - vars: - CREDS_FILE: "/tmp/dir-htpasswd-creds-bootstrap.env" - HTPASSWD_FILE: "/tmp/zot-htpasswd-bootstrap" - - # Deploy the bootstrap server using Helm - - | - # Load credentials - source /tmp/dir-htpasswd-creds-bootstrap.env - - {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} - - {{ .HELM_BIN }} upgrade agntcy-dir \ - {{ .HELM_CHART_PATH }} \ - -f {{ .HELM_VALUES_PATH }} \ - --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ - {{ if .PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ - --set apiserver.config.routing.refresh_interval="1s" \ - --set apiserver.secrets.privKey="$(cat /tmp/node.privkey)" \ - --set apiserver.config.routing.key_path="/etc/agntcy/dir/node.privkey" \ - --set apiserver.config.routing.listen_address="/ip4/0.0.0.0/tcp/8999" \ - --set apiserver.config.routing.directory_api_address="agntcy-dir-apiserver.bootstrap.svc.cluster.local:8888" \ - --set apiserver.config.store.oci.registry_address="agntcy-dir-zot.bootstrap.svc.cluster.local:5000" \ - --set apiserver.zot.extraVolumes[0].persistentVolumeClaim.claimName="agntcy-dir-zot-config" \ - --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ - --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ - --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ - --set-file apiserver.zot.secretFiles.htpasswd="/tmp/zot-htpasswd-bootstrap" \ - --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ - --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ - {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ - --namespace "bootstrap" \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - deploy:kubernetes:network: - aliases: [deploy:network] - desc: Deploy a network of Directory servers in Kubernetes (1 bootstrap + 3 peers) - vars: - HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" - HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" - # Coverage config - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' - cmds: - # Deploy bootstrap with explicit var 
passing (not as dependency) - - task: deploy:kubernetes:network:bootstrap - vars: - PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - # Deploy the peer servers using Helm - - for: - matrix: - PEER: ["peer1", "peer2", "peer3"] - cmd: | - export $(cat .env) - - # Generate fresh credentials for this peer using helper task - task deploy:kubernetes:gen-htpasswd-creds \ - CREDS_FILE=/tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env \ - HTPASSWD_FILE=/tmp/zot-htpasswd-{{ .ITEM.PEER }} - - # Load credentials - source /tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env - - {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} - {{ .HELM_BIN }} upgrade agntcy-dir \ - {{ .HELM_CHART_PATH }} \ - -f {{ .HELM_VALUES_PATH }} \ - --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ - {{ if .PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ - --set apiserver.config.routing.refresh_interval="1s" \ - --set apiserver.config.store.oci.registry_address="agntcy-dir-zot.{{ .ITEM.PEER }}.svc.cluster.local:5000" \ - --set apiserver.config.routing.bootstrap_peers[0]="/dns4/agntcy-dir-apiserver-routing.bootstrap.svc.cluster.local/tcp/8999/p2p/${BOOTSTRAP_PEER_ID}" \ - --set apiserver.config.routing.directory_api_address="agntcy-dir-apiserver.{{ .ITEM.PEER }}.svc.cluster.local:8888" \ - --set apiserver.zot.extraVolumes[0].persistentVolumeClaim.claimName="agntcy-dir-zot-config" \ - --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ - --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ - --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ - --set-file apiserver.zot.secretFiles.htpasswd="/tmp/zot-htpasswd-{{ .ITEM.PEER }}" \ - --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ - --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ - {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ - --namespace "{{ .ITEM.PEER }}" \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" - - # Cleanup temp files - rm -f /tmp/zot-htpasswd-{{ .ITEM.PEER }} - rm -f /tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env - - deploy:kubernetes:network:port-forward: - aliases: [deploy:network:port-forward] - desc: Set up port-forwarding for the peers - cmds: - # Port-forward dependency services - - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer1 8890:8888 &" - - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer2 8891:8888 &" - - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer3 8892:8888 &" - - # Delay to ensure services are online - - sleep 10 - - deploy:kubernetes:network:port-forward:cleanup: - aliases: [deploy:network:port-forward:cleanup] - desc: Cleanup port-forwarding processes - cmds: - # Kill any existing port-forward processes for the agntcy-dir-apiserver service - - kill -9 $(ps aux | grep port-forward | grep agntcy-dir-apiserver | awk '{print $2}') || true - - deploy:kubernetes:network:cleanup: - aliases: [deploy:network:cleanup] - desc: Cleanup Kubernetes environment for network deployment - vars: - # Kind args - KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' - cmds: - # Delete helm releases - - for: - matrix: - PEER: ["bootstrap", "peer1", "peer2", "peer3"] - cmd: | - {{ .HELM_BIN }} 
delete --namespace {{ .ITEM.PEER }} agntcy-dir - - - "{{ .KIND_BIN }} delete cluster --name {{ .KIND_CLUSTER_NAME }}" - - ## - ## Test - ## - test:unit: - desc: Run unit tests on codebase - aliases: [test] - env: - GOWORK: off - vars: - EXTRA_ARGS: '{{ .EXTRA_ARGS | default "" }}' - cmds: - - for: { var: GO_MOD_DIR_UNIT_TEST } - cmd: | - echo "Running tests in {{.ITEM}}" - go -C {{.ITEM}} test ./... {{.EXTRA_ARGS}} - - test:unit:coverage: - desc: Run all unit tests with coverage and generate summaries + HTML reports - vars: - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/unit") }}' - cmds: - - echo "Removing existing coverage directory and reports" - - rm -rf {{.COVERAGE_DIR}}/* - - echo "Creating new coverage directory" - - mkdir -p {{.COVERAGE_DIR}} - - | - set -euo pipefail - # Build list of modules from GO_MOD_DIR_UNIT_TEST - modules="" - {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} - {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} - {{end}} - for m in $modules; do - if [ -d "$m" ]; then - echo "[coverage] Testing module: $m" - ( - cd "$m" - # Run tests with coverprofile (text format) - if go test -covermode=atomic -coverprofile="{{.COVERAGE_DIR}}/$m.out.tmp" ./... -json 2>&1 | tee "{{.COVERAGE_DIR}}/test-report-$m.json" >/dev/null; then - echo "[coverage] Completed: $m" - else - status=$? - echo "[coverage][warn] Tests failed in $m (exit $status); continuing" - fi - ) - # Filter out generated files (matching codecov.yml ignores) - if [ -f "{{.COVERAGE_DIR}}/$m.out.tmp" ]; then - grep -v "\.pb\.go" "{{.COVERAGE_DIR}}/$m.out.tmp" | grep -v "mock_.*\.go" > "{{.COVERAGE_DIR}}/$m.out" || echo "mode: atomic" > "{{.COVERAGE_DIR}}/$m.out" - rm "{{.COVERAGE_DIR}}/$m.out.tmp" - else - echo "[coverage] No coverage generated for $m" - echo "mode: atomic" > "{{.COVERAGE_DIR}}/$m.out" - fi - fi - done - - - | - set -euo pipefail - echo "[coverage] Generating per-module summaries" - : > {{.COVERAGE_DIR}}/summary.txt - # Build list of modules from GO_MOD_DIR_UNIT_TEST - modules="" - {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} - {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} - {{end}} - for m in $modules; do - if [ -f "{{.COVERAGE_DIR}}/$m.out" ]; then - if (cd "$m" && go tool cover -func={{.COVERAGE_DIR}}/$m.out > {{.COVERAGE_DIR}}/$m.func.txt 2>/dev/null); then - tail -n1 {{.COVERAGE_DIR}}/$m.func.txt | sed "s/^total:/[$m] total:/" >> {{.COVERAGE_DIR}}/summary.txt || true - else - echo "[$m] total: (error generating summary)" >> {{.COVERAGE_DIR}}/summary.txt - fi - fi - done - echo "[coverage] Summary:"; cat {{.COVERAGE_DIR}}/summary.txt - - - | - set -euo pipefail - echo "[coverage] Generating HTML reports" - # Build list of modules from GO_MOD_DIR_UNIT_TEST - modules="" - {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} - {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} - {{end}} - for m in $modules; do - if [ -f "{{.COVERAGE_DIR}}/$m.out" ] && [ -d "$m" ]; then - (cd "$m" && go tool cover -html={{.COVERAGE_DIR}}/$m.out -o {{.COVERAGE_DIR}}/$m.html || true) - fi - done - echo "[coverage] Generated HTML files:"; ls -1 {{.COVERAGE_DIR}}/*.html 2>/dev/null | sed 's|{{.COVERAGE_DIR}}/| - |' || echo " (none)" - - bench: - desc: Run bench tests on codebase - cmds: # run in sequence - - task: server:bench - - echo "Done" - - test:e2e: - desc: Run end-to-end tests for local deployment and network deployment - aliases: [e2e] - cmds: - - task: test:e2e:local - - task: test:e2e:network - - test:e2e:local:cli: - desc: Run 
only local CLI tests (with dedicated infrastructure) - aliases: [e2e:local:cli] - vars: - PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' - RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "false" }}' - RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}' - RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}' - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - env: - DIRECTORY_E2E_DEPLOYMENT_MODE: "local" - cmds: - - defer: { task: deploy:kubernetes:local:cleanup } - - defer: { task: deploy:kubernetes:local:port-forward:cleanup } - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "dir-server" - COVERAGE_DIR: "{{ .COVERAGE_DIR }}" - - task: deploy:kubernetes:local - vars: - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" - DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}" - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - task: deploy:kubernetes:local:port-forward - - | - # Run E2E tests with coverage if enabled - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/local -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-cli.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/local-cli.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-cli.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-cli.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-cli.out - rm {{.COVERAGE_DIR}}/local-cli.out.tmp - fi - else - go test -C ./e2e/local . 
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - fi - - test:e2e:client: - desc: Run only client library tests (with dedicated infrastructure) - aliases: [e2e:client] - vars: - PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' - RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "true" }}' - RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}' - RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}' - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - env: - DIRECTORY_E2E_DEPLOYMENT_MODE: "local" - cmds: - - defer: { task: deploy:kubernetes:local:cleanup } - - defer: { task: deploy:kubernetes:local:port-forward:cleanup } - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "dir-server" - COVERAGE_DIR: "{{ .COVERAGE_DIR }}" - - task: deploy:kubernetes:local - vars: - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" - DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}" - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - task: deploy:kubernetes:local:port-forward - - | - # Run E2E tests with coverage if enabled - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/client -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/local-client.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-client.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-client.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-client.out - rm {{.COVERAGE_DIR}}/local-client.out.tmp - fi - else - go test -C ./e2e/client . 
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - fi - - test:e2e:local: - desc: Run end-to-end tests for local deployment (Client + CLI + Rate limiting tests) - aliases: [e2e:local] - vars: - PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' - RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "true" }}' - RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}' - RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}' - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - env: - DIRECTORY_E2E_DEPLOYMENT_MODE: "local" - cmds: - - defer: { task: deploy:kubernetes:local:cleanup } - - defer: { task: deploy:kubernetes:local:port-forward:cleanup } - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "dir-server" - # Bootstrap infrastructure once for all test suites - - task: deploy:kubernetes:local - vars: - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" - DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}" - DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}" - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - task: deploy:kubernetes:local:port-forward - # Run client library tests first (faster feedback) - - | - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/client -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="!ratelimit" - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/local-client.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-client.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-client.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-client.out - rm {{.COVERAGE_DIR}}/local-client.out.tmp - fi - else - go test -C ./e2e/client . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="!ratelimit" - fi - # Run local CLI tests second (same infrastructure) - - | - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/local -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-cli.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/local-cli.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-cli.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-cli.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-cli.out - rm {{.COVERAGE_DIR}}/local-cli.out.tmp - fi - else - go test -C ./e2e/local . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - fi - # Wait 3 seconds for rate limit reset - - sleep 3 - # Run rate limiting tests LAST - - | - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/client -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client.out . 
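# Sketch (illustrative, not part of this Taskfile): the suite ordering above leans on
# Ginkgo v2 label filters; a minimal sketch of the same split, assuming the specs
# carry Label("ratelimit"):
go test ./e2e/client -ginkgo.label-filter='!ratelimit'   # everything except rate-limit specs
sleep 3                                                  # let the server-side token bucket refill
go test ./e2e/client -ginkgo.label-filter='ratelimit'    # rate-limit specs last, in isolation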
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="ratelimit" - else - go test -C ./e2e/client . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="ratelimit" - fi - - test:e2e:network: - desc: Run end-to-end tests for network deployment (Multi-peer CLI tests) - aliases: [e2e:network] - vars: - PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - env: - DIRECTORY_E2E_DEPLOYMENT_MODE: "network" - cmds: - - defer: { task: deploy:kubernetes:network:cleanup } - - defer: { task: deploy:kubernetes:network:port-forward:cleanup } - # Extract coverage from all network namespaces - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "bootstrap" - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "peer1" - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "peer2" - - defer: - task: test:e2e:coverage:extract-pods - vars: - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - NAMESPACE: "peer3" - # Bootstrap - # NOTE: Run as a dedicated task instead of dependency, otherwise the port forwarding won't work - - task: deploy:kubernetes:network - vars: - PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" - E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" - - task: deploy:kubernetes:network:port-forward - # Run network tests with proper isolation and cleanup - - | - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - mkdir -p {{.COVERAGE_DIR}} - go test -C ./e2e/network -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/network.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/network.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/network.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/network.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/network.out - rm {{.COVERAGE_DIR}}/network.out.tmp - fi - else - go test -C ./e2e/network . 
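# Sketch (illustrative, not part of this Taskfile): with coverage enabled, each peer
# namespace above gets its own deferred extraction pass. A typical invocation from the
# repo root, using the variables defined in this file:
task test:e2e:network E2E_COVERAGE_ENABLED=true COVERAGE_DIR="$PWD/.coverage/e2e"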
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v - fi - - test:e2e:spire: - desc: Run end-to-end tests for SPIRE deployment - cmds: - # Run SPIRE deployment - - defer: { task: test:spire:cleanup } - - task: test:spire - # Run SDK tests - - task: sdk:deps:javascript - - task: sdk:test-env:spiffe:load-test-image - - task: sdk:test:all:spiffe - - # TODO: move spire out from here - # TODO: change to Ingress services instead of LoadBalancer - test:spire: - desc: Test SPIRE federation setup between DIR and DIRCTL clusters - deps: - - task: helm:gen - vars: - AUTH_MODE: '{{ .AUTH_MODE | default "x509" }}' # or "jwt" - DIR_TRUST_DOMAIN: '{{ .DIR_TRUST_DOMAIN | default "dir.example" }}' - DIRCTL_TRUST_DOMAIN: '{{ .DIRCTL_TRUST_DOMAIN | default "dirctl.example" }}' - DIR_DNS_NAME_TEMPLATE: '{{ .DIR_DNS_NAME_TEMPLATE | default "127.0.0.1.nip.io" }}' - cmds: - # Setup DIR cluster - - task: deploy:kubernetes:setup-cluster - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - # Start cloud provider for LoadBalancer support - - | - echo "Starting Kind cloud provider for LoadBalancer support..." - if [[ {{OS}} == "darwin" ]]; then - sudo go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & - elif [[ {{OS}} == "linux" ]]; then - go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & - else - echo "Unknown OS" - exit 1 - fi - echo "Cloud provider started in background" - - # Deploy SPIRE on DIR cluster - - task: deploy:kubernetes:spire - vars: - TRUST_DOMAIN: "{{ .DIR_TRUST_DOMAIN }}" - BUNDLE_PATH: /tmp/{{ .DIR_TRUST_DOMAIN }}.spiffe - - # Setup DIRCTL cluster - - task: deploy:kubernetes:setup-cluster - vars: - KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" - - # Start cloud provider for LoadBalancer support - - | - echo "Starting Kind cloud provider for LoadBalancer support..." - go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & - echo "Cloud provider started in background" - - # Deploy SPIRE on DIRCTL cluster - - task: deploy:kubernetes:spire - vars: - TRUST_DOMAIN: "{{ .DIRCTL_TRUST_DOMAIN }}" - BUNDLE_PATH: /tmp/{{ .DIRCTL_TRUST_DOMAIN }}.spiffe - - # Get DIR cluster service addresses - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - | - echo "Getting DIR cluster service addresses..." - DIR_BUNDLE_IP=$(kubectl get service -n spire spire-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") - echo "DIR_BUNDLE_ADDRESS=${DIR_BUNDLE_IP}:8443" >> /tmp/spire-addresses.env - - # Get DIRCTL cluster service addresses - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" - - | - echo "Getting DIRCTL cluster service addresses..." - DIRCTL_BUNDLE_IP=$(kubectl get service -n spire spire-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") - echo "DIRCTL_BUNDLE_ADDRESS=${DIRCTL_BUNDLE_IP}:8443" >> /tmp/spire-addresses.env - - # Create DIR server federation config - - | - source /tmp/spire-addresses.env - echo "Creating DIR server federation config..." 
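# Sketch (illustrative, not part of this Taskfile): the address discovery used above is
# plain kubectl JSONPath with a localhost fallback; standalone, against the same
# service, it reads:
LB_IP=$(kubectl get service -n spire spire-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1")
echo "bundle endpoint: ${LB_IP}:8443"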
- cat > /tmp/server-federation.yaml << EOF - apiserver: - service: - type: LoadBalancer - config: - authn: - enabled: true - mode: "{{ .AUTH_MODE }}" - audiences: - - "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" - authz: - enabled: true - trust_domain: {{ .DIR_TRUST_DOMAIN }} - extraEnv: - - name: DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE - value: "true" - spire: - enabled: true - trustDomain: {{ .DIR_TRUST_DOMAIN }} - className: dir-spire - dnsNameTemplates: - - "{{ .DIR_DNS_NAME_TEMPLATE }}" - federation: - - trustDomain: {{ .DIRCTL_TRUST_DOMAIN }} - bundleEndpointURL: https://${DIRCTL_BUNDLE_ADDRESS} - bundleEndpointProfile: - type: https_spiffe - endpointSPIFFEID: spiffe://{{ .DIRCTL_TRUST_DOMAIN }}/spire/server - trustDomainBundle: |- - $(cat /tmp/{{ .DIRCTL_TRUST_DOMAIN }}.spiffe) - EOF - - # Deploy DIR server with federation - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - task: deploy:kubernetes:dir - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - HELM_EXTRA_ARGS: "-f /tmp/server-federation.yaml" - DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s - - # Get DIR API server address - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - | - echo "Getting DIR API server address..." - DIR_API_IP=$(kubectl get service -n dir-server dir-apiserver -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") - echo "DIR_SERVER_ADDRESS=${DIR_API_IP}:8888" >> /tmp/spire-addresses.env - - # Create DIRCTL client federation config (for dirctl in dir.example) - - | - source /tmp/spire-addresses.env - echo "Creating DIRCTL client federation config for dir.example..." - cat > /tmp/client-federation-dir-example.yaml << EOF - env: - - name: DIRECTORY_CLIENT_SERVER_ADDRESS - value: ${DIR_SERVER_ADDRESS} - - name: DIRECTORY_CLIENT_AUTH_MODE - value: "{{ .AUTH_MODE }}" - - name: DIRECTORY_CLIENT_JWT_AUDIENCE - value: "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" - spire: - enabled: true - trustDomain: {{ .DIR_TRUST_DOMAIN }} - className: dir-spire - EOF - - # Deploy DIRCTL client in dir.example trust domain (same cluster as dir) - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - task: deploy:kubernetes:dirctl - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - HELM_NAMESPACE: "dir-client" - HELM_EXTRA_ARGS: "-f /tmp/client-federation-dir-example.yaml" - - # Create DIRCTL client federation config (for dirctl.example cluster) - - | - source /tmp/spire-addresses.env - echo "Creating DIRCTL client federation config..." 
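# Sketch (illustrative, not part of this Taskfile): once both clusters are federated,
# the relationship can be spot-checked from the SPIRE server itself; the pod name and
# binary path below assume the stock spiffe/helm-charts-hardened layout:
kubectl --context kind-dir.example exec -n spire spire-server-0 -c spire-server -- \
  /opt/spire/bin/spire-server federation list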
- cat > /tmp/client-federation.yaml << EOF - env: - - name: DIRECTORY_CLIENT_SERVER_ADDRESS - value: ${DIR_SERVER_ADDRESS} - - name: DIRECTORY_CLIENT_AUTH_MODE - value: "{{ .AUTH_MODE }}" - - name: DIRECTORY_CLIENT_JWT_AUDIENCE - value: "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" - spire: - enabled: true - trustDomain: {{ .DIRCTL_TRUST_DOMAIN }} - className: dir-spire - federation: - - trustDomain: {{ .DIR_TRUST_DOMAIN }} - bundleEndpointURL: https://${DIR_BUNDLE_ADDRESS} - bundleEndpointProfile: - type: https_spiffe - endpointSPIFFEID: spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server - trustDomainBundle: |- - $(cat /tmp/{{ .DIR_TRUST_DOMAIN }}.spiffe) - EOF - - # Deploy DIRCTL client with federation (in dirctl.example cluster) - - task: deploy:kubernetes:context - vars: - KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" - - task: deploy:kubernetes:dirctl - vars: - KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" - HELM_EXTRA_ARGS: "-f /tmp/client-federation.yaml" - - # Display completion status - - | - source /tmp/spire-addresses.env - echo "==============================================" - echo "SPIRE federation test deployment complete!" - echo "==============================================" - echo "DIR Server: ${DIR_SERVER_ADDRESS}" - echo "DIR Bundle: ${DIR_BUNDLE_ADDRESS}" - echo "DIRCTL Bundle: ${DIRCTL_BUNDLE_ADDRESS}" - echo "" - echo "Trust domains setup:" - echo " DIR: {{ .DIR_TRUST_DOMAIN }}" - echo " DIRCTL Internal: {{ .DIR_TRUST_DOMAIN }}" - echo " DIRCTL External: {{ .DIRCTL_TRUST_DOMAIN }}" - echo "" - echo "To verify deployment:" - echo " kubectl --context kind-{{ .DIR_TRUST_DOMAIN }} get pods -n dir-server" - echo " kubectl --context kind-{{ .DIR_TRUST_DOMAIN }} logs -n dir-client -l app.kubernetes.io/name=dirctl" - echo " kubectl --context kind-{{ .DIRCTL_TRUST_DOMAIN }} logs -n dir-client -l app.kubernetes.io/name=dirctl" - echo "" - echo "To cleanup:" - echo " task test:spire:cleanup" - - test:spire:cleanup: - desc: Cleanup SPIRE federation test clusters - vars: - DIR_TRUST_DOMAIN: '{{ .DIR_TRUST_DOMAIN | default "dir.example" }}' - DIRCTL_TRUST_DOMAIN: '{{ .DIRCTL_TRUST_DOMAIN | default "dirctl.example" }}' - cmds: - - echo "Cleaning up DIR cluster ({{ .DIR_TRUST_DOMAIN }})..." - - task: deploy:kubernetes:cleanup - vars: - KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" - - echo "Cleaning up DIRCTL cluster ({{ .DIRCTL_TRUST_DOMAIN }})..." - - task: deploy:kubernetes:cleanup - vars: - KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" - - echo "Cleanup complete!" 
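# Sketch (illustrative, not part of this Taskfile): a typical round trip through the
# two tasks above, overriding the defaults declared in this file:
task test:spire AUTH_MODE=jwt DIR_TRUST_DOMAIN=dir.example DIRCTL_TRUST_DOMAIN=dirctl.example
task test:spire:cleanup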
- - test:e2e:coverage: - desc: Run end-to-end tests with coverage - aliases: [e2e:coverage] - deps: - - build:coverage - vars: - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - cmds: - - echo "Removing existing coverage directory and reports" - - rm -rf {{.COVERAGE_DIR}}/* - - echo "Creating new coverage directory" - - mkdir -p {{.COVERAGE_DIR}} - - E2E_COVERAGE_ENABLED=true task test:e2e - - task: test:e2e:coverage:process - vars: - COVERAGE_DIR: "{{ .COVERAGE_DIR }}" - - test:e2e:coverage:extract-pods: - desc: Extract coverage data from Kubernetes pods - internal: true - vars: - E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' - NAMESPACE: '{{ .NAMESPACE | default "dir-server" }}' - POD_SELECTOR: '{{ .POD_SELECTOR | default "app.kubernetes.io/name=apiserver" }}' - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e/pods") }}' - cmds: - - | - if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then - echo "[coverage] Triggering graceful shutdown of pod to flush coverage data..." - - # Get pod matching the selector - POD_NAME=$({{.KUBECTL_BIN}} get pods -n {{.NAMESPACE}} -l {{.POD_SELECTOR}} -o jsonpath='{.items[*].metadata.name}') - - if [ -n "$POD_NAME" ]; then - # Send SIGTERM to PID 1 to trigger coverage flush (like the PoC) - echo "[coverage] Sending SIGTERM to PID 1 in pod: $POD_NAME" - {{.KUBECTL_BIN}} exec -n {{.NAMESPACE}} $POD_NAME -- sh -c 'kill -TERM 1' 2>/dev/null || true - - # Wait for container to restart (coverage files persist in emptyDir volume) - echo "[coverage] Waiting for container to restart and coverage to be flushed..." - sleep 5 - - # Extract coverage files (emptyDir persists across container restarts) - echo "[coverage] Extracting coverage data from pod: $POD_NAME" - mkdir -p {{.COVERAGE_DIR}}/pod-$POD_NAME - {{.KUBECTL_BIN}} cp -n {{.NAMESPACE}} $POD_NAME:/tmp/coverage {{.COVERAGE_DIR}}/pod-$POD_NAME 2>&1 || echo "[coverage][warn] Failed to extract from $POD_NAME" - else - echo "[coverage][warn] No pod found matching selector {{.POD_SELECTOR}}" - fi - echo "[coverage] Coverage data extracted to {{.COVERAGE_DIR}}" - else - echo "[coverage] E2E_COVERAGE_ENABLED is false, skipping pod coverage extraction" - fi - - test:e2e:coverage:process: - desc: Collect and merge E2E coverage data from pods - vars: - COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' - cmds: - - | - echo "[e2e-coverage] Collecting and merging E2E coverage data from pods" - if [ -d "{{.COVERAGE_DIR}}/pods" ]; then - # Build comma-separated list of pod directories - pod_dirs=$(find {{.COVERAGE_DIR}}/pods -mindepth 1 -maxdepth 1 -type d 2>/dev/null | tr '\n' ',' | sed 's/,$//') - - if [ -n "$pod_dirs" ]; then - echo " Found pod coverage directories" - mkdir -p {{.COVERAGE_DIR}}/server-binary - go tool covdata merge -i="$pod_dirs" -o={{.COVERAGE_DIR}}/server-binary - go tool covdata textfmt -i={{.COVERAGE_DIR}}/server-binary -o={{.COVERAGE_DIR}}/server.out.tmp - # Filter out generated files (matching codecov.yml ignores) - if [ -f {{.COVERAGE_DIR}}/server.out.tmp ]; then - grep -v "\.pb\.go" {{.COVERAGE_DIR}}/server.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/server.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/server.out - rm {{.COVERAGE_DIR}}/server.out.tmp - fi - else - echo "No pod coverage found" - fi - else - echo "No pods directory found" - fi - - ## - ## Linters - ## - lint:go: - desc: Run Golang linters - deps: - - task: deps:golangci-lint - vars: - FIX: '{{ .FIX | default "false" 
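# Sketch (illustrative, not part of this Taskfile): the merge step above is standard
# `go tool covdata` usage for binary GOCOVERDIR output; with hypothetical pod
# directories pod-a and pod-b:
mkdir -p merged
go tool covdata merge -i=pod-a,pod-b -o=merged
go tool covdata textfmt -i=merged -o=server.out
go tool cover -func=server.out | tail -n1   # prints the overall coverage total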
}}' - FIX_FLAG: '{{if eq .FIX "true"}}--fix{{end}}' - cmds: - - for: { var: GO_MOD_DIR } - cmd: | - echo "Running golangci-lint in {{.ITEM}}" - cd {{.ITEM}} - {{.GOLANGCI_LINT_BIN}} run --config {{.ROOT_DIR}}/.golangci.yml {{.FIX_FLAG}} - - lint:buf: - desc: Run Buf linters - deps: - - task: deps:protoc - - task: deps:bufbuild - dir: ./proto - cmds: - - "{{.BUFBUILD_BIN}} lint" - - lint:helm: - desc: Run Helm linters - deps: - - task: deps:helm - vars: - HELM_CHARTS: - sh: find ./install/charts -maxdepth 1 -mindepth 1 -type d -exec basename {} \; - cmds: - - for: { var: HELM_CHARTS } - cmd: | - echo "Running helm lint on {{.ITEM}}" - {{.HELM_BIN}} dependency update ./install/charts/{{.ITEM}} - {{.HELM_BIN}} lint ./install/charts/{{.ITEM}} --with-subcharts - - lint: - desc: Run all linters - deps: - - lint:go - - lint:buf - - lint:helm - - ## - ## License - ## - license: - desc: Check licenses - deps: - - task: deps:licensei - cmds: - - for: { var: GO_MOD_DIR } - cmd: echo "Running licensei in {{.ITEM}}" && cd {{.ITEM}} && {{ .LICENSEI_BIN }} check --config {{.ROOT_DIR}}/.licensei.toml - - license:cache: - desc: Check licenses - deps: - - task: deps:licensei - cmds: - - for: { var: GO_MOD_DIR } - cmd: echo "Running licensei in {{.ITEM}}" && cd {{.ITEM}} && {{ .LICENSEI_BIN }} cache --config {{.ROOT_DIR}}/.licensei.toml - - ## - ## Various proof-of-concept tasks - ## - poc:integration: - desc: Run integration against VS Code and Continue proof-of-concept. - dir: ./docs/research/integrations - prompt: - - | - Are you sure you want to run integration proof-of-concept? - This will overwrite your local workspace VSCode and Continue configuration. - vars: - RECORD_FILE: '{{ .RECORD_FILE | default "docs/research/integrations/demo.record.json" }}' - cmd: | - # Prepare Python environment - python3 -m venv venv - . ./venv/bin/activate - python3 -m pip install pyyaml - - # Run script - python3 ./importer.py \ - -record={{.ROOT_DIR}}/{{.RECORD_FILE}} \ - -vscode_path={{.ROOT_DIR}}/.vscode \ - -continue_path={{.ROOT_DIR}}/.continue/assistants - - # Print env requirements - cat .env.example - - poc:mcp-to-oasf: - desc: Import MCP-to-OASF Exporter Agent into the current workspace. - cmds: - - task: poc:integration - vars: - RECORD_FILE: "docs/research/integrations/mcp-to-oasf-agent/extractor.record.json" - - ## - ## Dependencies - ## - deps: - desc: Install dependencies - cmds: - - task: deps:helm - - task: deps:kubectl - - task: deps:kind - - task: deps:protoc - - task: deps:bufbuild - - task: deps:uv - - task: deps:cosign - - deps:bin-dir: - desc: Create bin directory - internal: true - run: once - cmd: mkdir -p {{.BIN_DIR}} - status: - - test -d {{.BIN_DIR}} - - deps:dirctl-bin: - desc: Compile dirctl binary - internal: true - run: once - cmds: - - task: cli:compile - status: - - test -f {{.BIN_DIR}}/dirctl - - deps:helm: - desc: Ensure supported Helm version is installed - internal: true - deps: - - deps:bin-dir - preconditions: - - which curl - - which tar - cmds: - - cmd: echo "Downloading Helm v{{.HELM_VERSION}}..." - - cmd: curl -sSfL 'https://get.helm.sh/helm-v{{.HELM_VERSION}}-{{OS}}-{{ARCH}}.tar.gz' --output - | tar xzvOf - '{{OS}}-{{ARCH}}/helm' > {{.HELM_BIN}} - - cmd: chmod +x {{.HELM_BIN}} - status: - - test -x {{.HELM_BIN}} - - deps:kubectl: - desc: Ensure supported kubectl version is installed - internal: true - deps: - - deps:bin-dir - preconditions: - - which curl - cmds: - - cmd: echo "Downloading Kubectl v{{.KUBECTL_VERSION}}..." 
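# Sketch (illustrative, not part of this Taskfile): the kubectl download below is
# unverified; upstream also publishes SHA-256 checksums, so a hardened variant
# (linux/amd64 shown) would be:
curl -LO "https://dl.k8s.io/release/v1.31.3/bin/linux/amd64/kubectl"
curl -LO "https://dl.k8s.io/release/v1.31.3/bin/linux/amd64/kubectl.sha256"
echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check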
- - cmd: curl -L "https://dl.k8s.io/release/v{{.KUBECTL_VERSION}}/bin/{{OS}}/{{ARCH}}/kubectl" -o {{.KUBECTL_BIN}} - - cmd: chmod +x {{.KUBECTL_BIN}} - status: - - test -x {{.KUBECTL_BIN}} - - deps:kind: - desc: Ensure supported kind version is installed - internal: true - deps: - - deps:bin-dir - preconditions: - - which go - cmds: - - cmd: echo "Downloading Kind v{{.KIND_VERSION}}..." - - cmd: GOBIN={{.BIN_DIR}} go install sigs.k8s.io/kind@v{{.KIND_VERSION}} - - cmd: mv {{.BIN_DIR}}/kind {{.KIND_BIN}} - status: - - test -x {{.KIND_BIN}} - - deps:protoc: - desc: Ensure supported Protoc version and plugins are installed - internal: true - deps: - - deps:bin-dir - preconditions: - - which go - - which curl - - which unzip - vars: - ARCH_TYPE: '{{ if eq ARCH "arm64" }}aarch_64{{ else if eq ARCH "amd64" }}x86_64{{else if eq ARCH "s390x"}}x390_64{{ else }}{{ARCH}}{{ end }}' - OS_VARIANT: '{{ if eq OS "darwin" }}osx-universal_binary{{ else if eq OS "windows" }}win64{{else}}linux-{{.ARCH_TYPE}}{{ end }}' - cmds: - - cmd: echo "Downloading Protoc v{{.PROTOC_VERSION}}..." - - cmd: | - curl -sL https://github.com/protocolbuffers/protobuf/releases/download/v{{.PROTOC_VERSION}}/protoc-{{.PROTOC_VERSION}}-{{.OS_VARIANT}}.zip -o {{.BIN_DIR}}/tmp.zip - unzip -j {{.BIN_DIR}}/tmp.zip "bin/protoc" -d {{.BIN_DIR}} - mv {{.BIN_DIR}}/protoc {{.PROTOC_BIN}} - rm {{.BIN_DIR}}/tmp.zip - - cmd: chmod +x {{.PROTOC_BIN}} - - cmd: echo "Downloading go plugins for protoc..." - - cmd: go install google.golang.org/protobuf/cmd/protoc-gen-go@latest - - cmd: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest - - cmd: go install github.com/NathanBaulch/protoc-gen-cobra@latest - status: - - test -x {{.PROTOC_BIN}} - - deps:bufbuild: - desc: Ensure supported bufbuild version is installed - internal: true - deps: - - deps:bin-dir - preconditions: - - which curl - vars: - ARCH_TYPE: '{{ if eq ARCH "amd64" }}x86_64{{ else }}{{ARCH}}{{ end }}' - cmds: - - cmd: echo "Downloading BufBuild v{{.BUFBUILD_VERSION}}..." 
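# Sketch (illustrative, not part of this Taskfile): the protoc plugins above are
# installed at @latest, which is not reproducible across machines; pinning explicit
# versions (the versions below are illustrative, not taken from this repo) would be:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1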
- - cmd: | - curl -L "https://github.com/bufbuild/buf/releases/download/v{{.BUFBUILD_VERSION}}/buf-{{OS}}-{{.ARCH_TYPE}}" -o {{.BUFBUILD_BIN}} - - cmd: chmod +x {{.BUFBUILD_BIN}} - status: - - test -x {{.BUFBUILD_BIN}} - - deps:tidy: - desc: Ensure dependencies are up-to-date - cmds: - - for: { var: GO_MOD_DIR } - cmd: go -C {{.ITEM}} mod tidy -go={{.GO_VERSION}} - - deps:multimod-bin: - desc: Build the multimod binary - internal: true - deps: - - deps:bin-dir - vars: - MULTIMOD_REPO_DIR: "{{ .BIN_DIR }}/opentelemetry-go-build-tools" - cmds: - - git clone https://github.com/open-telemetry/opentelemetry-go-build-tools --branch multimod/v{{.MULTIMOD_VERSION}} {{.MULTIMOD_REPO_DIR}} - - go build -C {{.MULTIMOD_REPO_DIR}}/multimod -o {{.MULTIMOD_BIN}} main.go - - rm -rf {{.MULTIMOD_REPO_DIR}} - status: - - test -x {{.MULTIMOD_BIN}} - - deps:golangci-lint: - desc: Install golangci-lint - internal: true - deps: - - deps:bin-dir - cmds: - - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s v{{.GOLANGCI_LINT_VERSION}} - - mv {{.BIN_DIR}}/golangci-lint {{.GOLANGCI_LINT_BIN}} - - chmod +x {{.GOLANGCI_LINT_BIN}} - status: - - test -x {{.GOLANGCI_LINT_BIN}} - - deps:licensei: - desc: Install licensei - internal: true - deps: - - deps:bin-dir - cmds: - - curl -sfL https://raw.githubusercontent.com/goph/licensei/master/install.sh | bash -s v{{.LICENSEI_VERSION}} - - mv {{.BIN_DIR}}/licensei {{.LICENSEI_BIN}} - - chmod +x {{.LICENSEI_BIN}} - status: - - test -x {{.LICENSEI_BIN}} - - deps:uv: - desc: Install uv - internal: true - deps: - - deps:bin-dir - env: - UV_INSTALL_DIR: "{{ .BIN_DIR }}" - cmds: - - curl -sfL https://astral.sh/uv/{{.UV_VERSION}}/install.sh | sh - - mv {{.BIN_DIR}}/uv {{.UV_BIN}} - - chmod +x {{.UV_BIN}} - - rm {{.BIN_DIR}}/uvx - status: - - test -x {{.BIN_DIR}}/uv - - deps:cosign: - desc: Install sigstore cosign - dir: "{{ .BIN_DIR }}" - internal: true - cmds: - - curl -sfL https://github.com/sigstore/cosign/releases/download/v{{.COSIGN_VERSION}}/cosign-{{OS}}-{{ARCH}} -o cosign-{{.COSIGN_VERSION}} - - chmod +x cosign-{{.COSIGN_VERSION}} - status: - - test -x cosign-{{.COSIGN_VERSION}} - - deps:htpasswd: - desc: Install htpasswd - dir: "{{ .BIN_DIR }}" - internal: true - cmds: - - npm install -g htpasswd - - ## - ## Helm - ## - helm:gen: - desc: Update Helm dependencies for chart and subcharts - internal: true - deps: - - deps:helm - vars: - HELM_ALL_CHART_PATHS: - sh: find . 
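# Sketch (illustrative, not part of this Taskfile): deps:tidy above relies on
# `go -C <dir>` (Go 1.20+) to run a command from another module directory, e.g.:
go -C ./cli mod tidy -go=1.25.2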
-name Chart.yaml -exec dirname {} \; - cmds: - # Add Helm repo - - "{{ .HELM_BIN }} repo add project-zot http://zotregistry.dev/helm-charts" - - "{{ .HELM_BIN }} repo add spiffe https://spiffe.github.io/helm-charts-hardened" - - # Update dependencies - - for: { var: HELM_ALL_CHART_PATHS } - cmd: "cd {{ .ITEM }} && {{ .HELM_BIN }} dependency update" +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +version: "3" + +env: + GOWORK: off + +vars: + ## Version + RELEASE_VERSION: + sh: grep 'version:' versions.yaml | awk '{print $2}' + RELEASE_VERSION_LDFLAG: "-X 'github.com/agntcy/dir/api/version.Version={{ .RELEASE_VERSION }}'" + COMMIT_SHA: + sh: git rev-parse --short HEAD + COMMIT_SHA_LDFLAG: "-X 'github.com/agntcy/dir/api/version.CommitHash={{ .COMMIT_SHA }}'" + VERSION_LDFLAGS: "{{ .RELEASE_VERSION_LDFLAG }} {{ .COMMIT_SHA_LDFLAG }}" + + ## Image config + IMAGE_REPO: '{{ .IMAGE_REPO | default "ghcr.io/agntcy" }}' + IMAGE_TAG: "{{ .IMAGE_TAG | default .COMMIT_SHA }}" + IMAGE_BAKE_ENV: "IMAGE_REPO={{.IMAGE_REPO}} IMAGE_TAG={{.IMAGE_TAG}}" + IMAGE_BAKE_OPTS: '{{ .IMAGE_BAKE_OPTS | default "" }}' + BAKE_ENV: '{{ .IMAGE_BAKE_ENV }} EXTRA_LDFLAGS="{{.VERSION_LDFLAGS}}"' + COVERAGE_IMAGE_TAG: "{{ .IMAGE_TAG | default .COMMIT_SHA }}-coverage" + COVERAGE_IMAGE_BAKE_ENV: "IMAGE_REPO={{.IMAGE_REPO}} IMAGE_TAG={{.COVERAGE_IMAGE_TAG}}" + COVERAGE_BAKE_ENV: '{{ .COVERAGE_IMAGE_BAKE_ENV }} EXTRA_LDFLAGS="{{.VERSION_LDFLAGS}}"' + COVERAGE_PKGS: '{{ .COVERAGE_PKGS | default "github.com/agntcy/dir/api/...,github.com/agntcy/dir/cli/...,github.com/agntcy/dir/client/...,github.com/agntcy/dir/importer/...,github.com/agntcy/dir/utils/..." }}' + + ## Dependency config + BIN_DIR: "{{ .ROOT_DIR }}/bin" + DIRCTL_BIN: "{{ .BIN_DIR}}/dirctl" + HELM_VERSION: "3.16.3" + HELM_BIN: "{{ .BIN_DIR }}/helm-{{.HELM_VERSION}}" + KUBECTL_VERSION: "1.31.3" + KUBECTL_BIN: "{{ .BIN_DIR }}/kubectl-{{.KUBECTL_VERSION}}" + KIND_VERSION: "0.25.0" + KIND_BIN: "{{ .BIN_DIR }}/kind-{{.KIND_VERSION}}" + PROTOC_VERSION: "27.1" + PROTOC_BIN: "{{ .BIN_DIR }}/protoc-{{.PROTOC_VERSION}}" + BUFBUILD_VERSION: "1.50.1" + BUFBUILD_BIN: "{{ .BIN_DIR }}/bufbuild-{{.BUFBUILD_VERSION}}" + GO_VERSION: "1.25.2" + MULTIMOD_VERSION: "0.17.0" + MULTIMOD_BIN: "{{ .BIN_DIR }}/multimod-{{.MULTIMOD_VERSION}}" + GOLANGCI_LINT_VERSION: "2.5.0" + GOLANGCI_LINT_BIN: "{{ .BIN_DIR }}/golangci-lint-{{.GOLANGCI_LINT_VERSION}}" + LICENSEI_VERSION: "0.9.0" + LICENSEI_BIN: "{{ .BIN_DIR }}/licensei-{{.LICENSEI_VERSION}}" + UV_VERSION: "0.8.23" + UV_BIN: "{{ .BIN_DIR }}/uv-{{.UV_VERSION}}" + UV_PUBLISH_TOKEN: '{{ .UV_PUBLISH_TOKEN | default "" }}' + COSIGN_VERSION: "2.5.3" + COSIGN_BIN: "{{ .BIN_DIR }}/cosign-{{.COSIGN_VERSION}}" + HUB_API_VERSION: "main" + ZOT_VERSION: "2.1.11" + SPIRE_VERSION: "1.13.3" + + ## Go module discovery + GO_MOD_DIR: + sh: find . -name go.mod -not -path "./tmp*" -exec dirname {} \; + GO_MOD_DIR_UNIT_TEST: + sh: find . 
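# Sketch (illustrative, not part of this Taskfile): RELEASE_VERSION above is scraped
# from versions.yaml; given a file containing a line `version: v0.4.0` (value
# illustrative), the same extraction at a shell prompt is:
grep 'version:' versions.yaml | awk '{print $2}'   # prints v0.4.0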
-name go.mod -not -path "./e2e*" -not -path "./tmp*" -exec dirname {} \; + +tasks: + ## + ## General + ## + default: + cmds: + - task -l + + gen: + desc: Generate code for all components + cmds: + - task: api:gen + - task: helm:gen + + check: + desc: Checks for all code violations + cmds: + - task: lint + - task: license + + build: + desc: Build images for all components + deps: + - task: deps:tidy + - task: gen + vars: + GOARCH: "{{ .GOARCH | default ARCH }}" + EXTRA_FLAGS: '{{ .EXTRA_FLAGS | default "" }}' + cmds: + - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set *.platform=linux/{{.GOARCH}} {{.EXTRA_FLAGS}}" + + build:coverage: + desc: Build images for all components with coverage instrumentation + cmds: + - task: build + vars: + BAKE_ENV: "{{ .COVERAGE_BAKE_ENV }}" + # TODO: -coverpkg should be set to include all packages (server, api, utils) in the coverage report + # but it's not working as expected, so we're using the default coverage package for now + EXTRA_FLAGS: 'coverage --set *.args.BUILD_OPTS="-cover -covermode=atomic"' + + build:all: + desc: Build images for all components for multiple platforms + cmds: + - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set *.platform=linux/amd64,linux/arm64" + + pull: + desc: Pull images for all components + cmds: + - | + images=$({{.BAKE_ENV}} docker buildx bake default --print | jq -r '.target | with_entries(.value |= .tags[0]) | to_entries[] | .value') + echo "$images" | while read image; do + echo "Pulling image: $image" + docker pull $image + done + + push: + desc: Build and push images for all components + prompt: + - Are you sure you want to push the images to remote registry? + cmds: + - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} --set=*.output=type=registry" + + release:create: + desc: Prepare release + deps: + - task: deps:multimod-bin + - task: sdk:deps:python + - task: sdk:deps:javascript + vars: + RELEASE_VERSION: "{{ .RELEASE_VERSION }}" + cmds: + # Switch to new branch + - 'if [ "$(git rev-parse --abbrev-ref HEAD)" != "release/{{.RELEASE_VERSION}}" ]; then git checkout -b release/{{.RELEASE_VERSION}}; fi' + # Update versions.yaml with the new version + - 'awk ''{gsub(/version: .*/,"version: {{.RELEASE_VERSION}}")}1'' versions.yaml > versions.yaml.tmp' + - "mv versions.yaml.tmp versions.yaml" + # Update SDK packages with the new version + - "cd sdk/dir-py && {{ .UV_BIN }} version {{.RELEASE_VERSION}} && cd -" + - "cd sdk/dir-js && npm version {{.RELEASE_VERSION}} --allow-same-version --no-git-tag-version && cd -" + # Add release changes + - | + git add . + git commit -S -m "release(dir): prepare release {{.RELEASE_VERSION}}" + # Verify Go release + - | + {{ .MULTIMOD_BIN }} verify + {{ .MULTIMOD_BIN }} prerelease --all-module-sets --skip-go-mod-tidy=true --commit-to-different-branch=false + # Push prepared release + - task: release:push + + release:push: + internal: true + vars: + RELEASE_VERSION: "{{ .RELEASE_VERSION }}" + prompt: + - "Are you sure you want to push the release branch release/{{.RELEASE_VERSION}} to remote repository?" + cmds: + - | + git push --set-upstream origin release/{{.RELEASE_VERSION}} || true + + ## + ## API + ## + api:gen: + desc: Generates API stubs + dir: ./proto + deps: + - task: deps:protoc + - task: deps:bufbuild + # NOTE(ramizpolic): This allows Taskfile YAML parsing to accept '{' as a starting command token. + # In translation, this is interpreted as a regular multi-line shell script. 
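# Sketch (illustrative, not part of this Taskfile): the bake-driven builds above are
# parameterised entirely through variables; typical local invocations (values
# illustrative):
task build GOARCH=arm64 IMAGE_TAG=dev
task build:coverage          # instrumented images tagged "<commit-sha>-coverage"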
+ cmds: + - "{{.BUFBUILD_BIN}} dep update" + - "{{.BUFBUILD_BIN}} generate" + + api:clean: + desc: Clean generated API stubs + deps: + - api:clean:go + - api:clean:python + - api:clean:javascript + + api:clean:go: + desc: Clean generated golang API stubs + dir: ./api + cmds: + - find . \( -name "*.pb.go" \) -type f -delete + + api:clean:python: + desc: Clean generated Python API stubs + dir: ./sdk/dir-py/agntcy + cmd: rm -drf ./dir + + api:clean:javascript: + desc: Clean generated JS/TS API stubs + dir: ./sdk/dir-js/ + cmd: rm -drf ./api + + ## + ## CLI + ## + cli:compile: + desc: Compile CLI binaries + dir: ./cli + vars: + GOOS: "{{ .GOOS | default OS }}" + GOARCH: "{{ .GOARCH | default ARCH }}" + BINARY_NAME: '{{ .BINARY_NAME | default "dirctl" }}' + OUT_BINARY: '{{ if eq OS "windows" }}{{ .ROOT_DIR }}\\bin\\{{ .BINARY_NAME }}.exe{{ else }}{{ .ROOT_DIR }}/bin/{{ .BINARY_NAME }}{{ end }}' + LDFLAGS: "-s -w -extldflags -static {{ .VERSION_LDFLAGS }}" + TRY_SKIP_COMPILE: '{{ .TRY_SKIP_COMPILE | default "false" }}' + cmds: + - | + if [ "{{.TRY_SKIP_COMPILE}}" = "true" ]; then + if [ -f "{{.OUT_BINARY}}" ]; then + echo "Binary {{.OUT_BINARY}} already exists, skipping compilation." + exit 0 + else + echo "Binary {{.OUT_BINARY}} does not exist, proceeding with compilation." + fi + fi + + CGO_ENABLED=0 GOOS={{.GOOS}} GOARCH={{.GOARCH}} go build -ldflags="{{ .LDFLAGS }}" -o "{{.OUT_BINARY}}" cli.go + + cli:compile:all: + desc: Compile CLI client binaries for multiple platforms + aliases: [compile] + cmds: + - for: + matrix: + OS: ["linux", "darwin", "windows"] + ARCH: ["amd64", "arm64"] + cmd: | + # Skip unsupported combinations (e.g., Windows ARM64) + if [ "{{.ITEM.OS}}" = "windows" ] && [ "{{.ITEM.ARCH}}" = "arm64" ]; then + echo "Skipping unsupported platform: {{.ITEM.OS}}/{{.ITEM.ARCH}}" + else + GOOS={{.ITEM.OS}} GOARCH={{.ITEM.ARCH}} BINARY_NAME=dirctl-{{.ITEM.OS}}-{{.ITEM.ARCH}} task cli:compile + fi + + ## + ## Client SDK + ## + sdk:deps:common: + desc: Common dependencies for SDKs + vars: + TRY_SKIP_COMPILE: "{{ .TRY_SKIP_COMPILE }}" + cmds: + - task: deps:cosign + - task: cli:compile + vars: + TRY_SKIP_COMPILE: "{{.TRY_SKIP_COMPILE}}" + - task: sdk:deps:javascript + - task: sdk:deps:python + + sdk:deps:cicd:iodc-token-generation: + desc: Get Fulcio OIDC token for CICD + requires: + vars: [CLIENT_ID] + cmds: + - | + OIDC_TOKEN=$(curl -s -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" \ + "$ACTIONS_ID_TOKEN_REQUEST_URL&audience=sigstore" | jq -r '.value') + + CLIENT_ID="{{.CLIENT_ID}}" + PROVIDER_URL="https://token.actions.githubusercontent.com" + + echo "OIDC_PROVIDER_URL=${PROVIDER_URL}" + echo "CLIENT_ID=${CLIENT_ID}" + echo "OIDC_TOKEN=${OIDC_TOKEN}" + + sdk:build:all: + desc: Build all client SDK package + cmds: + - task: sdk:build:javascript + - task: sdk:build:python + + sdk:build:python: + desc: Build python client SDK package + dir: ./sdk/dir-py + deps: + - task: sdk:deps:python + cmds: + - "{{.UV_BIN}} build" + + sdk:build:javascript: + desc: Build javascript client SDK package + dir: ./sdk/dir-js + deps: + - task: sdk:deps:javascript + cmds: + - npm run build + + sdk:test-env:create: + desc: Create Kubernetes cluster test environment + cmds: + - task: deploy:kubernetes:local + vars: + DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s + - task: deploy:kubernetes:local:port-forward + + sdk:test-env:delete: + desc: Delete Kubernetes cluster test environment + cmds: + - task: deploy:kubernetes:local:port-forward:cleanup + - task: deploy:kubernetes:local:cleanup + + 
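# Sketch (illustrative, not part of this Taskfile): the test-env tasks above are what
# the SDK test tasks below create and defer-delete around each run; for manual
# debugging the same environment can be driven by hand:
task sdk:test-env:create    # kind cluster + dir chart + port-forwards on localhost
# ...inspect, run pytest or npm test manually...
task sdk:test-env:delete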
sdk:test-env:spiffe:load-test-image:
+    desc: Load the SDK tests into KinD
+    deps:
+      - task: api:gen
+    vars:
+      GOARCH: "{{ .GOARCH | default ARCH }}"
+    cmds:
+      - "{{.BAKE_ENV}} docker buildx bake sdks-test --set *.platform=linux/{{.GOARCH}} {{.IMAGE_BAKE_OPTS}}"
+      - "{{.KUBECTL_BIN}} config use-context kind-dir.example"
+      - "{{.KIND_BIN}} load docker-image ghcr.io/agntcy/sdks-test:{{.IMAGE_TAG}} --name dir.example"
+
+  sdk:test:all:spiffe:
+    desc: Test all client SDK packages with SPIFFE
+    dir: ./e2e/sdk
+    cmds:
+      - "{{.KUBECTL_BIN}} config use-context kind-dir.example"
+      - "{{.HELM_BIN}} uninstall sdk-tests --wait --keep-history --ignore-not-found > /dev/null || true"
+      - "{{.HELM_BIN}} install --replace --timeout 3m --wait --wait-for-jobs sdk-tests ./chart -f chart/values.yaml --set image.tag={{.IMAGE_TAG}} > /dev/null || true"
+      - |
+        status=$({{.KUBECTL_BIN}} get job sdks-test -o jsonpath='{.status.conditions[0].type}')
+        status_value=$({{.KUBECTL_BIN}} get job sdks-test -o jsonpath='{.status.conditions[0].status}')
+
+        if [[ "$status" == "SuccessCriteriaMet" ]] && [[ "$status_value" == "True" ]]; then
+          echo "SDK tests finished successfully! ✅"
+          exit 0
+        fi
+
+        if [[ "$status" == "FailureTarget" ]] && [[ "$status_value" == "True" ]]; then
+          {{.KUBECTL_BIN}} logs jobs/sdks-test
+          echo "SDK tests failed! ❎"
+          exit 1
+        fi
+
+        echo "Unknown error happened, check logs! ⚠️"
+        exit 1
+
+  sdk:test:all:
+    desc: Test all client SDK packages
+    cmds:
+      - task: sdk:test:javascript
+      - task: sdk:test:python
+
+  sdk:test:python:
+    desc: Test python client SDK package
+    dir: ./sdk/dir-py
+    deps:
+      - task: sdk:deps:python
+    env:
+      KIND_CLUSTER_NAME: "sdk-py-test"
+    cmds:
+      - task: sdk:test-env:create
+      - defer: { task: sdk:test-env:delete }
+      - |
+        export DIRCTL_PATH="$(printf "%s" "${DIRCTL_PATH:-{{ .DIRCTL_BIN }}}")"
+        export COSIGN_PATH="$(printf "%s" "${COSIGN_PATH:-{{ .COSIGN_BIN }}}")"
+
+        '{{.UV_BIN}}' run pytest
+
+  sdk:test:javascript:
+    desc: Test javascript client SDK package
+    dir: ./sdk/dir-js
+    deps:
+      - task: sdk:deps:javascript
+    env:
+      KIND_CLUSTER_NAME: "sdk-js-test"
+    cmds:
+      - task: sdk:test-env:create
+      - defer: { task: sdk:test-env:delete }
+      - |
+        export DIRCTL_PATH="$(printf "%s" "${DIRCTL_PATH:-{{ .DIRCTL_BIN }}}")"
+        export COSIGN_PATH="$(printf "%s" "${COSIGN_PATH:-{{ .COSIGN_BIN }}}")"
+
+        npm run test
+
+  sdk:deps:python:
+    desc: Install deps for python SDK package
+    dir: ./sdk/dir-py
+    deps:
+      - task: deps:bufbuild
+      - task: deps:uv
+    cmds:
+      - task: api:gen
+      - "{{.UV_BIN}} sync --all-packages"
+
+  sdk:deps:javascript:
+    desc: Install deps for javascript SDK package
+    dir: ./sdk/dir-js
+    cmds:
+      - npm install
+      - task: api:gen
+
+  sdk:release:all:
+    desc: Release all client SDK packages
+    env:
+      UV_PUBLISH_TOKEN: "{{ .UV_PUBLISH_TOKEN }}"
+      NODE_AUTH_TOKEN: "{{ .NODE_AUTH_TOKEN }}"
+    cmds:
+      - task: sdk:release:javascript
+      - task: sdk:release:python
+
+  sdk:release:python:
+    ignore_error: true # FIXME: Need to check if package version already exists
+    desc: Release python client SDK package
+    dir: ./sdk/dir-py
+    env:
+      UV_PUBLISH_TOKEN: "{{ .UV_PUBLISH_TOKEN }}"
+    deps:
+      - task: deps:uv
+    cmds:
+      - "{{.UV_BIN}} publish"
+
+  sdk:release:javascript:
+    ignore_error: true # FIXME: Need to check if package version already exists
+    desc: Release javascript client SDK package
+    dir: ./sdk/dir-js
+    env:
+      NODE_AUTH_TOKEN: "{{ .NODE_AUTH_TOKEN }}"
+    cmd: |
+      version=$(npm pkg get version)
+
+      if [[ $version == *"rc"* ]]; then
+        npm publish --scope=@agntcy --access public
--tag rc-{{.COMMIT_SHA}} + else + npm publish --scope=@agntcy --access public + fi + + ## + ## Server + ## + server:build: + desc: Build Directory server image + cmds: + - "{{.BAKE_ENV}} docker buildx bake {{.IMAGE_BAKE_OPTS}} dir-apiserver" + + server:start: + desc: Start Directory server + dir: server/cmd + env: + DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s + cmds: + - defer: { task: server:store:stop } + - task: server:store:start + - go run main.go + + server:store:pull: + desc: Pull local OCI registry server docker image + vars: + IMAGE: "{{.IMAGE}}" + cmds: + - | + docker pull {{.IMAGE}} + + server:store:start: + desc: Start local OCI registry server for storage + internal: true + vars: + IMAGE: ghcr.io/project-zot/zot-linux-{{ARCH}}:v{{.ZOT_VERSION}} + deps: + - task: server:store:pull + vars: + IMAGE: "{{.IMAGE}}" + cmds: + - | + # mount config + cat > /tmp/config.json < {{ .CREDS_FILE }} << EOF + export HTPASSWD_USERNAME="{{ .HTPASSWD_USERNAME }}" + export HTPASSWD_PASSWORD="${HTPASSWD_PASSWORD}" + export HTPASSWD_AUTH_HEADER="${HTPASSWD_AUTH_HEADER}" + export HTPASSWD_SYNC_USERNAME="{{ .HTPASSWD_SYNC_USERNAME }}" + export HTPASSWD_SYNC_PASSWORD="${HTPASSWD_SYNC_PASSWORD}" + EOF + + # Write htpasswd file (for helm --set-file commands) + cat > {{ .HTPASSWD_FILE }} << EOF + {{ .HTPASSWD_USERNAME }}:${HTPASSWD_ADMIN} + {{ .HTPASSWD_SYNC_USERNAME }}:${HTPASSWD_SYNC} + EOF + + deploy:kubernetes:cleanup-htpasswd-creds: + desc: Cleanup htpasswd credentials and files + vars: + CREDS_FILE: '{{ .CREDS_FILE | default "/tmp/dir-htpasswd-creds.env" }}' + HTPASSWD_FILE: '{{ .HTPASSWD_FILE | default "/tmp/dir-htpasswd" }}' + cmds: + - rm -f {{ .CREDS_FILE }} + - rm -f {{ .HTPASSWD_FILE }} + + deploy:kubernetes:local: + aliases: [deploy:local] + desc: Deploy a local Directory server in Kubernetes + deps: + - deploy:kubernetes:setup-cluster + vars: + # Kind args + KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' + KIND_CREATE_OPTS: '{{ .KIND_CREATE_OPTS | default "" }}' + # Helm args + HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' + HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" + HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" + # Coverage config + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' + cmds: + # TODO: make logic idempotent so that running functional tests does not change previous contexts + + # Generate credentials and htpasswd file (using defaults) + - task: deploy:kubernetes:gen-htpasswd-creds + + # Cleanup credentials on exit (using defaults) + - defer: + task: deploy:kubernetes:cleanup-htpasswd-creds + + # Deploy chart + - | + # Load credentials + source /tmp/dir-htpasswd-creds.env + + {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} + + {{ .HELM_BIN }} upgrade dir \ + {{ .HELM_CHART_PATH }} \ + -f {{ .HELM_VALUES_PATH }} \ + --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ + {{ if .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ + --set apiserver.config.routing.refresh_interval="1s" \ + --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ + --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ + --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ + --set-file 
apiserver.zot.secretFiles.htpasswd="/tmp/dir-htpasswd" \ + --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ + --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ + {{ if .DIRECTORY_SERVER_RATELIMIT_ENABLED }}--set apiserver.config.ratelimit.enabled="{{ .DIRECTORY_SERVER_RATELIMIT_ENABLED }}"{{ end }} \ + {{ if .DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS }}--set apiserver.config.ratelimit.global_rps="{{ .DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS }}"{{ end }} \ + {{ if .DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST }}--set apiserver.config.ratelimit.global_burst="{{ .DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST }}"{{ end }} \ + {{ if .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS }}--set apiserver.config.ratelimit.per_client_rps="{{ .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS }}"{{ end }} \ + {{ if .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST }}--set apiserver.config.ratelimit.per_client_burst="{{ .DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST }}"{{ end }} \ + {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ + {{ if .DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL }}--set apiserver.config.oasf_api_validation.schema_url="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL }}"{{ end }} \ + {{ if .DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE }}--set apiserver.config.oasf_api_validation.disable="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE }}"{{ end }} \ + {{ if ne .DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE nil }}--set apiserver.config.oasf_api_validation.strict_mode="{{ .DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE }}"{{ end }} \ + --namespace {{ .HELM_NAMESPACE }} \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + deploy:kubernetes:context: + desc: Switch context to given Kubernetes cluster + vars: + KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' + cmds: + - "{{ .KIND_BIN }} export kubeconfig --name {{ .KIND_CLUSTER_NAME }}" + + deploy:kubernetes:dir: + desc: Deploy DIR helm chart + vars: + HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' + HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" + HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" + HELM_EXTRA_ARGS: '{{ .HELM_EXTRA_ARGS | default "" }}' + cmds: + # Generate credentials and htpasswd file (using defaults) + - task: deploy:kubernetes:gen-htpasswd-creds + + # Cleanup credentials on exit (using defaults) + - defer: + task: deploy:kubernetes:cleanup-htpasswd-creds + + # Deploy chart + - | + # Load credentials + source /tmp/dir-htpasswd-creds.env + + {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} + + {{ .HELM_BIN }} upgrade --install dir \ + {{ .HELM_CHART_PATH }} \ + -f {{ .HELM_VALUES_PATH }} \ + --set apiserver.image.tag="{{ .IMAGE_TAG }}" \ + {{ if .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ + --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ + --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ + --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ + --set-file apiserver.zot.secretFiles.htpasswd="/tmp/dir-htpasswd" \ + --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ + --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ + --set apiserver.log_level="DEBUG" \ + {{ 
.HELM_EXTRA_ARGS }} \ + --namespace {{ .HELM_NAMESPACE }} \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + deploy:kubernetes:dirctl: + desc: Deploy DIRCTL helm chart + vars: + HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-client" }}' + HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dirctl" + HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dirctl/values.yaml" + HELM_EXTRA_ARGS: '{{ .HELM_EXTRA_ARGS | default "" }}' + cmds: + - | + {{ .HELM_BIN }} upgrade --install dirctl \ + {{ .HELM_CHART_PATH }} \ + -f {{ .HELM_VALUES_PATH }} \ + --set image.tag="{{ .IMAGE_TAG }}" \ + {{ .HELM_EXTRA_ARGS }} \ + --namespace {{ .HELM_NAMESPACE }} \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + deploy:kubernetes:spire: + desc: Deploy SPIRE helm chart + vars: + TRUST_DOMAIN: '{{ .TRUST_DOMAIN | default "example.org" }}' + SERVICE_TYPE: '{{ .SERVICE_TYPE | default "LoadBalancer" }}' + BUNDLE_PATH: '{{ .BUNDLE_PATH | default "/tmp/spire-bundle.spiffe" }}' + cmds: + - | + {{ .HELM_BIN }} upgrade --install spire-crds spire-crds \ + --repo https://spiffe.github.io/helm-charts-hardened/ \ + --namespace spire-crds \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + - | + {{ .HELM_BIN }} upgrade --install spire spire \ + --repo https://spiffe.github.io/helm-charts-hardened/ \ + --set global.spire.trustDomain="{{ .TRUST_DOMAIN }}" \ + --set spire-server.image.tag="{{ .SPIRE_VERSION }}" \ + --set spire-agent.image.tag="{{ .SPIRE_VERSION }}" \ + --set spire-server.service.type="{{ .SERVICE_TYPE }}" \ + --set spire-server.federation.enabled="true" \ + --set spire-server.controllerManager.watchClassless="true" \ + --set spire-server.controllerManager.className="dir-spire" \ + --namespace spire \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + - | + {{ .KUBECTL_BIN }} get configmap -n spire spire-bundle -o json | jq '.data."bundle.spiffe"' -r > {{ .BUNDLE_PATH }} + + deploy:kubernetes:local:port-forward: + aliases: [deploy:local:port-forward] + desc: Set up port-forwarding for the local deployment + vars: + # Helm args + HELM_NAMESPACE: '{{ .HELM_NAMESPACE | default "dir-server" }}' + cmds: + # Port-forward dependency services + - | + {{ .KUBECTL_BIN }} port-forward service/dir-apiserver 8888:8888 -n {{ .HELM_NAMESPACE }} & + {{ .KUBECTL_BIN }} port-forward service/dir-apiserver 9090:9090 -n {{ .HELM_NAMESPACE }} & + {{ .KUBECTL_BIN }} port-forward service/dir-ingress-controller 8080:80 -n {{ .HELM_NAMESPACE }} & + + # Delay to ensure services are online + - sleep 10 + + deploy:kubernetes:local:port-forward:cleanup: + aliases: [deploy:local:port-forward:cleanup] + desc: Cleanup port-forwarding processes + cmds: + # Kill any existing port-forward processes for the dir-apiserver and dir-ingress-controller services + - kill -9 $(ps aux | grep port-forward | grep -E "(dir-apiserver|dir-ingress-controller)" | awk '{print $2}') || true + + deploy:kubernetes:local:cleanup: + aliases: [deploy:local:cleanup, deploy:kubernetes:cleanup] + desc: Cleanup Kubernetes environment for local deployment + deps: + - deps:kind + vars: + # Kind args + KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' + cmds: + - "{{ .KIND_BIN }} delete cluster --name {{ .KIND_CLUSTER_NAME }}" + + deploy:kubernetes:network:bootstrap: + internal: true + desc: Deploy a bootstrap Directory server in Kubernetes + deps: + - task: deploy:kubernetes:setup-cluster + vars: 
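# Sketch (illustrative, not part of this Taskfile): the port-forward cleanup above
# greps ps output for matching processes; an equivalent, slightly safer pattern
# matches the full command line with pkill:
pkill -f 'port-forward.*dir-apiserver' || true
pkill -f 'port-forward.*dir-ingress-controller' || true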
+ E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + - deps:dirctl-bin + vars: + # Helm args + HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" + HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" + # Coverage config + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' + cmds: + # Generate private key if it doesn't exist + - | + test -f /tmp/node.privkey || {{ .BIN_DIR }}/dirctl network init --output /tmp/node.privkey + + # Generate the bootstrap peer ID and export it to the environment file + - | + bootstrap_peerid=$({{ .BIN_DIR }}/dirctl network info /tmp/node.privkey) + echo "PEER ID: ${bootstrap_peerid}" + echo BOOTSTRAP_PEER_ID="${bootstrap_peerid}" > .env + + # Generate credentials and htpasswd file + - task: deploy:kubernetes:gen-htpasswd-creds + vars: + CREDS_FILE: "/tmp/dir-htpasswd-creds-bootstrap.env" + HTPASSWD_FILE: "/tmp/zot-htpasswd-bootstrap" + + # Cleanup credentials on exit + - defer: + task: deploy:kubernetes:cleanup-htpasswd-creds + vars: + CREDS_FILE: "/tmp/dir-htpasswd-creds-bootstrap.env" + HTPASSWD_FILE: "/tmp/zot-htpasswd-bootstrap" + + # Deploy the bootstrap server using Helm + - | + # Load credentials + source /tmp/dir-htpasswd-creds-bootstrap.env + + {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} + + {{ .HELM_BIN }} upgrade agntcy-dir \ + {{ .HELM_CHART_PATH }} \ + -f {{ .HELM_VALUES_PATH }} \ + --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ + {{ if .PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ + --set apiserver.config.routing.refresh_interval="1s" \ + --set apiserver.secrets.privKey="$(cat /tmp/node.privkey)" \ + --set apiserver.config.routing.key_path="/etc/agntcy/dir/node.privkey" \ + --set apiserver.config.routing.listen_address="/ip4/0.0.0.0/tcp/8999" \ + --set apiserver.config.routing.directory_api_address="agntcy-dir-apiserver.bootstrap.svc.cluster.local:8888" \ + --set apiserver.config.store.oci.registry_address="agntcy-dir-zot.bootstrap.svc.cluster.local:5000" \ + --set apiserver.zot.extraVolumes[0].persistentVolumeClaim.claimName="agntcy-dir-zot-config" \ + --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ + --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ + --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ + --set-file apiserver.zot.secretFiles.htpasswd="/tmp/zot-htpasswd-bootstrap" \ + --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ + --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ + {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ + --namespace "bootstrap" \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + deploy:kubernetes:network: + aliases: [deploy:network] + desc: Deploy a network of Directory servers in Kubernetes (1 bootstrap + 3 peers) + vars: + HELM_CHART_PATH: "{{ .ROOT_DIR }}/install/charts/dir" + HELM_VALUES_PATH: "{{ .ROOT_DIR }}/install/charts/dir/values.yaml" + # Coverage config + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + DEPLOY_IMAGE_TAG: '{{ if eq .E2E_COVERAGE_ENABLED "true" }}{{ .COVERAGE_IMAGE_TAG }}{{ else }}{{ .IMAGE_TAG }}{{ end }}' + cmds: + # Deploy bootstrap with explicit var 
passing (not as dependency) + - task: deploy:kubernetes:network:bootstrap + vars: + PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" + E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + + # Deploy the peer servers using Helm + - for: + matrix: + PEER: ["peer1", "peer2", "peer3"] + cmd: | + export $(cat .env) + + # Generate fresh credentials for this peer using helper task + task deploy:kubernetes:gen-htpasswd-creds \ + CREDS_FILE=/tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env \ + HTPASSWD_FILE=/tmp/zot-htpasswd-{{ .ITEM.PEER }} + + # Load credentials + source /tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env + + {{ .HELM_BIN }} dependency build {{ .HELM_CHART_PATH }} + {{ .HELM_BIN }} upgrade agntcy-dir \ + {{ .HELM_CHART_PATH }} \ + -f {{ .HELM_VALUES_PATH }} \ + --set apiserver.image.tag="{{ .DEPLOY_IMAGE_TAG }}" \ + {{ if .PUBLICATION_SCHEDULER_INTERVAL }}--set apiserver.config.publication.scheduler_interval="{{ .PUBLICATION_SCHEDULER_INTERVAL }}"{{ end }} \ + --set apiserver.config.routing.refresh_interval="1s" \ + --set apiserver.config.store.oci.registry_address="agntcy-dir-zot.{{ .ITEM.PEER }}.svc.cluster.local:5000" \ + --set apiserver.config.routing.bootstrap_peers[0]="/dns4/agntcy-dir-apiserver-routing.bootstrap.svc.cluster.local/tcp/8999/p2p/${BOOTSTRAP_PEER_ID}" \ + --set apiserver.config.routing.directory_api_address="agntcy-dir-apiserver.{{ .ITEM.PEER }}.svc.cluster.local:8888" \ + --set apiserver.zot.extraVolumes[0].persistentVolumeClaim.claimName="agntcy-dir-zot-config" \ + --set apiserver.secrets.ociAuth.username="${HTPASSWD_USERNAME}" \ + --set apiserver.secrets.ociAuth.password="${HTPASSWD_PASSWORD}" \ + --set apiserver.zot.authHeader="${HTPASSWD_AUTH_HEADER}" \ + --set-file apiserver.zot.secretFiles.htpasswd="/tmp/zot-htpasswd-{{ .ITEM.PEER }}" \ + --set apiserver.secrets.syncAuth.username="${HTPASSWD_SYNC_USERNAME}" \ + --set apiserver.secrets.syncAuth.password="${HTPASSWD_SYNC_PASSWORD}" \ + {{ if eq .E2E_COVERAGE_ENABLED "true" }}--set-json 'apiserver.extraEnv=[{"name":"GOCOVERDIR","value":"/tmp/coverage"}]' --set apiserver.coverageVolume=true{{ end }} \ + --namespace "{{ .ITEM.PEER }}" \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" + + # Cleanup temp files + rm -f /tmp/zot-htpasswd-{{ .ITEM.PEER }} + rm -f /tmp/dir-htpasswd-creds-{{ .ITEM.PEER }}.env + + deploy:kubernetes:network:port-forward: + aliases: [deploy:network:port-forward] + desc: Set up port-forwarding for the peers + cmds: + # Port-forward dependency services + - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer1 8890:8888 &" + - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer2 8891:8888 &" + - "{{ .KUBECTL_BIN }} port-forward svc/agntcy-dir-apiserver -n peer3 8892:8888 &" + + # Delay to ensure services are online + - sleep 10 + + deploy:kubernetes:network:port-forward:cleanup: + aliases: [deploy:network:port-forward:cleanup] + desc: Cleanup port-forwarding processes + cmds: + # Kill any existing port-forward processes for the agntcy-dir-apiserver service + - kill -9 $(ps aux | grep port-forward | grep agntcy-dir-apiserver | awk '{print $2}') || true + + deploy:kubernetes:network:cleanup: + aliases: [deploy:network:cleanup] + desc: Cleanup Kubernetes environment for network deployment + vars: + # Kind args + KIND_CLUSTER_NAME: '{{ .KIND_CLUSTER_NAME | default "agntcy-cluster" }}' + cmds: + # Delete helm releases + - for: + matrix: + PEER: ["bootstrap", "peer1", "peer2", "peer3"] + cmd: | + {{ .HELM_BIN }} 
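# Sketch (illustrative, not part of this Taskfile): the bootstrap_peers entry above is
# a libp2p multiaddr assembled from the in-cluster DNS name and the peer ID captured
# in .env; its shape is:
echo "/dns4/agntcy-dir-apiserver-routing.bootstrap.svc.cluster.local/tcp/8999/p2p/${BOOTSTRAP_PEER_ID}"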
delete --namespace {{ .ITEM.PEER }} agntcy-dir + + - "{{ .KIND_BIN }} delete cluster --name {{ .KIND_CLUSTER_NAME }}" + + ## + ## Test + ## + test:unit: + desc: Run unit tests on codebase + aliases: [test] + env: + GOWORK: off + vars: + EXTRA_ARGS: '{{ .EXTRA_ARGS | default "" }}' + cmds: + - for: { var: GO_MOD_DIR_UNIT_TEST } + cmd: | + echo "Running tests in {{.ITEM}}" + go -C {{.ITEM}} test ./... {{.EXTRA_ARGS}} + + test:unit:coverage: + desc: Run all unit tests with coverage and generate summaries + HTML reports + vars: + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/unit") }}' + cmds: + - echo "Removing existing coverage directory and reports" + - rm -rf {{.COVERAGE_DIR}}/* + - echo "Creating new coverage directory" + - mkdir -p {{.COVERAGE_DIR}} + - | + set -euo pipefail + # Build list of modules from GO_MOD_DIR_UNIT_TEST + modules="" + {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} + {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} + {{end}} + for m in $modules; do + if [ -d "$m" ]; then + echo "[coverage] Testing module: $m" + ( + cd "$m" + # Run tests with coverprofile (text format) + if go test -covermode=atomic -coverprofile="{{.COVERAGE_DIR}}/$m.out.tmp" ./... -json 2>&1 | tee "{{.COVERAGE_DIR}}/test-report-$m.json" >/dev/null; then + echo "[coverage] Completed: $m" + else + status=$? + echo "[coverage][warn] Tests failed in $m (exit $status); continuing" + fi + ) + # Filter out generated files (matching codecov.yml ignores) + if [ -f "{{.COVERAGE_DIR}}/$m.out.tmp" ]; then + grep -v "\.pb\.go" "{{.COVERAGE_DIR}}/$m.out.tmp" | grep -v "mock_.*\.go" > "{{.COVERAGE_DIR}}/$m.out" || echo "mode: atomic" > "{{.COVERAGE_DIR}}/$m.out" + rm "{{.COVERAGE_DIR}}/$m.out.tmp" + else + echo "[coverage] No coverage generated for $m" + echo "mode: atomic" > "{{.COVERAGE_DIR}}/$m.out" + fi + fi + done + + - | + set -euo pipefail + echo "[coverage] Generating per-module summaries" + : > {{.COVERAGE_DIR}}/summary.txt + # Build list of modules from GO_MOD_DIR_UNIT_TEST + modules="" + {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} + {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} + {{end}} + for m in $modules; do + if [ -f "{{.COVERAGE_DIR}}/$m.out" ]; then + if (cd "$m" && go tool cover -func={{.COVERAGE_DIR}}/$m.out > {{.COVERAGE_DIR}}/$m.func.txt 2>/dev/null); then + tail -n1 {{.COVERAGE_DIR}}/$m.func.txt | sed "s/^total:/[$m] total:/" >> {{.COVERAGE_DIR}}/summary.txt || true + else + echo "[$m] total: (error generating summary)" >> {{.COVERAGE_DIR}}/summary.txt + fi + fi + done + echo "[coverage] Summary:"; cat {{.COVERAGE_DIR}}/summary.txt + + - | + set -euo pipefail + echo "[coverage] Generating HTML reports" + # Build list of modules from GO_MOD_DIR_UNIT_TEST + modules="" + {{range $dir := .GO_MOD_DIR_UNIT_TEST | splitList "\n"}} + {{if $dir}}modules="$modules $(basename {{$dir}})"{{end}} + {{end}} + for m in $modules; do + if [ -f "{{.COVERAGE_DIR}}/$m.out" ] && [ -d "$m" ]; then + (cd "$m" && go tool cover -html={{.COVERAGE_DIR}}/$m.out -o {{.COVERAGE_DIR}}/$m.html || true) + fi + done + echo "[coverage] Generated HTML files:"; ls -1 {{.COVERAGE_DIR}}/*.html 2>/dev/null | sed 's|{{.COVERAGE_DIR}}/| - |' || echo " (none)" + + bench: + desc: Run bench tests on codebase + cmds: # run in sequence + - task: server:bench + - echo "Done" + + test:e2e: + desc: Run end-to-end tests for local deployment and network deployment + aliases: [e2e] + cmds: + - task: test:e2e:local + - task: test:e2e:network + + test:e2e:local:cli: + desc: Run 
only local CLI tests (with dedicated infrastructure) + aliases: [e2e:local:cli] + vars: + PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' + RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "false" }}' + RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}' + RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}' + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' + env: + DIRECTORY_E2E_DEPLOYMENT_MODE: "local" + cmds: + - defer: { task: deploy:kubernetes:local:cleanup } + - defer: { task: deploy:kubernetes:local:port-forward:cleanup } + - defer: + task: test:e2e:coverage:extract-pods + vars: + E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + NAMESPACE: "dir-server" + COVERAGE_DIR: "{{ .COVERAGE_DIR }}" + - task: deploy:kubernetes:local + vars: + DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" + DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}" + DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}" + DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}" + E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + - task: deploy:kubernetes:local:port-forward + - | + # Run E2E tests with coverage if enabled + if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then + mkdir -p {{.COVERAGE_DIR}} + go test -C ./e2e/local -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-cli.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v + # Filter out generated files (matching codecov.yml ignores) + if [ -f {{.COVERAGE_DIR}}/local-cli.out.tmp ]; then + grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-cli.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-cli.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-cli.out + rm {{.COVERAGE_DIR}}/local-cli.out.tmp + fi + else + go test -C ./e2e/local . 
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v + fi + + test:e2e:client: + desc: Run only client library tests (with dedicated infrastructure) + aliases: [e2e:client] + vars: + PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}' + RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "true" }}' + RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}' + RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}' + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' + env: + DIRECTORY_E2E_DEPLOYMENT_MODE: "local" + cmds: + - defer: { task: deploy:kubernetes:local:cleanup } + - defer: { task: deploy:kubernetes:local:port-forward:cleanup } + - defer: + task: test:e2e:coverage:extract-pods + vars: + E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + NAMESPACE: "dir-server" + COVERAGE_DIR: "{{ .COVERAGE_DIR }}" + - task: deploy:kubernetes:local + vars: + DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}" + DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}" + DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}" + DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}" + E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}" + - task: deploy:kubernetes:local:port-forward + - | + # Run E2E tests with coverage if enabled + if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then + mkdir -p {{.COVERAGE_DIR}} + go test -C ./e2e/client -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v + # Filter out generated files (matching codecov.yml ignores) + if [ -f {{.COVERAGE_DIR}}/local-client.out.tmp ]; then + grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-client.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-client.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-client.out + rm {{.COVERAGE_DIR}}/local-client.out.tmp + fi + else + go test -C ./e2e/client . 
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v
+        fi
+
+  test:e2e:local:
+    desc: Run end-to-end tests for local deployment (Client + CLI + Rate limiting tests)
+    aliases: [e2e:local]
+    vars:
+      PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}'
+      RATELIMIT_ENABLED: '{{ .RATELIMIT_ENABLED | default "true" }}'
+      RATELIMIT_GLOBAL_RPS: '{{ .RATELIMIT_GLOBAL_RPS | default "100" }}'
+      RATELIMIT_GLOBAL_BURST: '{{ .RATELIMIT_GLOBAL_BURST | default "200" }}'
+      E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}'
+      COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}'
+    env:
+      DIRECTORY_E2E_DEPLOYMENT_MODE: "local"
+    cmds:
+      - defer: { task: deploy:kubernetes:local:cleanup }
+      - defer: { task: deploy:kubernetes:local:port-forward:cleanup }
+      - defer:
+          task: test:e2e:coverage:extract-pods
+          vars:
+            E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+            NAMESPACE: "dir-server"
+      # Bootstrap infrastructure once for all test suites
+      - task: deploy:kubernetes:local
+        vars:
+          DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}"
+          DIRECTORY_SERVER_RATELIMIT_ENABLED: "{{ .RATELIMIT_ENABLED }}"
+          DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS: "{{ .RATELIMIT_GLOBAL_RPS }}"
+          DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST: "{{ .RATELIMIT_GLOBAL_BURST }}"
+          E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+      - task: deploy:kubernetes:local:port-forward
+      # Run client library tests first (faster feedback)
+      - |
+        if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then
+          mkdir -p {{.COVERAGE_DIR}}
+          go test -C ./e2e/client -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="!ratelimit"
+          # Filter out generated files (matching codecov.yml ignores)
+          if [ -f {{.COVERAGE_DIR}}/local-client.out.tmp ]; then
+            grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-client.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-client.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-client.out
+            rm {{.COVERAGE_DIR}}/local-client.out.tmp
+          fi
+        else
+          go test -C ./e2e/client . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="!ratelimit"
+        fi
+      # Run local CLI tests second (same infrastructure)
+      - |
+        if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then
+          mkdir -p {{.COVERAGE_DIR}}
+          go test -C ./e2e/local -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-cli.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v
+          # Filter out generated files (matching codecov.yml ignores)
+          if [ -f {{.COVERAGE_DIR}}/local-cli.out.tmp ]; then
+            grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-cli.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-cli.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-cli.out
+            rm {{.COVERAGE_DIR}}/local-cli.out.tmp
+          fi
+        else
+          go test -C ./e2e/local . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v
+        fi
+      # Wait 3 seconds for rate limit reset
+      - sleep 3
+      # Run rate limiting tests LAST; write to a dedicated profile so the
+      # earlier client run's local-client.out is not overwritten
+      - |
+        if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then
+          mkdir -p {{.COVERAGE_DIR}}
+          go test -C ./e2e/client -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/local-client-ratelimit.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="ratelimit"
+          # Filter out generated files (matching codecov.yml ignores)
+          if [ -f {{.COVERAGE_DIR}}/local-client-ratelimit.out.tmp ]; then
+            grep -v "\.pb\.go" {{.COVERAGE_DIR}}/local-client-ratelimit.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/local-client-ratelimit.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/local-client-ratelimit.out
+            rm {{.COVERAGE_DIR}}/local-client-ratelimit.out.tmp
+          fi
+        else
+          go test -C ./e2e/client . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v -ginkgo.label-filter="ratelimit"
+        fi
+
+  test:e2e:network:
+    desc: Run end-to-end tests for network deployment (Multi-peer CLI tests)
+    aliases: [e2e:network]
+    vars:
+      PUBLICATION_SCHEDULER_INTERVAL: '{{ .PUBLICATION_SCHEDULER_INTERVAL | default "1s" }}'
+      E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}'
+      COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}'
+    env:
+      DIRECTORY_E2E_DEPLOYMENT_MODE: "network"
+    cmds:
+      - defer: { task: deploy:kubernetes:network:cleanup }
+      - defer: { task: deploy:kubernetes:network:port-forward:cleanup }
+      # Extract coverage from all network namespaces
+      - defer:
+          task: test:e2e:coverage:extract-pods
+          vars:
+            E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+            NAMESPACE: "bootstrap"
+      - defer:
+          task: test:e2e:coverage:extract-pods
+          vars:
+            E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+            NAMESPACE: "peer1"
+      - defer:
+          task: test:e2e:coverage:extract-pods
+          vars:
+            E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+            NAMESPACE: "peer2"
+      - defer:
+          task: test:e2e:coverage:extract-pods
+          vars:
+            E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+            NAMESPACE: "peer3"
+      # Bootstrap
+      # NOTE: Run as a dedicated task instead of a dependency, otherwise the port forwarding won't work
+      - task: deploy:kubernetes:network
+        vars:
+          PUBLICATION_SCHEDULER_INTERVAL: "{{ .PUBLICATION_SCHEDULER_INTERVAL }}"
+          E2E_COVERAGE_ENABLED: "{{ .E2E_COVERAGE_ENABLED }}"
+      - task: deploy:kubernetes:network:port-forward
+      # Run network tests with proper isolation and cleanup
+      - |
+        if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then
+          mkdir -p {{.COVERAGE_DIR}}
+          go test -C ./e2e/network -covermode=atomic -coverpkg={{ .COVERAGE_PKGS }} -coverprofile={{.COVERAGE_DIR}}/network.out.tmp . -v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v
+          # Filter out generated files (matching codecov.yml ignores)
+          if [ -f {{.COVERAGE_DIR}}/network.out.tmp ]; then
+            grep -v "\.pb\.go" {{.COVERAGE_DIR}}/network.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/network.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/network.out
+            rm {{.COVERAGE_DIR}}/network.out.tmp
+          fi
+        else
+          go test -C ./e2e/network . 
-v -failfast -test.v -test.paniconexit0 -ginkgo.timeout 2h -timeout 2h -ginkgo.v + fi + + test:e2e:spire: + desc: Run end-to-end tests for SPIRE deployment + cmds: + # Run SPIRE deployment + - defer: { task: test:spire:cleanup } + - task: test:spire + # Run SDK tests + - task: sdk:deps:javascript + - task: sdk:test-env:spiffe:load-test-image + - task: sdk:test:all:spiffe + + # TODO: move spire out from here + # TODO: change to Ingress services instead of LoadBalancer + test:spire: + desc: Test SPIRE federation setup between DIR and DIRCTL clusters + deps: + - task: helm:gen + vars: + AUTH_MODE: '{{ .AUTH_MODE | default "x509" }}' # or "jwt" + DIR_TRUST_DOMAIN: '{{ .DIR_TRUST_DOMAIN | default "dir.example" }}' + DIRCTL_TRUST_DOMAIN: '{{ .DIRCTL_TRUST_DOMAIN | default "dirctl.example" }}' + DIR_DNS_NAME_TEMPLATE: '{{ .DIR_DNS_NAME_TEMPLATE | default "127.0.0.1.nip.io" }}' + cmds: + # Setup DIR cluster + - task: deploy:kubernetes:setup-cluster + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + + # Start cloud provider for LoadBalancer support + - | + echo "Starting Kind cloud provider for LoadBalancer support..." + if [[ {{OS}} == "darwin" ]]; then + sudo go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & + elif [[ {{OS}} == "linux" ]]; then + go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & + else + echo "Unknown OS" + exit 1 + fi + echo "Cloud provider started in background" + + # Deploy SPIRE on DIR cluster + - task: deploy:kubernetes:spire + vars: + TRUST_DOMAIN: "{{ .DIR_TRUST_DOMAIN }}" + BUNDLE_PATH: /tmp/{{ .DIR_TRUST_DOMAIN }}.spiffe + + # Setup DIRCTL cluster + - task: deploy:kubernetes:setup-cluster + vars: + KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" + + # Start cloud provider for LoadBalancer support + - | + echo "Starting Kind cloud provider for LoadBalancer support..." + go run sigs.k8s.io/cloud-provider-kind@latest > /dev/null 2>&1 & + echo "Cloud provider started in background" + + # Deploy SPIRE on DIRCTL cluster + - task: deploy:kubernetes:spire + vars: + TRUST_DOMAIN: "{{ .DIRCTL_TRUST_DOMAIN }}" + BUNDLE_PATH: /tmp/{{ .DIRCTL_TRUST_DOMAIN }}.spiffe + + # Get DIR cluster service addresses + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + - | + echo "Getting DIR cluster service addresses..." + DIR_BUNDLE_IP=$(kubectl get service -n spire spire-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") + echo "DIR_BUNDLE_ADDRESS=${DIR_BUNDLE_IP}:8443" >> /tmp/spire-addresses.env + + # Get DIRCTL cluster service addresses + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" + - | + echo "Getting DIRCTL cluster service addresses..." + DIRCTL_BUNDLE_IP=$(kubectl get service -n spire spire-server -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") + echo "DIRCTL_BUNDLE_ADDRESS=${DIRCTL_BUNDLE_IP}:8443" >> /tmp/spire-addresses.env + + # Create DIR server federation config + - | + source /tmp/spire-addresses.env + echo "Creating DIR server federation config..." 
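+        # The federation block in the config below lets the DIR server
+        # authenticate workloads from the DIRCTL trust domain: the bundle
+        # endpoint is the DIRCTL SPIRE server (https_spiffe profile), and
+        # trustDomainBundle inlines the bundle fetched earlier to
+        # /tmp/{{ .DIRCTL_TRUST_DOMAIN }}.spiffe so the first endpoint
+        # fetch can itself be authenticated.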
+ cat > /tmp/server-federation.yaml << EOF + apiserver: + service: + type: LoadBalancer + config: + authn: + enabled: true + mode: "{{ .AUTH_MODE }}" + audiences: + - "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" + authz: + enabled: true + trust_domain: {{ .DIR_TRUST_DOMAIN }} + extraEnv: + - name: DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE + value: "true" + spire: + enabled: true + trustDomain: {{ .DIR_TRUST_DOMAIN }} + className: dir-spire + dnsNameTemplates: + - "{{ .DIR_DNS_NAME_TEMPLATE }}" + federation: + - trustDomain: {{ .DIRCTL_TRUST_DOMAIN }} + bundleEndpointURL: https://${DIRCTL_BUNDLE_ADDRESS} + bundleEndpointProfile: + type: https_spiffe + endpointSPIFFEID: spiffe://{{ .DIRCTL_TRUST_DOMAIN }}/spire/server + trustDomainBundle: |- + $(cat /tmp/{{ .DIRCTL_TRUST_DOMAIN }}.spiffe) + EOF + + # Deploy DIR server with federation + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + - task: deploy:kubernetes:dir + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + HELM_EXTRA_ARGS: "-f /tmp/server-federation.yaml" + DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL: 1s + + # Get DIR API server address + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + - | + echo "Getting DIR API server address..." + DIR_API_IP=$(kubectl get service -n dir-server dir-apiserver -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "127.0.0.1") + echo "DIR_SERVER_ADDRESS=${DIR_API_IP}:8888" >> /tmp/spire-addresses.env + + # Create DIRCTL client federation config (for dirctl in dir.example) + - | + source /tmp/spire-addresses.env + echo "Creating DIRCTL client federation config for dir.example..." + cat > /tmp/client-federation-dir-example.yaml << EOF + env: + - name: DIRECTORY_CLIENT_SERVER_ADDRESS + value: ${DIR_SERVER_ADDRESS} + - name: DIRECTORY_CLIENT_AUTH_MODE + value: "{{ .AUTH_MODE }}" + - name: DIRECTORY_CLIENT_JWT_AUDIENCE + value: "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" + spire: + enabled: true + trustDomain: {{ .DIR_TRUST_DOMAIN }} + className: dir-spire + EOF + + # Deploy DIRCTL client in dir.example trust domain (same cluster as dir) + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + - task: deploy:kubernetes:dirctl + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + HELM_NAMESPACE: "dir-client" + HELM_EXTRA_ARGS: "-f /tmp/client-federation-dir-example.yaml" + + # Create DIRCTL client federation config (for dirctl.example cluster) + - | + source /tmp/spire-addresses.env + echo "Creating DIRCTL client federation config..." 
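+        # Same pattern in the opposite direction: this dirctl client lives
+        # in the DIRCTL trust domain and federates with the DIR trust
+        # domain, bootstrapping from /tmp/{{ .DIR_TRUST_DOMAIN }}.spiffe.
+        # The server address comes from the LoadBalancer IPs recorded in
+        # /tmp/spire-addresses.env above.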
+ cat > /tmp/client-federation.yaml << EOF + env: + - name: DIRECTORY_CLIENT_SERVER_ADDRESS + value: ${DIR_SERVER_ADDRESS} + - name: DIRECTORY_CLIENT_AUTH_MODE + value: "{{ .AUTH_MODE }}" + - name: DIRECTORY_CLIENT_JWT_AUDIENCE + value: "spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server" + spire: + enabled: true + trustDomain: {{ .DIRCTL_TRUST_DOMAIN }} + className: dir-spire + federation: + - trustDomain: {{ .DIR_TRUST_DOMAIN }} + bundleEndpointURL: https://${DIR_BUNDLE_ADDRESS} + bundleEndpointProfile: + type: https_spiffe + endpointSPIFFEID: spiffe://{{ .DIR_TRUST_DOMAIN }}/spire/server + trustDomainBundle: |- + $(cat /tmp/{{ .DIR_TRUST_DOMAIN }}.spiffe) + EOF + + # Deploy DIRCTL client with federation (in dirctl.example cluster) + - task: deploy:kubernetes:context + vars: + KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" + - task: deploy:kubernetes:dirctl + vars: + KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" + HELM_EXTRA_ARGS: "-f /tmp/client-federation.yaml" + + # Display completion status + - | + source /tmp/spire-addresses.env + echo "==============================================" + echo "SPIRE federation test deployment complete!" + echo "==============================================" + echo "DIR Server: ${DIR_SERVER_ADDRESS}" + echo "DIR Bundle: ${DIR_BUNDLE_ADDRESS}" + echo "DIRCTL Bundle: ${DIRCTL_BUNDLE_ADDRESS}" + echo "" + echo "Trust domains setup:" + echo " DIR: {{ .DIR_TRUST_DOMAIN }}" + echo " DIRCTL Internal: {{ .DIR_TRUST_DOMAIN }}" + echo " DIRCTL External: {{ .DIRCTL_TRUST_DOMAIN }}" + echo "" + echo "To verify deployment:" + echo " kubectl --context kind-{{ .DIR_TRUST_DOMAIN }} get pods -n dir-server" + echo " kubectl --context kind-{{ .DIR_TRUST_DOMAIN }} logs -n dir-client -l app.kubernetes.io/name=dirctl" + echo " kubectl --context kind-{{ .DIRCTL_TRUST_DOMAIN }} logs -n dir-client -l app.kubernetes.io/name=dirctl" + echo "" + echo "To cleanup:" + echo " task test:spire:cleanup" + + test:spire:cleanup: + desc: Cleanup SPIRE federation test clusters + vars: + DIR_TRUST_DOMAIN: '{{ .DIR_TRUST_DOMAIN | default "dir.example" }}' + DIRCTL_TRUST_DOMAIN: '{{ .DIRCTL_TRUST_DOMAIN | default "dirctl.example" }}' + cmds: + - echo "Cleaning up DIR cluster ({{ .DIR_TRUST_DOMAIN }})..." + - task: deploy:kubernetes:cleanup + vars: + KIND_CLUSTER_NAME: "{{ .DIR_TRUST_DOMAIN }}" + - echo "Cleaning up DIRCTL cluster ({{ .DIRCTL_TRUST_DOMAIN }})..." + - task: deploy:kubernetes:cleanup + vars: + KIND_CLUSTER_NAME: "{{ .DIRCTL_TRUST_DOMAIN }}" + - echo "Cleanup complete!" 
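+
+  # Coverage note (a summary of the tasks below, not new behavior): the e2e
+  # coverage flow relies on Go binary coverage. build:coverage compiles the
+  # apiserver with `go build -cover`, the deploy tasks run it with
+  # GOCOVERDIR=/tmp/coverage, and counters are only flushed on graceful
+  # shutdown. extract-pods therefore SIGTERMs PID 1 in each pod, waits for
+  # the container restart, and copies /tmp/coverage out; the process task
+  # then merges the raw data, roughly:
+  #
+  #   go tool covdata merge -i=<pod-dir>[,<pod-dir>...] -o=server-binary
+  #   go tool covdata textfmt -i=server-binary -o=server.out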
+ + test:e2e:coverage: + desc: Run end-to-end tests with coverage + aliases: [e2e:coverage] + deps: + - build:coverage + vars: + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' + cmds: + - echo "Removing existing coverage directory and reports" + - rm -rf {{.COVERAGE_DIR}}/* + - echo "Creating new coverage directory" + - mkdir -p {{.COVERAGE_DIR}} + - E2E_COVERAGE_ENABLED=true task test:e2e + - task: test:e2e:coverage:process + vars: + COVERAGE_DIR: "{{ .COVERAGE_DIR }}" + + test:e2e:coverage:extract-pods: + desc: Extract coverage data from Kubernetes pods + internal: true + vars: + E2E_COVERAGE_ENABLED: '{{ .E2E_COVERAGE_ENABLED | default "false" }}' + NAMESPACE: '{{ .NAMESPACE | default "dir-server" }}' + POD_SELECTOR: '{{ .POD_SELECTOR | default "app.kubernetes.io/name=apiserver" }}' + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e/pods") }}' + cmds: + - | + if [ "{{.E2E_COVERAGE_ENABLED}}" = "true" ]; then + echo "[coverage] Triggering graceful shutdown of pod to flush coverage data..." + + # Get pod matching the selector + POD_NAME=$({{.KUBECTL_BIN}} get pods -n {{.NAMESPACE}} -l {{.POD_SELECTOR}} -o jsonpath='{.items[*].metadata.name}') + + if [ -n "$POD_NAME" ]; then + # Send SIGTERM to PID 1 to trigger coverage flush (like the PoC) + echo "[coverage] Sending SIGTERM to PID 1 in pod: $POD_NAME" + {{.KUBECTL_BIN}} exec -n {{.NAMESPACE}} $POD_NAME -- sh -c 'kill -TERM 1' 2>/dev/null || true + + # Wait for container to restart (coverage files persist in emptyDir volume) + echo "[coverage] Waiting for container to restart and coverage to be flushed..." + sleep 5 + + # Extract coverage files (emptyDir persists across container restarts) + echo "[coverage] Extracting coverage data from pod: $POD_NAME" + mkdir -p {{.COVERAGE_DIR}}/pod-$POD_NAME + {{.KUBECTL_BIN}} cp -n {{.NAMESPACE}} $POD_NAME:/tmp/coverage {{.COVERAGE_DIR}}/pod-$POD_NAME 2>&1 || echo "[coverage][warn] Failed to extract from $POD_NAME" + else + echo "[coverage][warn] No pod found matching selector {{.POD_SELECTOR}}" + fi + echo "[coverage] Coverage data extracted to {{.COVERAGE_DIR}}" + else + echo "[coverage] E2E_COVERAGE_ENABLED is false, skipping pod coverage extraction" + fi + + test:e2e:coverage:process: + desc: Collect and merge E2E coverage data from pods + vars: + COVERAGE_DIR: '{{ .COVERAGE_DIR | default (print .ROOT_DIR "/.coverage/e2e") }}' + cmds: + - | + echo "[e2e-coverage] Collecting and merging E2E coverage data from pods" + if [ -d "{{.COVERAGE_DIR}}/pods" ]; then + # Build comma-separated list of pod directories + pod_dirs=$(find {{.COVERAGE_DIR}}/pods -mindepth 1 -maxdepth 1 -type d 2>/dev/null | tr '\n' ',' | sed 's/,$//') + + if [ -n "$pod_dirs" ]; then + echo " Found pod coverage directories" + mkdir -p {{.COVERAGE_DIR}}/server-binary + go tool covdata merge -i="$pod_dirs" -o={{.COVERAGE_DIR}}/server-binary + go tool covdata textfmt -i={{.COVERAGE_DIR}}/server-binary -o={{.COVERAGE_DIR}}/server.out.tmp + # Filter out generated files (matching codecov.yml ignores) + if [ -f {{.COVERAGE_DIR}}/server.out.tmp ]; then + grep -v "\.pb\.go" {{.COVERAGE_DIR}}/server.out.tmp | grep -v "mock_.*\.go" > {{.COVERAGE_DIR}}/server.out || echo "mode: atomic" > {{.COVERAGE_DIR}}/server.out + rm {{.COVERAGE_DIR}}/server.out.tmp + fi + else + echo "No pod coverage found" + fi + else + echo "No pods directory found" + fi + + ## + ## Linters + ## + lint:go: + desc: Run Golang linters + deps: + - task: deps:golangci-lint + vars: + FIX: '{{ .FIX | default "false" 
}}'
+      FIX_FLAG: '{{if eq .FIX "true"}}--fix{{end}}'
+    cmds:
+      - for: { var: GO_MOD_DIR }
+        cmd: |
+          echo "Running golangci-lint in {{.ITEM}}"
+          cd {{.ITEM}}
+          {{.GOLANGCI_LINT_BIN}} run --config {{.ROOT_DIR}}/.golangci.yml {{.FIX_FLAG}}
+
+  lint:buf:
+    desc: Run Buf linters
+    deps:
+      - task: deps:protoc
+      - task: deps:bufbuild
+    dir: ./proto
+    cmds:
+      - "{{.BUFBUILD_BIN}} lint"
+
+  lint:helm:
+    desc: Run Helm linters
+    deps:
+      - task: deps:helm
+    vars:
+      HELM_CHARTS:
+        sh: find ./install/charts -maxdepth 1 -mindepth 1 -type d -exec basename {} \;
+    cmds:
+      - for: { var: HELM_CHARTS }
+        cmd: |
+          echo "Running helm lint on {{.ITEM}}"
+          {{.HELM_BIN}} dependency update ./install/charts/{{.ITEM}}
+          {{.HELM_BIN}} lint ./install/charts/{{.ITEM}} --with-subcharts
+
+  lint:
+    desc: Run all linters
+    deps:
+      - lint:go
+      - lint:buf
+      - lint:helm
+
+  ##
+  ## License
+  ##
+  license:
+    desc: Check licenses
+    deps:
+      - task: deps:licensei
+    cmds:
+      - for: { var: GO_MOD_DIR }
+        cmd: echo "Running licensei in {{.ITEM}}" && cd {{.ITEM}} && {{ .LICENSEI_BIN }} check --config {{.ROOT_DIR}}/.licensei.toml
+
+  license:cache:
+    desc: Build the license cache
+    deps:
+      - task: deps:licensei
+    cmds:
+      - for: { var: GO_MOD_DIR }
+        cmd: echo "Running licensei in {{.ITEM}}" && cd {{.ITEM}} && {{ .LICENSEI_BIN }} cache --config {{.ROOT_DIR}}/.licensei.toml
+
+  ##
+  ## Various proof-of-concept tasks
+  ##
+  poc:integration:
+    desc: Run the VS Code and Continue integration proof-of-concept.
+    dir: ./docs/research/integrations
+    prompt:
+      - |
+        Are you sure you want to run the integration proof-of-concept?
+        This will overwrite your local workspace VSCode and Continue configuration.
+    vars:
+      RECORD_FILE: '{{ .RECORD_FILE | default "docs/research/integrations/demo.record.json" }}'
+    cmd: |
+      # Prepare Python environment
+      python3 -m venv venv
+      . ./venv/bin/activate
+      python3 -m pip install pyyaml
+
+      # Run script
+      python3 ./importer.py \
+        -record={{.ROOT_DIR}}/{{.RECORD_FILE}} \
+        -vscode_path={{.ROOT_DIR}}/.vscode \
+        -continue_path={{.ROOT_DIR}}/.continue/assistants
+
+      # Print env requirements
+      cat .env.example
+
+  poc:mcp-to-oasf:
+    desc: Import MCP-to-OASF Exporter Agent into the current workspace.
+    cmds:
+      - task: poc:integration
+        vars:
+          RECORD_FILE: "docs/research/integrations/mcp-to-oasf-agent/extractor.record.json"
+
+  ##
+  ## Dependencies
+  ##
+  deps:
+    desc: Install dependencies
+    cmds:
+      - task: deps:helm
+      - task: deps:kubectl
+      - task: deps:kind
+      - task: deps:protoc
+      - task: deps:bufbuild
+      - task: deps:uv
+      - task: deps:cosign
+
+  deps:bin-dir:
+    desc: Create bin directory
+    internal: true
+    run: once
+    cmd: mkdir -p {{.BIN_DIR}}
+    status:
+      - test -d {{.BIN_DIR}}
+
+  deps:dirctl-bin:
+    desc: Compile dirctl binary
+    internal: true
+    run: once
+    cmds:
+      - task: cli:compile
+    status:
+      - test -f {{.BIN_DIR}}/dirctl
+
+  deps:helm:
+    desc: Ensure supported Helm version is installed
+    internal: true
+    deps:
+      - deps:bin-dir
+    preconditions:
+      - which curl
+      - which tar
+    cmds:
+      - cmd: echo "Downloading Helm v{{.HELM_VERSION}}..."
+      - cmd: curl -sSfL 'https://get.helm.sh/helm-v{{.HELM_VERSION}}-{{OS}}-{{ARCH}}.tar.gz' --output - | tar xzvOf - '{{OS}}-{{ARCH}}/helm' > {{.HELM_BIN}}
+      - cmd: chmod +x {{.HELM_BIN}}
+    status:
+      - test -x {{.HELM_BIN}}
+
+  deps:kubectl:
+    desc: Ensure supported kubectl version is installed
+    internal: true
+    deps:
+      - deps:bin-dir
+    preconditions:
+      - which curl
+    cmds:
+      - cmd: echo "Downloading Kubectl v{{.KUBECTL_VERSION}}..."
+      - cmd: curl -L "https://dl.k8s.io/release/v{{.KUBECTL_VERSION}}/bin/{{OS}}/{{ARCH}}/kubectl" -o {{.KUBECTL_BIN}}
+      - cmd: chmod +x {{.KUBECTL_BIN}}
+    status:
+      - test -x {{.KUBECTL_BIN}}
+
+  deps:kind:
+    desc: Ensure supported kind version is installed
+    internal: true
+    deps:
+      - deps:bin-dir
+    preconditions:
+      - which go
+    cmds:
+      - cmd: echo "Downloading Kind v{{.KIND_VERSION}}..."
+      - cmd: GOBIN={{.BIN_DIR}} go install sigs.k8s.io/kind@v{{.KIND_VERSION}}
+      - cmd: mv {{.BIN_DIR}}/kind {{.KIND_BIN}}
+    status:
+      - test -x {{.KIND_BIN}}
+
+  deps:protoc:
+    desc: Ensure supported Protoc version and plugins are installed
+    internal: true
+    deps:
+      - deps:bin-dir
+    preconditions:
+      - which go
+      - which curl
+      - which unzip
+    vars:
+      ARCH_TYPE: '{{ if eq ARCH "arm64" }}aarch_64{{ else if eq ARCH "amd64" }}x86_64{{ else if eq ARCH "s390x" }}s390_64{{ else }}{{ARCH}}{{ end }}'
+      OS_VARIANT: '{{ if eq OS "darwin" }}osx-universal_binary{{ else if eq OS "windows" }}win64{{else}}linux-{{.ARCH_TYPE}}{{ end }}'
+    cmds:
+      - cmd: echo "Downloading Protoc v{{.PROTOC_VERSION}}..."
+      - cmd: |
+          curl -sL https://github.com/protocolbuffers/protobuf/releases/download/v{{.PROTOC_VERSION}}/protoc-{{.PROTOC_VERSION}}-{{.OS_VARIANT}}.zip -o {{.BIN_DIR}}/tmp.zip
+          unzip -j {{.BIN_DIR}}/tmp.zip "bin/protoc" -d {{.BIN_DIR}}
+          mv {{.BIN_DIR}}/protoc {{.PROTOC_BIN}}
+          rm {{.BIN_DIR}}/tmp.zip
+      - cmd: chmod +x {{.PROTOC_BIN}}
+      - cmd: echo "Downloading go plugins for protoc..."
+      - cmd: go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
+      - cmd: go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
+      - cmd: go install github.com/NathanBaulch/protoc-gen-cobra@latest
+    status:
+      - test -x {{.PROTOC_BIN}}
+
+  deps:bufbuild:
+    desc: Ensure supported bufbuild version is installed
+    internal: true
+    deps:
+      - deps:bin-dir
+    preconditions:
+      - which curl
+    vars:
+      ARCH_TYPE: '{{ if eq ARCH "amd64" }}x86_64{{ else }}{{ARCH}}{{ end }}'
+    cmds:
+      - cmd: echo "Downloading BufBuild v{{.BUFBUILD_VERSION}}..."
+      - cmd: |
+          curl -L "https://github.com/bufbuild/buf/releases/download/v{{.BUFBUILD_VERSION}}/buf-{{OS}}-{{.ARCH_TYPE}}" -o {{.BUFBUILD_BIN}}
+      - cmd: chmod +x {{.BUFBUILD_BIN}}
+    status:
+      - test -x {{.BUFBUILD_BIN}}
+
+  deps:tidy:
+    desc: Ensure dependencies are up-to-date
+    cmds:
+      - for: { var: GO_MOD_DIR }
+        cmd: go -C {{.ITEM}} mod tidy -go={{.GO_VERSION}}
+
+  deps:multimod-bin:
+    desc: Build the multimod binary
+    internal: true
+    deps:
+      - deps:bin-dir
+    vars:
+      MULTIMOD_REPO_DIR: "{{ .BIN_DIR }}/opentelemetry-go-build-tools"
+    cmds:
+      - git clone https://github.com/open-telemetry/opentelemetry-go-build-tools --branch multimod/v{{.MULTIMOD_VERSION}} {{.MULTIMOD_REPO_DIR}}
+      - go build -C {{.MULTIMOD_REPO_DIR}}/multimod -o {{.MULTIMOD_BIN}} main.go
+      - rm -rf {{.MULTIMOD_REPO_DIR}}
+    status:
+      - test -x {{.MULTIMOD_BIN}}
+
+  deps:golangci-lint:
+    desc: Install golangci-lint
+    internal: true
+    deps:
+      - deps:bin-dir
+    cmds:
+      - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s v{{.GOLANGCI_LINT_VERSION}}
+      - mv {{.BIN_DIR}}/golangci-lint {{.GOLANGCI_LINT_BIN}}
+      - chmod +x {{.GOLANGCI_LINT_BIN}}
+    status:
+      - test -x {{.GOLANGCI_LINT_BIN}}
+
+  deps:licensei:
+    desc: Install licensei
+    internal: true
+    deps:
+      - deps:bin-dir
+    cmds:
+      - curl -sfL https://raw.githubusercontent.com/goph/licensei/master/install.sh | bash -s v{{.LICENSEI_VERSION}}
+      - mv {{.BIN_DIR}}/licensei {{.LICENSEI_BIN}}
+      - chmod +x {{.LICENSEI_BIN}}
+    status:
+      - test -x {{.LICENSEI_BIN}}
+
+  deps:uv:
+    desc: Install uv
+    internal: true
+    deps:
+      - deps:bin-dir
+    env:
+      UV_INSTALL_DIR: "{{ .BIN_DIR }}"
+    cmds:
+      - curl -sfL https://astral.sh/uv/{{.UV_VERSION}}/install.sh | sh
+      - mv {{.BIN_DIR}}/uv {{.UV_BIN}}
+      - chmod +x {{.UV_BIN}}
+      - rm {{.BIN_DIR}}/uvx
+    status:
+      - test -x {{.UV_BIN}}
+
+  deps:cosign:
+    desc: Install sigstore cosign
+    dir: "{{ .BIN_DIR }}"
+    internal: true
+    cmds:
+      - curl -sfL https://github.com/sigstore/cosign/releases/download/v{{.COSIGN_VERSION}}/cosign-{{OS}}-{{ARCH}} -o cosign-{{.COSIGN_VERSION}}
+      - chmod +x cosign-{{.COSIGN_VERSION}}
+    status:
+      - test -x cosign-{{.COSIGN_VERSION}}
+
+  deps:htpasswd:
+    desc: Install htpasswd
+    dir: "{{ .BIN_DIR }}"
+    internal: true
+    cmds:
+      - npm install -g htpasswd
+
+  ##
+  ## Helm
+  ##
+  helm:gen:
+    desc: Update Helm dependencies for chart and subcharts
+    internal: true
+    deps:
+      - deps:helm
+    vars:
+      HELM_ALL_CHART_PATHS:
+        sh: find . -name Chart.yaml -exec dirname {} \;
+    cmds:
+      # Add Helm repos
+      - "{{ .HELM_BIN }} repo add project-zot http://zotregistry.dev/helm-charts"
+      - "{{ .HELM_BIN }} repo add spiffe https://spiffe.github.io/helm-charts-hardened"
+
+      # Update dependencies
+      - for: { var: HELM_ALL_CHART_PATHS }
+        cmd: "cd {{ .ITEM }} && {{ .HELM_BIN }} dependency update"
diff --git a/api/LICENSE.md b/api/LICENSE.md
index d9a10c0d8..7cd40e552 100644
--- a/api/LICENSE.md
+++ b/api/LICENSE.md
@@ -1,176 +1,176 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/api/core/v1/cid.go b/api/core/v1/cid.go index c454f5995..1a1448741 100644 --- a/api/core/v1/cid.go +++ b/api/core/v1/cid.go @@ -1,96 +1,96 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - ocidigest "github.com/opencontainers/go-digest" -) - -// ConvertDigestToCID converts an OCI digest to a CID string. -// Uses the same CID parameters as the original Record.GetCid(): CIDv1, codec 1, SHA2-256. -func ConvertDigestToCID(digest ocidigest.Digest) (string, error) { - // Validate digest - if err := digest.Validate(); err != nil { - return "", fmt.Errorf("invalid digest format: %s", digest) - } - - if digest.Algorithm() != ocidigest.SHA256 { - return "", fmt.Errorf("unsupported digest algorithm %s, only SHA256 is supported", digest.Algorithm()) - } - - // Extract the hex-encoded hash from the OCI digest - hashHex := digest.Hex() - - // Convert hex string to bytes - hashBytes, err := hex.DecodeString(hashHex) - if err != nil { - return "", fmt.Errorf("failed to decode digest hash from hex %s: %w", hashHex, err) - } - - // Create multihash from the digest bytes - mhash, err := mh.Encode(hashBytes, mh.SHA2_256) - if err != nil { - return "", fmt.Errorf("failed to create multihash: %w", err) - } - - // Create CID with same parameters as original Record.GetCid() - cidVal := cid.NewCidV1(1, mhash) // Version 1, codec 1, with our multihash - - return cidVal.String(), nil -} - -// ConvertCIDToDigest converts a CID string to an OCI digest. -// This is the reverse of ConvertDigestToCID. 
-func ConvertCIDToDigest(cidString string) (ocidigest.Digest, error) { - // Decode the CID - c, err := cid.Decode(cidString) - if err != nil { - return "", fmt.Errorf("failed to decode CID %s: %w", cidString, err) - } - - // Extract multihash bytes - mhBytes := c.Hash() - - // Decode the multihash - decoded, err := mh.Decode(mhBytes) - if err != nil { - return "", fmt.Errorf("failed to decode multihash from CID %s: %w", cidString, err) - } - - // Validate it's SHA2-256 - if decoded.Code != uint64(mh.SHA2_256) { - return "", fmt.Errorf("unsupported hash type %d in CID %s, only SHA2-256 is supported", decoded.Code, cidString) - } - - // Create OCI digest from the hash bytes - return ocidigest.NewDigestFromBytes(ocidigest.SHA256, decoded.Digest), nil -} - -// CalculateDigest calculates a SHA2-256 digest from raw bytes. -// This is used as a fallback when oras.PushBytes is not available. -func CalculateDigest(data []byte) (ocidigest.Digest, error) { - if len(data) == 0 { - return "", errors.New("cannot calculate digest of empty data") - } - - // Calculate SHA2-256 hash - hash := sha256.Sum256(data) - - // Create OCI digest - return ocidigest.NewDigestFromBytes(ocidigest.SHA256, hash[:]), nil -} - -// IsValidCID validates a CID string. -func IsValidCID(cidString string) bool { - _, err := cid.Decode(cidString) - - return err == nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + ocidigest "github.com/opencontainers/go-digest" +) + +// ConvertDigestToCID converts an OCI digest to a CID string. +// Uses the same CID parameters as the original Record.GetCid(): CIDv1, codec 1, SHA2-256. +func ConvertDigestToCID(digest ocidigest.Digest) (string, error) { + // Validate digest + if err := digest.Validate(); err != nil { + return "", fmt.Errorf("invalid digest format: %s", digest) + } + + if digest.Algorithm() != ocidigest.SHA256 { + return "", fmt.Errorf("unsupported digest algorithm %s, only SHA256 is supported", digest.Algorithm()) + } + + // Extract the hex-encoded hash from the OCI digest + hashHex := digest.Hex() + + // Convert hex string to bytes + hashBytes, err := hex.DecodeString(hashHex) + if err != nil { + return "", fmt.Errorf("failed to decode digest hash from hex %s: %w", hashHex, err) + } + + // Create multihash from the digest bytes + mhash, err := mh.Encode(hashBytes, mh.SHA2_256) + if err != nil { + return "", fmt.Errorf("failed to create multihash: %w", err) + } + + // Create CID with same parameters as original Record.GetCid() + cidVal := cid.NewCidV1(1, mhash) // Version 1, codec 1, with our multihash + + return cidVal.String(), nil +} + +// ConvertCIDToDigest converts a CID string to an OCI digest. +// This is the reverse of ConvertDigestToCID. 
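+//
+// Illustrative round-trip (digest and CID values taken from this package's tests):
+//
+//	d := ocidigest.Digest("sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f")
+//	c, _ := ConvertDigestToCID(d)   // "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4"
+//	d2, _ := ConvertCIDToDigest(c)  // d2 == d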
+func ConvertCIDToDigest(cidString string) (ocidigest.Digest, error) { + // Decode the CID + c, err := cid.Decode(cidString) + if err != nil { + return "", fmt.Errorf("failed to decode CID %s: %w", cidString, err) + } + + // Extract multihash bytes + mhBytes := c.Hash() + + // Decode the multihash + decoded, err := mh.Decode(mhBytes) + if err != nil { + return "", fmt.Errorf("failed to decode multihash from CID %s: %w", cidString, err) + } + + // Validate it's SHA2-256 + if decoded.Code != uint64(mh.SHA2_256) { + return "", fmt.Errorf("unsupported hash type %d in CID %s, only SHA2-256 is supported", decoded.Code, cidString) + } + + // Create OCI digest from the hash bytes + return ocidigest.NewDigestFromBytes(ocidigest.SHA256, decoded.Digest), nil +} + +// CalculateDigest calculates a SHA2-256 digest from raw bytes. +// This is used as a fallback when oras.PushBytes is not available. +func CalculateDigest(data []byte) (ocidigest.Digest, error) { + if len(data) == 0 { + return "", errors.New("cannot calculate digest of empty data") + } + + // Calculate SHA2-256 hash + hash := sha256.Sum256(data) + + // Create OCI digest + return ocidigest.NewDigestFromBytes(ocidigest.SHA256, hash[:]), nil +} + +// IsValidCID validates a CID string. +func IsValidCID(cidString string) bool { + _, err := cid.Decode(cidString) + + return err == nil +} diff --git a/api/core/v1/cid_test.go b/api/core/v1/cid_test.go index 8e0a4d65e..e584df218 100644 --- a/api/core/v1/cid_test.go +++ b/api/core/v1/cid_test.go @@ -1,182 +1,182 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - "testing" - - ocidigest "github.com/opencontainers/go-digest" -) - -func TestCalculateDigest(t *testing.T) { - // Test cases - tests := []struct { - name string - data []byte - wantDigest string - wantErr bool - }{ - { - name: "Empty data", - data: []byte{}, - wantErr: true, - }, - { - name: "Hello World", - data: []byte("Hello, World!"), - wantDigest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", - }, - { - name: "Random data", - data: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05}, - wantDigest: "sha256:17e88db187afd62c16e5debf3e6527cd006bc012bc90b51a810cd80c2d511f43", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotDigest, err := CalculateDigest(tt.data) - if (err != nil) != tt.wantErr { - t.Errorf("CalculateDigest() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if gotDigest.String() != tt.wantDigest { - t.Errorf("CalculateDigest() = %v, want %v", gotDigest, tt.wantDigest) - } - }) - } -} - -func TestConvertDigestToCID(t *testing.T) { - // Test cases - tests := []struct { - name string - digest string - wantCID string - wantErr bool - }{ - { - name: "Empty digest", - digest: "", - wantErr: true, - }, - { - name: "Unsupported algorithm", - digest: "md5:d41d8cd98f00b204e9800998ecf8427e", - wantErr: true, - }, - { - name: "Invalid digest format", - digest: "d41d8cd98f00b204e9800998ecf8427e", - wantErr: true, - }, - { - name: "Valid SHA256 digest", - digest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", - wantCID: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotCID, err := ConvertDigestToCID(ocidigest.Digest(tt.digest)) - if (err != nil) != tt.wantErr { - t.Errorf("ConvertDigestToCID() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if gotCID != tt.wantCID 
{ - t.Errorf("ConvertDigestToCID() = %v, want %v", gotCID, tt.wantCID) - } - }) - } -} - -func TestConvertCIDToDigest(t *testing.T) { - // Test cases - tests := []struct { - name string - cid string - wantDigest string - wantErr bool - }{ - { - name: "Empty CID", - cid: "", - wantErr: true, - }, - { - name: "Invalid CID format", - cid: "invalid-cid-string", - wantErr: true, - }, - { - name: "Unsupported hash type in CID", - cid: "bafkreigh2akiscaildc7t5j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x7y6z", // This is a CID with a different hash type - wantErr: true, - }, - { - name: "Valid CID", - cid: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", - wantDigest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotDigest, err := ConvertCIDToDigest(tt.cid) - if (err != nil) != tt.wantErr { - t.Errorf("ConvertCIDToDigest() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if gotDigest.String() != tt.wantDigest { - t.Errorf("ConvertCIDToDigest() = %v, want %v", gotDigest.String(), tt.wantDigest) - } - }) - } -} - -func TestIsValidCID(t *testing.T) { - // Test cases - tests := []struct { - name string - cid string - wantValid bool - }{ - { - name: "Valid CID", - cid: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", - wantValid: true, - }, - { - name: "Invalid CID - wrong format", - cid: "invalid-cid-string", - wantValid: false, - }, - { - name: "Empty CID", - cid: "", - wantValid: false, - }, - { - name: "CID with invalid characters", - cid: "bafybeigdyrzt5tqz5f5u3j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x7y6z7x!", - wantValid: false, - }, - { - name: "CID with spaces", - cid: "bafybeigdyrzt 5tqz5f5u3j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x", - wantValid: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotValid := IsValidCID(tt.cid); gotValid != tt.wantValid { - t.Errorf("IsValidCID() = %v, want %v", gotValid, tt.wantValid) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "testing" + + ocidigest "github.com/opencontainers/go-digest" +) + +func TestCalculateDigest(t *testing.T) { + // Test cases + tests := []struct { + name string + data []byte + wantDigest string + wantErr bool + }{ + { + name: "Empty data", + data: []byte{}, + wantErr: true, + }, + { + name: "Hello World", + data: []byte("Hello, World!"), + wantDigest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", + }, + { + name: "Random data", + data: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05}, + wantDigest: "sha256:17e88db187afd62c16e5debf3e6527cd006bc012bc90b51a810cd80c2d511f43", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotDigest, err := CalculateDigest(tt.data) + if (err != nil) != tt.wantErr { + t.Errorf("CalculateDigest() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if gotDigest.String() != tt.wantDigest { + t.Errorf("CalculateDigest() = %v, want %v", gotDigest, tt.wantDigest) + } + }) + } +} + +func TestConvertDigestToCID(t *testing.T) { + // Test cases + tests := []struct { + name string + digest string + wantCID string + wantErr bool + }{ + { + name: "Empty digest", + digest: "", + wantErr: true, + }, + { + name: "Unsupported algorithm", + digest: "md5:d41d8cd98f00b204e9800998ecf8427e", + wantErr: true, + }, + { + name: "Invalid digest format", + digest: "d41d8cd98f00b204e9800998ecf8427e", + wantErr: 
true, + }, + { + name: "Valid SHA256 digest", + digest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", + wantCID: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotCID, err := ConvertDigestToCID(ocidigest.Digest(tt.digest)) + if (err != nil) != tt.wantErr { + t.Errorf("ConvertDigestToCID() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if gotCID != tt.wantCID { + t.Errorf("ConvertDigestToCID() = %v, want %v", gotCID, tt.wantCID) + } + }) + } +} + +func TestConvertCIDToDigest(t *testing.T) { + // Test cases + tests := []struct { + name string + cid string + wantDigest string + wantErr bool + }{ + { + name: "Empty CID", + cid: "", + wantErr: true, + }, + { + name: "Invalid CID format", + cid: "invalid-cid-string", + wantErr: true, + }, + { + name: "Unsupported hash type in CID", + cid: "bafkreigh2akiscaildc7t5j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x7y6z", // This is a CID with a different hash type + wantErr: true, + }, + { + name: "Valid CID", + cid: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", + wantDigest: "sha256:dffd6021bb2bd5b0af676290809ec3a53191dd81c7f70a4b28688a362182986f", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotDigest, err := ConvertCIDToDigest(tt.cid) + if (err != nil) != tt.wantErr { + t.Errorf("ConvertCIDToDigest() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if gotDigest.String() != tt.wantDigest { + t.Errorf("ConvertCIDToDigest() = %v, want %v", gotDigest.String(), tt.wantDigest) + } + }) + } +} + +func TestIsValidCID(t *testing.T) { + // Test cases + tests := []struct { + name string + cid string + wantValid bool + }{ + { + name: "Valid CID", + cid: "baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4", + wantValid: true, + }, + { + name: "Invalid CID - wrong format", + cid: "invalid-cid-string", + wantValid: false, + }, + { + name: "Empty CID", + cid: "", + wantValid: false, + }, + { + name: "CID with invalid characters", + cid: "bafybeigdyrzt5tqz5f5u3j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x7y6z7x!", + wantValid: false, + }, + { + name: "CID with spaces", + cid: "bafybeigdyrzt 5tqz5f5u3j5x3t6l5g7x7y6z7x7y6z7x7y6z7x7y6z7x", + wantValid: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotValid := IsValidCID(tt.cid); gotValid != tt.wantValid { + t.Errorf("IsValidCID() = %v, want %v", gotValid, tt.wantValid) + } + }) + } +} diff --git a/api/core/v1/oasf.go b/api/core/v1/oasf.go index 2fe635218..7d425b4ef 100644 --- a/api/core/v1/oasf.go +++ b/api/core/v1/oasf.go @@ -1,61 +1,61 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - decodingv1 "buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go/agntcy/oasfsdk/decoding/v1" - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - typesv1alpha2 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha2" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -// DecodedRecord is an interface representing a decoded OASF record. -// It provides methods to access the underlying record data. -type DecodedRecord interface { - // GetRecord returns the underlying record data, which can be of supported type. 
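// Sketch of guarding conversion with IsValidCID, using the valid-CID vector
// from the tests above; digestForCID is a hypothetical helper name.
package main

import (
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	ocidigest "github.com/opencontainers/go-digest"
)

func digestForCID(cidStr string) (ocidigest.Digest, error) {
	// IsValidCID only checks that the string decodes as a CID;
	// ConvertCIDToDigest additionally requires a SHA2-256 multihash.
	if !corev1.IsValidCID(cidStr) {
		return "", fmt.Errorf("not a valid CID: %q", cidStr)
	}

	return corev1.ConvertCIDToDigest(cidStr)
}

func main() {
	fmt.Println(digestForCID("baeareig77vqcdozl2wyk6z3cscaj5q5fggi53aoh64fewkdiri3cdauyn4"))
}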
- GetRecord() any - - // HasV1Alpha0 checks if the record is of type V1Alpha0. - HasV1Alpha0() bool - GetV1Alpha0() *typesv1alpha0.Record - - // HasV1Alpha1 checks if the record is of type V1Alpha1. - HasV1Alpha1() bool - GetV1Alpha1() *typesv1alpha1.Record - - // HasV1Alpha2 checks if the record is of type V1Alpha2. - HasV1Alpha2() bool - GetV1Alpha2() *typesv1alpha2.Record -} - -type decodedRecord struct { - *decodingv1.DecodeRecordResponse -} - -func (d *decodedRecord) GetRecord() any { - if d == nil || d.DecodeRecordResponse == nil { - return nil - } - - switch data := d.DecodeRecordResponse.GetRecord().(type) { - case *decodingv1.DecodeRecordResponse_V1Alpha0: - return data.V1Alpha0 - case *decodingv1.DecodeRecordResponse_V1Alpha1: - return data.V1Alpha1 - case *decodingv1.DecodeRecordResponse_V1Alpha2: - return data.V1Alpha2 - default: - return nil - } -} - -// New creates a Record for a supported OASF typed record. -func New[T typesv1alpha0.Record | typesv1alpha1.Record | typesv1alpha2.Record](record *T) *Record { - data, _ := decoder.StructToProto(record) - - return &Record{ - Data: data, - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + decodingv1 "buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go/agntcy/oasfsdk/decoding/v1" + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + typesv1alpha2 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha2" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +// DecodedRecord is an interface representing a decoded OASF record. +// It provides methods to access the underlying record data. +type DecodedRecord interface { + // GetRecord returns the underlying record data, which can be of supported type. + GetRecord() any + + // HasV1Alpha0 checks if the record is of type V1Alpha0. + HasV1Alpha0() bool + GetV1Alpha0() *typesv1alpha0.Record + + // HasV1Alpha1 checks if the record is of type V1Alpha1. + HasV1Alpha1() bool + GetV1Alpha1() *typesv1alpha1.Record + + // HasV1Alpha2 checks if the record is of type V1Alpha2. + HasV1Alpha2() bool + GetV1Alpha2() *typesv1alpha2.Record +} + +type decodedRecord struct { + *decodingv1.DecodeRecordResponse +} + +func (d *decodedRecord) GetRecord() any { + if d == nil || d.DecodeRecordResponse == nil { + return nil + } + + switch data := d.DecodeRecordResponse.GetRecord().(type) { + case *decodingv1.DecodeRecordResponse_V1Alpha0: + return data.V1Alpha0 + case *decodingv1.DecodeRecordResponse_V1Alpha1: + return data.V1Alpha1 + case *decodingv1.DecodeRecordResponse_V1Alpha2: + return data.V1Alpha2 + default: + return nil + } +} + +// New creates a Record for a supported OASF typed record. 
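// Sketch, written as if inside package v1 with the imports above: recovering
// the concrete type from GetRecord, mirroring decodedRecord.GetRecord's own
// switch. schemaName is a hypothetical helper, not part of this change.
func schemaName(d DecodedRecord) string {
	switch d.GetRecord().(type) {
	case *typesv1alpha0.Record:
		return "v1alpha0"
	case *typesv1alpha1.Record:
		return "v1alpha1"
	case *typesv1alpha2.Record:
		return "v1alpha2"
	default:
		return "unknown" // nil record data or an unsupported version
	}
}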
+func New[T typesv1alpha0.Record | typesv1alpha1.Record | typesv1alpha2.Record](record *T) *Record { + data, _ := decoder.StructToProto(record) + + return &Record{ + Data: data, + } +} diff --git a/api/core/v1/record.go b/api/core/v1/record.go index fce9fa276..30c75a023 100644 --- a/api/core/v1/record.go +++ b/api/core/v1/record.go @@ -1,234 +1,234 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "sync" - "time" - - "github.com/agntcy/oasf-sdk/pkg/decoder" - "github.com/agntcy/oasf-sdk/pkg/validator" - "google.golang.org/protobuf/proto" -) - -const ( - maxRecordSize = 1024 * 1024 * 4 // 4MB - - // DefaultSchemaURL is the default OASF schema URL for API-based validation. - DefaultSchemaURL = "https://schema.oasf.outshift.com" - - // DefaultValidationTimeout is the default timeout for API-based validation HTTP calls. - // This ensures validation doesn't block indefinitely if the OASF server is slow or unreachable. - DefaultValidationTimeout = 30 * time.Second -) - -var ( - defaultValidator *validator.Validator - configMu sync.RWMutex - schemaURL = DefaultSchemaURL - disableAPIValidation = false - strictValidation = true -) - -func init() { - var err error - - defaultValidator, err = validator.New() - if err != nil { - panic(fmt.Sprintf("failed to initialize OASF-SDK validator: %v", err)) - } -} - -// SetSchemaURL configures the schema URL to use for API-based validation. -// This function is thread-safe and can be called concurrently with validation operations. -func SetSchemaURL(url string) { - configMu.Lock() - defer configMu.Unlock() - - schemaURL = url -} - -// SetDisableAPIValidation configures whether to disable API-based validation. -// When true, embedded schemas will be used instead of the API validator. -// This function is thread-safe and can be called concurrently with validation operations. -func SetDisableAPIValidation(disable bool) { - configMu.Lock() - defer configMu.Unlock() - - disableAPIValidation = disable -} - -// SetStrictValidation configures whether to use strict validation mode. -// When true, uses strict validation (fails on unknown attributes, deprecated fields, etc.). -// When false, uses lax validation (more permissive, only fails on critical errors). -// This function is thread-safe and can be called concurrently with validation operations. -func SetStrictValidation(strict bool) { - configMu.Lock() - defer configMu.Unlock() - - strictValidation = strict -} - -// GetCid calculates and returns the CID for this record. -// The CID is calculated from the record's content using CIDv1, codec 1, SHA2-256. -// Uses canonical JSON marshaling to ensure consistent, cross-language compatible results. -// Returns empty string if calculation fails. -func (r *Record) GetCid() string { - if r == nil || r.GetData() == nil { - return "" - } - - // Use canonical marshaling for CID calculation - canonicalBytes, err := r.Marshal() - if err != nil { - return "" - } - - // Calculate digest using local utilities - digest, err := CalculateDigest(canonicalBytes) - if err != nil { - return "" - } - - // Convert digest to CID using local utilities - cid, err := ConvertDigestToCID(digest) - if err != nil { - return "" - } - - return cid -} - -// Marshal marshals the Record using canonical JSON serialization. -// This ensures deterministic, cross-language compatible byte representation. 
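// Sketch of the generic constructor above, written as if inside package v1.
// Note that New discards the StructToProto error, so an unconvertible value
// presumably leaves Data nil, in which case GetCid returns "". The typed
// record's fields come from the OASF schema, not this diff, so none are set.
func newRecordCID() string {
	rec := New(&typesv1alpha2.Record{ /* fields per the OASF v1alpha2 schema */ })

	return rec.GetCid()
}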
-// The output represents the pure Record data and is used for both CID calculation and storage. -func (r *Record) Marshal() ([]byte, error) { - if r == nil || r.GetData() == nil { - return nil, nil - } - - // Extract the data marshal it canonically - // Use regular JSON marshaling to match the format users work with - // Step 1: Convert to JSON using regular json.Marshal (consistent with cli/cmd/pull) - jsonBytes, err := json.Marshal(r.GetData()) - if err != nil { - return nil, fmt.Errorf("failed to marshal Record: %w", err) - } - - // Step 2: Parse and re-marshal to ensure deterministic map key ordering. - // This is critical - maps must have consistent key order for deterministic results. - var normalized interface{} - if err := json.Unmarshal(jsonBytes, &normalized); err != nil { - return nil, fmt.Errorf("failed to normalize JSON for canonical ordering: %w", err) - } - - // Step 3: Marshal with sorted keys for deterministic output. - // encoding/json.Marshal sorts map keys alphabetically. - canonicalBytes, err := json.Marshal(normalized) - if err != nil { - return nil, fmt.Errorf("failed to marshal normalized JSON with sorted keys: %w", err) - } - - return canonicalBytes, nil -} - -func (r *Record) GetSchemaVersion() string { - if r == nil || r.GetData() == nil { - return "" - } - - // Get schema version from raw using OASF SDK - schemaVersion, _ := decoder.GetRecordSchemaVersion(r.GetData()) - - return schemaVersion -} - -// Decode decodes the Record's data into a concrete type using the OASF SDK. -func (r *Record) Decode() (DecodedRecord, error) { - if r == nil || r.GetData() == nil { - return nil, errors.New("record is nil") - } - - // Decode the record using OASF SDK - decoded, err := decoder.DecodeRecord(r.GetData()) - if err != nil { - return nil, fmt.Errorf("failed to decode Record: %w", err) - } - - // Wrap in our DecodedRecord interface - return &decodedRecord{ - DecodeRecordResponse: decoded, - }, nil -} - -// Validate validates the Record's data against its embedded schema using the OASF SDK. -func (r *Record) Validate(ctx context.Context) (bool, []string, error) { - if r == nil || r.GetData() == nil { - return false, []string{"record is nil"}, nil - } - - recordSize := proto.Size(r) - if recordSize > maxRecordSize { - return false, []string{fmt.Sprintf("record size %d bytes exceeds maximum allowed size of %d bytes (4MB)", recordSize, maxRecordSize)}, nil - } - - // Validate the record using OASF SDK - // Read configuration atomically to avoid race conditions - configMu.RLock() - - currentSchemaURL := schemaURL - currentDisableAPIValidation := disableAPIValidation - currentStrictValidation := strictValidation - - configMu.RUnlock() - - // If API validation is not disabled, use API-based validation with configured schema URL - if !currentDisableAPIValidation { - // Create a context with timeout for API validation HTTP calls. - // We use the caller's context as parent so validation respects cancellation, - // but add our own timeout to prevent hanging if the OASF server is slow/unreachable. 
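// Standalone, runnable sketch of the three canonicalization steps Marshal uses
// above: encoding/json sorts map keys when encoding maps, which is what makes
// the decode/re-encode round trip deterministic.
package main

import (
	"encoding/json"
	"fmt"
)

func canonicalJSON(v any) ([]byte, error) {
	raw, err := json.Marshal(v) // step 1: plain JSON
	if err != nil {
		return nil, err
	}

	var normalized any // step 2: decode into generic maps/slices
	if err := json.Unmarshal(raw, &normalized); err != nil {
		return nil, err
	}

	return json.Marshal(normalized) // step 3: re-encode; map keys come out sorted
}

func main() {
	b, _ := canonicalJSON(map[string]int{"b": 2, "a": 1})
	fmt.Println(string(b)) // {"a":1,"b":2}
}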
- validationCtx, cancel := context.WithTimeout(ctx, DefaultValidationTimeout) - defer cancel() - - //nolint:wrapcheck - return defaultValidator.ValidateRecord( - validationCtx, - r.GetData(), - validator.WithSchemaURL(currentSchemaURL), - validator.WithStrict(currentStrictValidation), - ) - } - - // Use embedded schemas (no HTTP calls, so we can use the original context) - //nolint:wrapcheck - return defaultValidator.ValidateRecord(ctx, r.GetData()) -} - -// UnmarshalRecord unmarshals canonical Record JSON bytes to a Record. -func UnmarshalRecord(data []byte) (*Record, error) { - // Load data from JSON bytes - dataStruct, err := decoder.JsonToProto(data) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal Record: %w", err) - } - - // Construct a record - record := &Record{ - Data: dataStruct, - } - - // If we can decode the record, then it is structurally valid. - // Loaded record may be syntactically valid but semantically invalid (e.g. missing required fields). - // We leave full semantic validation to the caller. - _, err = record.Decode() - if err != nil { - return nil, fmt.Errorf("failed to decode Record: %w", err) - } - - return record, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + "github.com/agntcy/oasf-sdk/pkg/decoder" + "github.com/agntcy/oasf-sdk/pkg/validator" + "google.golang.org/protobuf/proto" +) + +const ( + maxRecordSize = 1024 * 1024 * 4 // 4MB + + // DefaultSchemaURL is the default OASF schema URL for API-based validation. + DefaultSchemaURL = "https://schema.oasf.outshift.com" + + // DefaultValidationTimeout is the default timeout for API-based validation HTTP calls. + // This ensures validation doesn't block indefinitely if the OASF server is slow or unreachable. + DefaultValidationTimeout = 30 * time.Second +) + +var ( + defaultValidator *validator.Validator + configMu sync.RWMutex + schemaURL = DefaultSchemaURL + disableAPIValidation = false + strictValidation = true +) + +func init() { + var err error + + defaultValidator, err = validator.New() + if err != nil { + panic(fmt.Sprintf("failed to initialize OASF-SDK validator: %v", err)) + } +} + +// SetSchemaURL configures the schema URL to use for API-based validation. +// This function is thread-safe and can be called concurrently with validation operations. +func SetSchemaURL(url string) { + configMu.Lock() + defer configMu.Unlock() + + schemaURL = url +} + +// SetDisableAPIValidation configures whether to disable API-based validation. +// When true, embedded schemas will be used instead of the API validator. +// This function is thread-safe and can be called concurrently with validation operations. +func SetDisableAPIValidation(disable bool) { + configMu.Lock() + defer configMu.Unlock() + + disableAPIValidation = disable +} + +// SetStrictValidation configures whether to use strict validation mode. +// When true, uses strict validation (fails on unknown attributes, deprecated fields, etc.). +// When false, uses lax validation (more permissive, only fails on critical errors). +// This function is thread-safe and can be called concurrently with validation operations. +func SetStrictValidation(strict bool) { + configMu.Lock() + defer configMu.Unlock() + + strictValidation = strict +} + +// GetCid calculates and returns the CID for this record. +// The CID is calculated from the record's content using CIDv1, codec 1, SHA2-256. 
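// Sketch of typical startup configuration for the setters above; the URL is a
// hypothetical placeholder, and corev1 aliases the package as in the earlier
// sketches. All three setters take configMu, so they are safe to call
// concurrently with Validate.
func configureValidation() {
	corev1.SetSchemaURL("https://schema.example.internal") // overrides DefaultSchemaURL
	corev1.SetStrictValidation(false)                      // lax mode: only critical errors fail
	corev1.SetDisableAPIValidation(true)                   // embedded schemas, no HTTP calls
}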
+// Uses canonical JSON marshaling to ensure consistent, cross-language compatible results. +// Returns empty string if calculation fails. +func (r *Record) GetCid() string { + if r == nil || r.GetData() == nil { + return "" + } + + // Use canonical marshaling for CID calculation + canonicalBytes, err := r.Marshal() + if err != nil { + return "" + } + + // Calculate digest using local utilities + digest, err := CalculateDigest(canonicalBytes) + if err != nil { + return "" + } + + // Convert digest to CID using local utilities + cid, err := ConvertDigestToCID(digest) + if err != nil { + return "" + } + + return cid +} + +// Marshal marshals the Record using canonical JSON serialization. +// This ensures deterministic, cross-language compatible byte representation. +// The output represents the pure Record data and is used for both CID calculation and storage. +func (r *Record) Marshal() ([]byte, error) { + if r == nil || r.GetData() == nil { + return nil, nil + } + + // Extract the data marshal it canonically + // Use regular JSON marshaling to match the format users work with + // Step 1: Convert to JSON using regular json.Marshal (consistent with cli/cmd/pull) + jsonBytes, err := json.Marshal(r.GetData()) + if err != nil { + return nil, fmt.Errorf("failed to marshal Record: %w", err) + } + + // Step 2: Parse and re-marshal to ensure deterministic map key ordering. + // This is critical - maps must have consistent key order for deterministic results. + var normalized interface{} + if err := json.Unmarshal(jsonBytes, &normalized); err != nil { + return nil, fmt.Errorf("failed to normalize JSON for canonical ordering: %w", err) + } + + // Step 3: Marshal with sorted keys for deterministic output. + // encoding/json.Marshal sorts map keys alphabetically. + canonicalBytes, err := json.Marshal(normalized) + if err != nil { + return nil, fmt.Errorf("failed to marshal normalized JSON with sorted keys: %w", err) + } + + return canonicalBytes, nil +} + +func (r *Record) GetSchemaVersion() string { + if r == nil || r.GetData() == nil { + return "" + } + + // Get schema version from raw using OASF SDK + schemaVersion, _ := decoder.GetRecordSchemaVersion(r.GetData()) + + return schemaVersion +} + +// Decode decodes the Record's data into a concrete type using the OASF SDK. +func (r *Record) Decode() (DecodedRecord, error) { + if r == nil || r.GetData() == nil { + return nil, errors.New("record is nil") + } + + // Decode the record using OASF SDK + decoded, err := decoder.DecodeRecord(r.GetData()) + if err != nil { + return nil, fmt.Errorf("failed to decode Record: %w", err) + } + + // Wrap in our DecodedRecord interface + return &decodedRecord{ + DecodeRecordResponse: decoded, + }, nil +} + +// Validate validates the Record's data against its embedded schema using the OASF SDK. 
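// Sketch of version dispatch through the DecodedRecord accessors defined in
// oasf.go, as an alternative to a type switch on GetRecord; describeRecord is
// a hypothetical helper, and corev1 aliases the package as in earlier sketches.
func describeRecord(rec *corev1.Record) error {
	decoded, err := rec.Decode()
	if err != nil {
		return err
	}

	switch {
	case decoded.HasV1Alpha2():
		_ = decoded.GetV1Alpha2() // *typesv1alpha2.Record
	case decoded.HasV1Alpha1():
		_ = decoded.GetV1Alpha1() // *typesv1alpha1.Record
	case decoded.HasV1Alpha0():
		_ = decoded.GetV1Alpha0() // *typesv1alpha0.Record
	}

	return nil
}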
+func (r *Record) Validate(ctx context.Context) (bool, []string, error) { + if r == nil || r.GetData() == nil { + return false, []string{"record is nil"}, nil + } + + recordSize := proto.Size(r) + if recordSize > maxRecordSize { + return false, []string{fmt.Sprintf("record size %d bytes exceeds maximum allowed size of %d bytes (4MB)", recordSize, maxRecordSize)}, nil + } + + // Validate the record using OASF SDK + // Read configuration atomically to avoid race conditions + configMu.RLock() + + currentSchemaURL := schemaURL + currentDisableAPIValidation := disableAPIValidation + currentStrictValidation := strictValidation + + configMu.RUnlock() + + // If API validation is not disabled, use API-based validation with configured schema URL + if !currentDisableAPIValidation { + // Create a context with timeout for API validation HTTP calls. + // We use the caller's context as parent so validation respects cancellation, + // but add our own timeout to prevent hanging if the OASF server is slow/unreachable. + validationCtx, cancel := context.WithTimeout(ctx, DefaultValidationTimeout) + defer cancel() + + //nolint:wrapcheck + return defaultValidator.ValidateRecord( + validationCtx, + r.GetData(), + validator.WithSchemaURL(currentSchemaURL), + validator.WithStrict(currentStrictValidation), + ) + } + + // Use embedded schemas (no HTTP calls, so we can use the original context) + //nolint:wrapcheck + return defaultValidator.ValidateRecord(ctx, r.GetData()) +} + +// UnmarshalRecord unmarshals canonical Record JSON bytes to a Record. +func UnmarshalRecord(data []byte) (*Record, error) { + // Load data from JSON bytes + dataStruct, err := decoder.JsonToProto(data) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal Record: %w", err) + } + + // Construct a record + record := &Record{ + Data: dataStruct, + } + + // If we can decode the record, then it is structurally valid. + // Loaded record may be syntactically valid but semantically invalid (e.g. missing required fields). + // We leave full semantic validation to the caller. + _, err = record.Decode() + if err != nil { + return nil, fmt.Errorf("failed to decode Record: %w", err) + } + + return record, nil +} diff --git a/api/core/v1/record.pb.go b/api/core/v1/record.pb.go index 9cdfe7104..84406aed4 100644 --- a/api/core/v1/record.pb.go +++ b/api/core/v1/record.pb.go @@ -1,409 +1,409 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/core/v1/record.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - structpb "google.golang.org/protobuf/types/known/structpb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Defines a reference or a globally unique content identifier of a record. -type RecordRef struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Globally-unique content identifier (CID) of the record. 
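// Sketch tying UnmarshalRecord and Validate together: unmarshaling performs
// only a structural decode, so a semantic pass is still needed afterwards.
// loadAndValidate is a hypothetical helper; corev1 aliases the package as in
// the earlier sketches.
func loadAndValidate(ctx context.Context, jsonBytes []byte) error {
	rec, err := corev1.UnmarshalRecord(jsonBytes) // structural decode only
	if err != nil {
		return err
	}

	valid, issues, err := rec.Validate(ctx) // semantic validation
	if err != nil {
		return err
	}

	if !valid {
		return fmt.Errorf("invalid record: %v", issues)
	}

	return nil
}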
- // Specs: https://github.com/multiformats/cid - Cid string `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordRef) Reset() { - *x = RecordRef{} - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordRef) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordRef) ProtoMessage() {} - -func (x *RecordRef) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordRef.ProtoReflect.Descriptor instead. -func (*RecordRef) Descriptor() ([]byte, []int) { - return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{0} -} - -func (x *RecordRef) GetCid() string { - if x != nil { - return x.Cid - } - return "" -} - -// Defines metadata about a record. -type RecordMeta struct { - state protoimpl.MessageState `protogen:"open.v1"` - // CID of the record. - Cid string `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` - // Annotations attached to the record. - Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Schema version of the record. - SchemaVersion string `protobuf:"bytes,3,opt,name=schema_version,json=schemaVersion,proto3" json:"schema_version,omitempty"` - // Creation timestamp of the record in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordMeta) Reset() { - *x = RecordMeta{} - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordMeta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordMeta) ProtoMessage() {} - -func (x *RecordMeta) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordMeta.ProtoReflect.Descriptor instead. -func (*RecordMeta) Descriptor() ([]byte, []int) { - return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{1} -} - -func (x *RecordMeta) GetCid() string { - if x != nil { - return x.Cid - } - return "" -} - -func (x *RecordMeta) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -func (x *RecordMeta) GetSchemaVersion() string { - if x != nil { - return x.SchemaVersion - } - return "" -} - -func (x *RecordMeta) GetCreatedAt() string { - if x != nil { - return x.CreatedAt - } - return "" -} - -// Record is a generic object that encapsulates data of different Record types. 
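// Sketch populating the RecordRef and RecordMeta messages above for an
// existing record; the annotation value is illustrative and refAndMeta is a
// hypothetical helper. Timestamps use RFC3339, per the field docs.
func refAndMeta(rec *corev1.Record) (*corev1.RecordRef, *corev1.RecordMeta) {
	ref := &corev1.RecordRef{Cid: rec.GetCid()}

	meta := &corev1.RecordMeta{
		Cid:           ref.GetCid(),
		SchemaVersion: rec.GetSchemaVersion(),
		CreatedAt:     time.Now().UTC().Format(time.RFC3339),
		Annotations:   map[string]string{"source": "example"},
	}

	return ref, meta
}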
-// -// Supported schemas: -// -// v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent -// v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record -type Record struct { - state protoimpl.MessageState `protogen:"open.v1"` - Data *structpb.Struct `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Record) Reset() { - *x = Record{} - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Record) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Record) ProtoMessage() {} - -func (x *Record) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Record.ProtoReflect.Descriptor instead. -func (*Record) Descriptor() ([]byte, []int) { - return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{2} -} - -func (x *Record) GetData() *structpb.Struct { - if x != nil { - return x.Data - } - return nil -} - -// RecordReferrer represents a referrer object or an association -// to a record. The actual structure of the referrer object can vary -// depending on the type of referrer (e.g., signature, public key, etc.). -// -// RecordReferrer types in the `agntcy.dir.` namespace are reserved for -// Directory-specific schemas and will be validated across Dir services. -type RecordReferrer struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The type of the referrer. - // For example, "agntcy.dir.sign.v1.Signature" for signatures. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Record reference to which this referrer is associated. - RecordRef *RecordRef `protobuf:"bytes,2,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // Annotations attached to the referrer object. - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Creation timestamp of the record in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - // The actual data of the referrer. - Data *structpb.Struct `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordReferrer) Reset() { - *x = RecordReferrer{} - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordReferrer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordReferrer) ProtoMessage() {} - -func (x *RecordReferrer) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordReferrer.ProtoReflect.Descriptor instead. 
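// Sketch attaching a referrer to a record. The type string is the example
// given in the RecordReferrer field documentation above; the payload is a
// caller-supplied *structpb.Struct, and signatureReferrer is a hypothetical
// helper.
func signatureReferrer(rec *corev1.Record, payload *structpb.Struct) *corev1.RecordReferrer {
	return &corev1.RecordReferrer{
		Type:      "agntcy.dir.sign.v1.Signature",
		RecordRef: &corev1.RecordRef{Cid: rec.GetCid()},
		CreatedAt: time.Now().UTC().Format(time.RFC3339),
		Data:      payload,
	}
}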
-func (*RecordReferrer) Descriptor() ([]byte, []int) { - return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{3} -} - -func (x *RecordReferrer) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *RecordReferrer) GetRecordRef() *RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *RecordReferrer) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -func (x *RecordReferrer) GetCreatedAt() string { - if x != nil { - return x.CreatedAt - } - return "" -} - -func (x *RecordReferrer) GetData() *structpb.Struct { - if x != nil { - return x.Data - } - return nil -} - -var File_agntcy_dir_core_v1_record_proto protoreflect.FileDescriptor - -var file_agntcy_dir_core_v1_record_proto_rawDesc = string([]byte{ - 0x0a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, - 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x1d, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, - 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, - 0x69, 0x64, 0x22, 0xf7, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, 0x74, - 0x61, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x63, 0x69, 0x64, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x3e, 0x0a, 0x10, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x35, 0x0a, 0x06, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 
0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, - 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3e, 0x0a, 0x10, 0x41, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xb3, 0x01, 0x0a, 0x16, - 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x43, 0xaa, 0x02, - 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x43, 0x6f, 0x72, 0x65, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, - 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_core_v1_record_proto_rawDescOnce sync.Once - file_agntcy_dir_core_v1_record_proto_rawDescData []byte -) - -func file_agntcy_dir_core_v1_record_proto_rawDescGZIP() []byte { - file_agntcy_dir_core_v1_record_proto_rawDescOnce.Do(func() { - file_agntcy_dir_core_v1_record_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_core_v1_record_proto_rawDesc), len(file_agntcy_dir_core_v1_record_proto_rawDesc))) - }) - return file_agntcy_dir_core_v1_record_proto_rawDescData -} - -var 
file_agntcy_dir_core_v1_record_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_agntcy_dir_core_v1_record_proto_goTypes = []any{ - (*RecordRef)(nil), // 0: agntcy.dir.core.v1.RecordRef - (*RecordMeta)(nil), // 1: agntcy.dir.core.v1.RecordMeta - (*Record)(nil), // 2: agntcy.dir.core.v1.Record - (*RecordReferrer)(nil), // 3: agntcy.dir.core.v1.RecordReferrer - nil, // 4: agntcy.dir.core.v1.RecordMeta.AnnotationsEntry - nil, // 5: agntcy.dir.core.v1.RecordReferrer.AnnotationsEntry - (*structpb.Struct)(nil), // 6: google.protobuf.Struct -} -var file_agntcy_dir_core_v1_record_proto_depIdxs = []int32{ - 4, // 0: agntcy.dir.core.v1.RecordMeta.annotations:type_name -> agntcy.dir.core.v1.RecordMeta.AnnotationsEntry - 6, // 1: agntcy.dir.core.v1.Record.data:type_name -> google.protobuf.Struct - 0, // 2: agntcy.dir.core.v1.RecordReferrer.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 5, // 3: agntcy.dir.core.v1.RecordReferrer.annotations:type_name -> agntcy.dir.core.v1.RecordReferrer.AnnotationsEntry - 6, // 4: agntcy.dir.core.v1.RecordReferrer.data:type_name -> google.protobuf.Struct - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_core_v1_record_proto_init() } -func file_agntcy_dir_core_v1_record_proto_init() { - if File_agntcy_dir_core_v1_record_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_core_v1_record_proto_rawDesc), len(file_agntcy_dir_core_v1_record_proto_rawDesc)), - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_core_v1_record_proto_goTypes, - DependencyIndexes: file_agntcy_dir_core_v1_record_proto_depIdxs, - MessageInfos: file_agntcy_dir_core_v1_record_proto_msgTypes, - }.Build() - File_agntcy_dir_core_v1_record_proto = out.File - file_agntcy_dir_core_v1_record_proto_goTypes = nil - file_agntcy_dir_core_v1_record_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/core/v1/record.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines a reference or a globally unique content identifier of a record. +type RecordRef struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Globally-unique content identifier (CID) of the record. 
+ // Specs: https://github.com/multiformats/cid + Cid string `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordRef) Reset() { + *x = RecordRef{} + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordRef) ProtoMessage() {} + +func (x *RecordRef) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordRef.ProtoReflect.Descriptor instead. +func (*RecordRef) Descriptor() ([]byte, []int) { + return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{0} +} + +func (x *RecordRef) GetCid() string { + if x != nil { + return x.Cid + } + return "" +} + +// Defines metadata about a record. +type RecordMeta struct { + state protoimpl.MessageState `protogen:"open.v1"` + // CID of the record. + Cid string `protobuf:"bytes,1,opt,name=cid,proto3" json:"cid,omitempty"` + // Annotations attached to the record. + Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Schema version of the record. + SchemaVersion string `protobuf:"bytes,3,opt,name=schema_version,json=schemaVersion,proto3" json:"schema_version,omitempty"` + // Creation timestamp of the record in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordMeta) Reset() { + *x = RecordMeta{} + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordMeta) ProtoMessage() {} + +func (x *RecordMeta) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordMeta.ProtoReflect.Descriptor instead. +func (*RecordMeta) Descriptor() ([]byte, []int) { + return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{1} +} + +func (x *RecordMeta) GetCid() string { + if x != nil { + return x.Cid + } + return "" +} + +func (x *RecordMeta) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *RecordMeta) GetSchemaVersion() string { + if x != nil { + return x.SchemaVersion + } + return "" +} + +func (x *RecordMeta) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +// Record is a generic object that encapsulates data of different Record types. 
+// +// Supported schemas: +// +// v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent +// v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record +type Record struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data *structpb.Struct `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Record) Reset() { + *x = Record{} + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Record) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Record) ProtoMessage() {} + +func (x *Record) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Record.ProtoReflect.Descriptor instead. +func (*Record) Descriptor() ([]byte, []int) { + return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{2} +} + +func (x *Record) GetData() *structpb.Struct { + if x != nil { + return x.Data + } + return nil +} + +// RecordReferrer represents a referrer object or an association +// to a record. The actual structure of the referrer object can vary +// depending on the type of referrer (e.g., signature, public key, etc.). +// +// RecordReferrer types in the `agntcy.dir.` namespace are reserved for +// Directory-specific schemas and will be validated across Dir services. +type RecordReferrer struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The type of the referrer. + // For example, "agntcy.dir.sign.v1.Signature" for signatures. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Record reference to which this referrer is associated. + RecordRef *RecordRef `protobuf:"bytes,2,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // Annotations attached to the referrer object. + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Creation timestamp of the record in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + CreatedAt string `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // The actual data of the referrer. + Data *structpb.Struct `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordReferrer) Reset() { + *x = RecordReferrer{} + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordReferrer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordReferrer) ProtoMessage() {} + +func (x *RecordReferrer) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_core_v1_record_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordReferrer.ProtoReflect.Descriptor instead. 
+func (*RecordReferrer) Descriptor() ([]byte, []int) { + return file_agntcy_dir_core_v1_record_proto_rawDescGZIP(), []int{3} +} + +func (x *RecordReferrer) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *RecordReferrer) GetRecordRef() *RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *RecordReferrer) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *RecordReferrer) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *RecordReferrer) GetData() *structpb.Struct { + if x != nil { + return x.Data + } + return nil +} + +var File_agntcy_dir_core_v1_record_proto protoreflect.FileDescriptor + +var file_agntcy_dir_core_v1_record_proto_rawDesc = string([]byte{ + 0x0a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x1d, 0x0a, 0x09, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, + 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, + 0x69, 0x64, 0x22, 0xf7, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x63, 0x69, 0x64, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x3e, 0x0a, 0x10, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x35, 0x0a, 0x06, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x2b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 
0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, + 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3e, 0x0a, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xb3, 0x01, 0x0a, 0x16, + 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x43, 0xaa, 0x02, + 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x43, 0x6f, 0x72, 0x65, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, + 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_core_v1_record_proto_rawDescOnce sync.Once + file_agntcy_dir_core_v1_record_proto_rawDescData []byte +) + +func file_agntcy_dir_core_v1_record_proto_rawDescGZIP() []byte { + file_agntcy_dir_core_v1_record_proto_rawDescOnce.Do(func() { + file_agntcy_dir_core_v1_record_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_core_v1_record_proto_rawDesc), len(file_agntcy_dir_core_v1_record_proto_rawDesc))) + }) + return file_agntcy_dir_core_v1_record_proto_rawDescData +} + +var 
file_agntcy_dir_core_v1_record_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_agntcy_dir_core_v1_record_proto_goTypes = []any{ + (*RecordRef)(nil), // 0: agntcy.dir.core.v1.RecordRef + (*RecordMeta)(nil), // 1: agntcy.dir.core.v1.RecordMeta + (*Record)(nil), // 2: agntcy.dir.core.v1.Record + (*RecordReferrer)(nil), // 3: agntcy.dir.core.v1.RecordReferrer + nil, // 4: agntcy.dir.core.v1.RecordMeta.AnnotationsEntry + nil, // 5: agntcy.dir.core.v1.RecordReferrer.AnnotationsEntry + (*structpb.Struct)(nil), // 6: google.protobuf.Struct +} +var file_agntcy_dir_core_v1_record_proto_depIdxs = []int32{ + 4, // 0: agntcy.dir.core.v1.RecordMeta.annotations:type_name -> agntcy.dir.core.v1.RecordMeta.AnnotationsEntry + 6, // 1: agntcy.dir.core.v1.Record.data:type_name -> google.protobuf.Struct + 0, // 2: agntcy.dir.core.v1.RecordReferrer.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 5, // 3: agntcy.dir.core.v1.RecordReferrer.annotations:type_name -> agntcy.dir.core.v1.RecordReferrer.AnnotationsEntry + 6, // 4: agntcy.dir.core.v1.RecordReferrer.data:type_name -> google.protobuf.Struct + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_core_v1_record_proto_init() } +func file_agntcy_dir_core_v1_record_proto_init() { + if File_agntcy_dir_core_v1_record_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_core_v1_record_proto_rawDesc), len(file_agntcy_dir_core_v1_record_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agntcy_dir_core_v1_record_proto_goTypes, + DependencyIndexes: file_agntcy_dir_core_v1_record_proto_depIdxs, + MessageInfos: file_agntcy_dir_core_v1_record_proto_msgTypes, + }.Build() + File_agntcy_dir_core_v1_record_proto = out.File + file_agntcy_dir_core_v1_record_proto_goTypes = nil + file_agntcy_dir_core_v1_record_proto_depIdxs = nil +} diff --git a/api/core/v1/record_test.go b/api/core/v1/record_test.go index 8c1cacb8d..25d913d1a 100644 --- a/api/core/v1/record_test.go +++ b/api/core/v1/record_test.go @@ -1,536 +1,536 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1_test - -import ( - "context" - "testing" - - oasfv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - oasfv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/types/known/structpb" -) - -func TestRecord_GetCid(t *testing.T) { - tests := []struct { - name string - record *corev1.Record - want string - wantErr bool - }{ - { - name: "v0.3.1 agent record", - record: corev1.New(&oasfv1alpha0.Record{ - Name: "test-agent", - SchemaVersion: "v0.3.1", - Description: "A test agent", - }), - wantErr: false, - }, - { - name: "v0.5.0 record", - record: corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent-v2", - SchemaVersion: "v0.5.0", - Description: "A test agent in v0.5.0 record", - Version: "1.0.0", - Modules: []*oasfv1alpha1.Module{ - { - Name: "test-extension", - }, - }, - }), - wantErr: false, - }, 
- { - name: "nil record", - record: nil, - wantErr: true, - }, - { - name: "empty record", - record: &corev1.Record{}, - wantErr: true, // Empty record should fail - no OASF data to marshal - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cid := tt.record.GetCid() - - if tt.wantErr { - assert.Empty(t, cid) - - return - } - - assert.NotEmpty(t, cid) - - // CID should be consistent - calling it again should return the same value. - cid2 := tt.record.GetCid() - assert.Equal(t, cid, cid2, "CID should be deterministic") - - // CID should start with the CIDv1 prefix. - assert.Greater(t, len(cid), 10, "CID should be a reasonable length") - }) - } -} - -func TestRecord_GetCid_Consistency(t *testing.T) { - // Create two identical v0.3.1 records. - record1 := corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent", - SchemaVersion: "0.7.0", - Description: "A test agent", - }) - - record2 := corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent", - SchemaVersion: "0.7.0", - Description: "A test agent", - }) - - // Both records should have the same CID. - cid1 := record1.GetCid() - cid2 := record2.GetCid() - - assert.Equal(t, cid1, cid2, "Identical v0.3.1 records should have identical CIDs") -} - -func TestRecord_GetCid_CrossVersion_Difference(t *testing.T) { - // Create two different records - record1 := corev1.New(&oasfv1alpha0.Record{ - Name: "test-agent", - SchemaVersion: "0.3.1", - Description: "A test agent", - }) - - record2 := corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent", - SchemaVersion: "0.7.0", - Description: "A test agent", - }) - - // Both records should have the same CID. - cid1 := record1.GetCid() - cid2 := record2.GetCid() - - assert.NotEqual(t, cid1, cid2, "Different record versions should have different CIDs") -} - -func TestRecord_Validate(t *testing.T) { - tests := []struct { - name string - record *corev1.Record - wantValid bool - }{ - { - name: "valid 0.7.0 record", - record: corev1.New(&oasfv1alpha1.Record{ - Name: "valid-agent-v2", - SchemaVersion: "0.7.0", - Description: "A valid agent record", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - Authors: []string{ - "Jane Doe ", - }, - Locators: []*oasfv1alpha1.Locator{ - { - Type: "helm_chart", - Url: "https://example.com/helm-chart.tgz", - }, - }, - Skills: []*oasfv1alpha1.Skill{ - { - Name: "natural_language_processing/natural_language_understanding", - }, - }, - }), - wantValid: true, - }, - { - name: "invalid 0.7.0 record (missing required fields)", - record: corev1.New(&oasfv1alpha1.Record{ - Name: "invalid-agent-v2", - SchemaVersion: "v0.5.0", - Description: "An invalid agent record in v0.5.0 format", - Version: "1.0.0", - }), - wantValid: false, - }, - { - name: "nil record", - record: nil, - wantValid: false, - }, - { - name: "empty record", - record: &corev1.Record{}, - wantValid: false, - }, - { - name: "record with invalid generic data", - record: &corev1.Record{ - Data: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "invalid_field": { - Kind: &structpb.Value_StringValue{StringValue: "some value"}, - }, - }, - }, - }, - wantValid: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - valid, errors, err := tt.record.Validate(context.Background()) - if err != nil { - if tt.wantValid { - t.Errorf("Validate() unexpected error: %v", err) - } - - return - } - - if valid != tt.wantValid { - t.Errorf("Validate() got valid = %v, errors = %v, want %v", valid, errors, tt.wantValid) - } - - if !valid && len(errors) == 0 { - 
t.Errorf("Validate() expected errors for invalid record, got none") - } - }) - } -} - -func TestRecord_SetSchemaURL(t *testing.T) { - // Test that SetSchemaURL changes the package-level variable - // Note: This test modifies global state, so we should be careful about test isolation - - // Save original state - originalURL := "" - defer corev1.SetSchemaURL(originalURL) // Restore after test - - tests := []struct { - name string - schemaURL string - wantSet bool - }{ - { - name: "set valid schema URL", - schemaURL: "https://schema.oasf.outshift.com", - wantSet: true, - }, - { - name: "set empty schema URL (disable API validator)", - schemaURL: "", - wantSet: true, - }, - { - name: "set custom schema URL", - schemaURL: "https://custom.schema.url", - wantSet: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This should not panic or error - corev1.SetSchemaURL(tt.schemaURL) - - // We can't directly verify the internal state, but we can verify - // that calling SetSchemaURL doesn't panic and that validation - // still works afterwards (when using a valid URL or empty URL) - record := corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent", - SchemaVersion: "0.7.0", - Description: "A test agent", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - Authors: []string{ - "Jane Doe ", - }, - Locators: []*oasfv1alpha1.Locator{ - { - Type: "helm_chart", - Url: "https://example.com/helm-chart.tgz", - }, - }, - Skills: []*oasfv1alpha1.Skill{ - { - Name: "natural_language_processing/natural_language_understanding", - }, - }, - }) - - // Validation should work for valid URLs or empty URL (embedded validation) - // For invalid URLs, we expect a network error which is acceptable for this test - valid, _, err := record.Validate(context.Background()) - if err != nil { - // If it's a network error (like "no such host"), that's expected for invalid URLs - // and we just verify that SetSchemaURL didn't panic - if tt.schemaURL != "" && tt.schemaURL != "https://schema.oasf.outshift.com" { - // For custom/invalid URLs, network errors are expected - return - } - - t.Fatalf("Validate() error = %v", err) - } - - assert.True(t, valid) - }) - } -} - -// testRecordWithValidation is a helper function to create a test record and validate it. -// This reduces code duplication between similar tests. 
-func testRecordWithValidation(t *testing.T) { - t.Helper() - - record := corev1.New(&oasfv1alpha1.Record{ - Name: "test-agent", - SchemaVersion: "0.7.0", - Description: "A test agent", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - Authors: []string{ - "Jane Doe ", - }, - Locators: []*oasfv1alpha1.Locator{ - { - Type: "helm_chart", - Url: "https://example.com/helm-chart.tgz", - }, - }, - Skills: []*oasfv1alpha1.Skill{ - { - Name: "natural_language_processing/natural_language_understanding", - }, - }, - Modules: []*oasfv1alpha1.Module{ - { - Name: "test-extension", - }, - }, - }) - - // Validation should still work - valid, _, err := record.Validate(context.Background()) - if err != nil { - t.Fatalf("Validate() error = %v", err) - } - - assert.True(t, valid) -} - -func TestRecord_SetDisableAPIValidation(t *testing.T) { - // Test that SetDisableAPIValidation changes the package-level variable - // Note: This test modifies global state, so we should be careful about test isolation - - // Save original state - originalDisable := false - defer corev1.SetDisableAPIValidation(originalDisable) // Restore after test - - tests := []struct { - name string - disableAPI bool - wantDisableAPI bool - }{ - { - name: "disable API validation", - disableAPI: true, - wantDisableAPI: true, - }, - { - name: "enable API validation", - disableAPI: false, - wantDisableAPI: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This should not panic or error - corev1.SetDisableAPIValidation(tt.disableAPI) - - // Verify that calling SetDisableAPIValidation doesn't panic and that validation still works - testRecordWithValidation(t) - }) - } -} - -func TestRecord_SetStrictValidation(t *testing.T) { - // Test that SetStrictValidation changes the package-level variable - // Note: This test modifies global state, so we should be careful about test isolation - - // Save original state - originalStrict := true - defer corev1.SetStrictValidation(originalStrict) // Restore after test - - tests := []struct { - name string - strict bool - wantStrict bool - }{ - { - name: "enable strict validation", - strict: true, - wantStrict: true, - }, - { - name: "disable strict validation (lax mode)", - strict: false, - wantStrict: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // This should not panic or error - corev1.SetStrictValidation(tt.strict) - - // Verify that calling SetStrictValidation doesn't panic and that validation still works - testRecordWithValidation(t) - }) - } -} - -func TestRecord_Validate_RecordSize(t *testing.T) { - // Test that Validate checks record size - // Create a record that exceeds max size - // Note: This is difficult to test without creating a very large record, - // but we can test the nil and empty record cases which are part of the validation logic - tests := []struct { - name string - record *corev1.Record - wantValid bool - }{ - { - name: "nil record", - record: nil, - wantValid: false, - }, - { - name: "record with nil data", - record: &corev1.Record{}, - wantValid: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - valid, errors, err := tt.record.Validate(context.Background()) - if err != nil { - if tt.wantValid { - t.Errorf("Validate() unexpected error: %v", err) - } - - return - } - - if valid != tt.wantValid { - t.Errorf("Validate() got valid = %v, errors = %v, want %v", valid, errors, tt.wantValid) - } - - if !valid && len(errors) == 0 { - t.Errorf("Validate() expected 
errors for invalid record, got none") - } - }) - } -} - -func TestRecord_Decode(t *testing.T) { - tests := []struct { - name string - record *corev1.Record - wantResp interface{} - wantFail bool - }{ - { - name: "valid v0.3.1 record", - record: corev1.New(&oasfv1alpha0.Record{ - Name: "valid-agent-v2", - SchemaVersion: "v0.3.1", - Description: "A valid agent record", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - }), - wantResp: &oasfv1alpha0.Record{ - Name: "valid-agent-v2", - SchemaVersion: "v0.3.1", - Description: "A valid agent record", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - }, - }, - { - name: "valid 0.7.0 record", - record: corev1.New(&oasfv1alpha1.Record{ - Name: "valid-agent-v2", - SchemaVersion: "0.7.0", - Description: "A valid agent record", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - }), - wantResp: &oasfv1alpha1.Record{ - Name: "valid-agent-v2", - SchemaVersion: "0.7.0", - Description: "A valid agent record", - Version: "1.0.0", - CreatedAt: "2024-01-01T00:00:00Z", - }, - }, - { - name: "nil record", - record: nil, - wantFail: true, - }, - { - name: "empty record", - record: &corev1.Record{}, - wantFail: true, - }, - { - name: "record with invalid generic data", - record: &corev1.Record{ - Data: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "invalid_field": { - Kind: &structpb.Value_StringValue{StringValue: "some value"}, - }, - }, - }, - }, - wantFail: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.record.Decode() - if err != nil { - if !tt.wantFail { - t.Errorf("Decode() unexpected error: %v", err) - } - - return - } - - if got == nil { - t.Errorf("Decode() got nil record, want %v", tt.wantResp) - - return - } - - if !assert.EqualValues(t, tt.wantResp, got.GetRecord()) { - t.Errorf("Decode() got %v, want %v", got, tt.wantResp) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1_test + +import ( + "context" + "testing" + + oasfv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + oasfv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestRecord_GetCid(t *testing.T) { + tests := []struct { + name string + record *corev1.Record + want string + wantErr bool + }{ + { + name: "v0.3.1 agent record", + record: corev1.New(&oasfv1alpha0.Record{ + Name: "test-agent", + SchemaVersion: "v0.3.1", + Description: "A test agent", + }), + wantErr: false, + }, + { + name: "v0.5.0 record", + record: corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent-v2", + SchemaVersion: "v0.5.0", + Description: "A test agent in v0.5.0 record", + Version: "1.0.0", + Modules: []*oasfv1alpha1.Module{ + { + Name: "test-extension", + }, + }, + }), + wantErr: false, + }, + { + name: "nil record", + record: nil, + wantErr: true, + }, + { + name: "empty record", + record: &corev1.Record{}, + wantErr: true, // Empty record should fail - no OASF data to marshal + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cid := tt.record.GetCid() + + if tt.wantErr { + assert.Empty(t, cid) + + return + } + + assert.NotEmpty(t, cid) + + // CID should be consistent - calling it again should return the same value. 
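// --- Illustrative sketch: the assertions around this point show GetCid() is
// deterministic over the record payload, so a CID can serve as a
// content-address or dedup key. Hypothetical helper:
package recordutil

import corev1 "github.com/agntcy/dir/api/core/v1"

// SameContent reports whether two records carry identical OASF payloads.
// An empty CID signals a nil or empty record and never matches.
func SameContent(a, b *corev1.Record) bool {
	return a.GetCid() != "" && a.GetCid() == b.GetCid()
}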
+ cid2 := tt.record.GetCid() + assert.Equal(t, cid, cid2, "CID should be deterministic") + + // CID should start with the CIDv1 prefix. + assert.Greater(t, len(cid), 10, "CID should be a reasonable length") + }) + } +} + +func TestRecord_GetCid_Consistency(t *testing.T) { + // Create two identical v0.3.1 records. + record1 := corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent", + SchemaVersion: "0.7.0", + Description: "A test agent", + }) + + record2 := corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent", + SchemaVersion: "0.7.0", + Description: "A test agent", + }) + + // Both records should have the same CID. + cid1 := record1.GetCid() + cid2 := record2.GetCid() + + assert.Equal(t, cid1, cid2, "Identical v0.3.1 records should have identical CIDs") +} + +func TestRecord_GetCid_CrossVersion_Difference(t *testing.T) { + // Create two different records + record1 := corev1.New(&oasfv1alpha0.Record{ + Name: "test-agent", + SchemaVersion: "0.3.1", + Description: "A test agent", + }) + + record2 := corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent", + SchemaVersion: "0.7.0", + Description: "A test agent", + }) + + // Both records should have the same CID. + cid1 := record1.GetCid() + cid2 := record2.GetCid() + + assert.NotEqual(t, cid1, cid2, "Different record versions should have different CIDs") +} + +func TestRecord_Validate(t *testing.T) { + tests := []struct { + name string + record *corev1.Record + wantValid bool + }{ + { + name: "valid 0.7.0 record", + record: corev1.New(&oasfv1alpha1.Record{ + Name: "valid-agent-v2", + SchemaVersion: "0.7.0", + Description: "A valid agent record", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + Authors: []string{ + "Jane Doe ", + }, + Locators: []*oasfv1alpha1.Locator{ + { + Type: "helm_chart", + Url: "https://example.com/helm-chart.tgz", + }, + }, + Skills: []*oasfv1alpha1.Skill{ + { + Name: "natural_language_processing/natural_language_understanding", + }, + }, + }), + wantValid: true, + }, + { + name: "invalid 0.7.0 record (missing required fields)", + record: corev1.New(&oasfv1alpha1.Record{ + Name: "invalid-agent-v2", + SchemaVersion: "v0.5.0", + Description: "An invalid agent record in v0.5.0 format", + Version: "1.0.0", + }), + wantValid: false, + }, + { + name: "nil record", + record: nil, + wantValid: false, + }, + { + name: "empty record", + record: &corev1.Record{}, + wantValid: false, + }, + { + name: "record with invalid generic data", + record: &corev1.Record{ + Data: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "invalid_field": { + Kind: &structpb.Value_StringValue{StringValue: "some value"}, + }, + }, + }, + }, + wantValid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + valid, errors, err := tt.record.Validate(context.Background()) + if err != nil { + if tt.wantValid { + t.Errorf("Validate() unexpected error: %v", err) + } + + return + } + + if valid != tt.wantValid { + t.Errorf("Validate() got valid = %v, errors = %v, want %v", valid, errors, tt.wantValid) + } + + if !valid && len(errors) == 0 { + t.Errorf("Validate() expected errors for invalid record, got none") + } + }) + } +} + +func TestRecord_SetSchemaURL(t *testing.T) { + // Test that SetSchemaURL changes the package-level variable + // Note: This test modifies global state, so we should be careful about test isolation + + // Save original state + originalURL := "" + defer corev1.SetSchemaURL(originalURL) // Restore after test + + tests := []struct { + name string + schemaURL string + wantSet bool + }{ + { + 
name: "set valid schema URL", + schemaURL: "https://schema.oasf.outshift.com", + wantSet: true, + }, + { + name: "set empty schema URL (disable API validator)", + schemaURL: "", + wantSet: true, + }, + { + name: "set custom schema URL", + schemaURL: "https://custom.schema.url", + wantSet: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This should not panic or error + corev1.SetSchemaURL(tt.schemaURL) + + // We can't directly verify the internal state, but we can verify + // that calling SetSchemaURL doesn't panic and that validation + // still works afterwards (when using a valid URL or empty URL) + record := corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent", + SchemaVersion: "0.7.0", + Description: "A test agent", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + Authors: []string{ + "Jane Doe ", + }, + Locators: []*oasfv1alpha1.Locator{ + { + Type: "helm_chart", + Url: "https://example.com/helm-chart.tgz", + }, + }, + Skills: []*oasfv1alpha1.Skill{ + { + Name: "natural_language_processing/natural_language_understanding", + }, + }, + }) + + // Validation should work for valid URLs or empty URL (embedded validation) + // For invalid URLs, we expect a network error which is acceptable for this test + valid, _, err := record.Validate(context.Background()) + if err != nil { + // If it's a network error (like "no such host"), that's expected for invalid URLs + // and we just verify that SetSchemaURL didn't panic + if tt.schemaURL != "" && tt.schemaURL != "https://schema.oasf.outshift.com" { + // For custom/invalid URLs, network errors are expected + return + } + + t.Fatalf("Validate() error = %v", err) + } + + assert.True(t, valid) + }) + } +} + +// testRecordWithValidation is a helper function to create a test record and validate it. +// This reduces code duplication between similar tests. 
+func testRecordWithValidation(t *testing.T) { + t.Helper() + + record := corev1.New(&oasfv1alpha1.Record{ + Name: "test-agent", + SchemaVersion: "0.7.0", + Description: "A test agent", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + Authors: []string{ + "Jane Doe ", + }, + Locators: []*oasfv1alpha1.Locator{ + { + Type: "helm_chart", + Url: "https://example.com/helm-chart.tgz", + }, + }, + Skills: []*oasfv1alpha1.Skill{ + { + Name: "natural_language_processing/natural_language_understanding", + }, + }, + Modules: []*oasfv1alpha1.Module{ + { + Name: "test-extension", + }, + }, + }) + + // Validation should still work + valid, _, err := record.Validate(context.Background()) + if err != nil { + t.Fatalf("Validate() error = %v", err) + } + + assert.True(t, valid) +} + +func TestRecord_SetDisableAPIValidation(t *testing.T) { + // Test that SetDisableAPIValidation changes the package-level variable + // Note: This test modifies global state, so we should be careful about test isolation + + // Save original state + originalDisable := false + defer corev1.SetDisableAPIValidation(originalDisable) // Restore after test + + tests := []struct { + name string + disableAPI bool + wantDisableAPI bool + }{ + { + name: "disable API validation", + disableAPI: true, + wantDisableAPI: true, + }, + { + name: "enable API validation", + disableAPI: false, + wantDisableAPI: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This should not panic or error + corev1.SetDisableAPIValidation(tt.disableAPI) + + // Verify that calling SetDisableAPIValidation doesn't panic and that validation still works + testRecordWithValidation(t) + }) + } +} + +func TestRecord_SetStrictValidation(t *testing.T) { + // Test that SetStrictValidation changes the package-level variable + // Note: This test modifies global state, so we should be careful about test isolation + + // Save original state + originalStrict := true + defer corev1.SetStrictValidation(originalStrict) // Restore after test + + tests := []struct { + name string + strict bool + wantStrict bool + }{ + { + name: "enable strict validation", + strict: true, + wantStrict: true, + }, + { + name: "disable strict validation (lax mode)", + strict: false, + wantStrict: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This should not panic or error + corev1.SetStrictValidation(tt.strict) + + // Verify that calling SetStrictValidation doesn't panic and that validation still works + testRecordWithValidation(t) + }) + } +} + +func TestRecord_Validate_RecordSize(t *testing.T) { + // Test that Validate checks record size + // Create a record that exceeds max size + // Note: This is difficult to test without creating a very large record, + // but we can test the nil and empty record cases which are part of the validation logic + tests := []struct { + name string + record *corev1.Record + wantValid bool + }{ + { + name: "nil record", + record: nil, + wantValid: false, + }, + { + name: "record with nil data", + record: &corev1.Record{}, + wantValid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + valid, errors, err := tt.record.Validate(context.Background()) + if err != nil { + if tt.wantValid { + t.Errorf("Validate() unexpected error: %v", err) + } + + return + } + + if valid != tt.wantValid { + t.Errorf("Validate() got valid = %v, errors = %v, want %v", valid, errors, tt.wantValid) + } + + if !valid && len(errors) == 0 { + t.Errorf("Validate() expected 
errors for invalid record, got none") + } + }) + } +} + +func TestRecord_Decode(t *testing.T) { + tests := []struct { + name string + record *corev1.Record + wantResp interface{} + wantFail bool + }{ + { + name: "valid v0.3.1 record", + record: corev1.New(&oasfv1alpha0.Record{ + Name: "valid-agent-v2", + SchemaVersion: "v0.3.1", + Description: "A valid agent record", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + }), + wantResp: &oasfv1alpha0.Record{ + Name: "valid-agent-v2", + SchemaVersion: "v0.3.1", + Description: "A valid agent record", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + }, + }, + { + name: "valid 0.7.0 record", + record: corev1.New(&oasfv1alpha1.Record{ + Name: "valid-agent-v2", + SchemaVersion: "0.7.0", + Description: "A valid agent record", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + }), + wantResp: &oasfv1alpha1.Record{ + Name: "valid-agent-v2", + SchemaVersion: "0.7.0", + Description: "A valid agent record", + Version: "1.0.0", + CreatedAt: "2024-01-01T00:00:00Z", + }, + }, + { + name: "nil record", + record: nil, + wantFail: true, + }, + { + name: "empty record", + record: &corev1.Record{}, + wantFail: true, + }, + { + name: "record with invalid generic data", + record: &corev1.Record{ + Data: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "invalid_field": { + Kind: &structpb.Value_StringValue{StringValue: "some value"}, + }, + }, + }, + }, + wantFail: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.record.Decode() + if err != nil { + if !tt.wantFail { + t.Errorf("Decode() unexpected error: %v", err) + } + + return + } + + if got == nil { + t.Errorf("Decode() got nil record, want %v", tt.wantResp) + + return + } + + if !assert.EqualValues(t, tt.wantResp, got.GetRecord()) { + t.Errorf("Decode() got %v, want %v", got, tt.wantResp) + } + }) + } +} diff --git a/api/core/v1/referrer.go b/api/core/v1/referrer.go index 053630e92..8a7aa38dc 100644 --- a/api/core/v1/referrer.go +++ b/api/core/v1/referrer.go @@ -1,20 +1,20 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -// ReferrerObject defines an interface for referrer objects that can be -// marshaled and unmarshaled to/from RecordReferrer format. -type ReferrerObject interface { - // UnmarshalReferrer loads the object from a RecordReferrer. - UnmarshalReferrer(*RecordReferrer) error - - // MarshalReferrer exports the object into a RecordReferrer. - MarshalReferrer() (*RecordReferrer, error) - - // ReferrerType returns the type of the referrer. - // Examples: - // - Signature: "agntcy.dir.sign.v1.Signature" - // - PublicKey: "agntcy.dir.sign.v1.PublicKey" - ReferrerType() string -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +// ReferrerObject defines an interface for referrer objects that can be +// marshaled and unmarshaled to/from RecordReferrer format. +type ReferrerObject interface { + // UnmarshalReferrer loads the object from a RecordReferrer. + UnmarshalReferrer(*RecordReferrer) error + + // MarshalReferrer exports the object into a RecordReferrer. + MarshalReferrer() (*RecordReferrer, error) + + // ReferrerType returns the type of the referrer. 
+ // Examples: + // - Signature: "agntcy.dir.sign.v1.Signature" + // - PublicKey: "agntcy.dir.sign.v1.PublicKey" + ReferrerType() string +} diff --git a/api/core/v1/referrer_types.go b/api/core/v1/referrer_types.go index 047d6c5aa..ac395ccdb 100644 --- a/api/core/v1/referrer_types.go +++ b/api/core/v1/referrer_types.go @@ -1,13 +1,13 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -// Referrer type constants for the high-level Dir Store API. -const ( - // PublicKeyReferrerType is the type for PublicKey referrers. - PublicKeyReferrerType = "agntcy.dir.sign.v1.PublicKey" - - // SignatureReferrerType is the type for Signature referrers. - SignatureReferrerType = "agntcy.dir.sign.v1.Signature" -) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +// Referrer type constants for the high-level Dir Store API. +const ( + // PublicKeyReferrerType is the type for PublicKey referrers. + PublicKeyReferrerType = "agntcy.dir.sign.v1.PublicKey" + + // SignatureReferrerType is the type for Signature referrers. + SignatureReferrerType = "agntcy.dir.sign.v1.Signature" +) diff --git a/api/events/v1/event_service.pb.go b/api/events/v1/event_service.pb.go index d2a6a84ee..25d628994 100644 --- a/api/events/v1/event_service.pb.go +++ b/api/events/v1/event_service.pb.go @@ -1,468 +1,468 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/events/v1/event_service.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// EventType represents all valid event types in the system. -// Each value represents a specific operation that can occur. -// -// Supported Events: -// - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED -// - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED -// - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED -// - Sign: RECORD_SIGNED -type EventType int32 - -const ( - // Unknown/unspecified event type. - EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 - // A record was pushed to local storage. - EventType_EVENT_TYPE_RECORD_PUSHED EventType = 1 - // A record was pulled from storage. - EventType_EVENT_TYPE_RECORD_PULLED EventType = 2 - // A record was deleted from storage. - EventType_EVENT_TYPE_RECORD_DELETED EventType = 3 - // A record was published/announced to the network. - EventType_EVENT_TYPE_RECORD_PUBLISHED EventType = 4 - // A record was unpublished from the network. - EventType_EVENT_TYPE_RECORD_UNPUBLISHED EventType = 5 - // A sync operation was created/initiated. - EventType_EVENT_TYPE_SYNC_CREATED EventType = 6 - // A sync operation completed successfully. - EventType_EVENT_TYPE_SYNC_COMPLETED EventType = 7 - // A sync operation failed. - EventType_EVENT_TYPE_SYNC_FAILED EventType = 8 - // A record was signed. 
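// --- Illustrative sketch: a minimal implementation of the ReferrerObject
// interface from referrer.go above. The type, payload key, and package name
// are hypothetical; only the interface shape and the corev1.RecordReferrer
// fields come from this diff. Note the type string stays outside the
// reserved "agntcy.dir." namespace.
package notes

import (
	corev1 "github.com/agntcy/dir/api/core/v1"
	"google.golang.org/protobuf/types/known/structpb"
)

type NoteReferrer struct{ Text string }

func (n *NoteReferrer) ReferrerType() string { return "example.note.v1.Note" } // hypothetical type name

func (n *NoteReferrer) MarshalReferrer() (*corev1.RecordReferrer, error) {
	data, err := structpb.NewStruct(map[string]any{"text": n.Text})
	if err != nil {
		return nil, err
	}
	return &corev1.RecordReferrer{Type: n.ReferrerType(), Data: data}, nil
}

func (n *NoteReferrer) UnmarshalReferrer(r *corev1.RecordReferrer) error {
	n.Text = r.GetData().GetFields()["text"].GetStringValue()
	return nil
}

// Compile-time check that NoteReferrer satisfies the interface.
var _ corev1.ReferrerObject = (*NoteReferrer)(nil)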
- EventType_EVENT_TYPE_RECORD_SIGNED EventType = 9 -) - -// Enum value maps for EventType. -var ( - EventType_name = map[int32]string{ - 0: "EVENT_TYPE_UNSPECIFIED", - 1: "EVENT_TYPE_RECORD_PUSHED", - 2: "EVENT_TYPE_RECORD_PULLED", - 3: "EVENT_TYPE_RECORD_DELETED", - 4: "EVENT_TYPE_RECORD_PUBLISHED", - 5: "EVENT_TYPE_RECORD_UNPUBLISHED", - 6: "EVENT_TYPE_SYNC_CREATED", - 7: "EVENT_TYPE_SYNC_COMPLETED", - 8: "EVENT_TYPE_SYNC_FAILED", - 9: "EVENT_TYPE_RECORD_SIGNED", - } - EventType_value = map[string]int32{ - "EVENT_TYPE_UNSPECIFIED": 0, - "EVENT_TYPE_RECORD_PUSHED": 1, - "EVENT_TYPE_RECORD_PULLED": 2, - "EVENT_TYPE_RECORD_DELETED": 3, - "EVENT_TYPE_RECORD_PUBLISHED": 4, - "EVENT_TYPE_RECORD_UNPUBLISHED": 5, - "EVENT_TYPE_SYNC_CREATED": 6, - "EVENT_TYPE_SYNC_COMPLETED": 7, - "EVENT_TYPE_SYNC_FAILED": 8, - "EVENT_TYPE_RECORD_SIGNED": 9, - } -) - -func (x EventType) Enum() *EventType { - p := new(EventType) - *p = x - return p -} - -func (x EventType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (EventType) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_events_v1_event_service_proto_enumTypes[0].Descriptor() -} - -func (EventType) Type() protoreflect.EnumType { - return &file_agntcy_dir_events_v1_event_service_proto_enumTypes[0] -} - -func (x EventType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use EventType.Descriptor instead. -func (EventType) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{0} -} - -// ListenRequest specifies filters for event subscription. -type ListenRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Event types to subscribe to. - // If empty, subscribes to all event types. - EventTypes []EventType `protobuf:"varint,1,rep,packed,name=event_types,json=eventTypes,proto3,enum=agntcy.dir.events.v1.EventType" json:"event_types,omitempty"` - // Optional label filters (e.g., "/skills/AI", "/domains/research"). - // Only events for records matching these labels are delivered. - // Uses substring matching. - LabelFilters []string `protobuf:"bytes,2,rep,name=label_filters,json=labelFilters,proto3" json:"label_filters,omitempty"` - // Optional CID filters. - // Only events for specific CIDs are delivered. - CidFilters []string `protobuf:"bytes,3,rep,name=cid_filters,json=cidFilters,proto3" json:"cid_filters,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListenRequest) Reset() { - *x = ListenRequest{} - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListenRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListenRequest) ProtoMessage() {} - -func (x *ListenRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListenRequest.ProtoReflect.Descriptor instead. 
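// --- Illustrative sketch: subscribing with filters. ListenRequest and its
// fields come from this file; NewEventServiceClient and the server-streaming
// Listen stub are assumed to live in the companion *_grpc.pb.go, which is not
// part of this diff. The address is a placeholder.
package main

import (
	"context"
	"log"

	eventsv1 "github.com/agntcy/dir/api/events/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:8888", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := eventsv1.NewEventServiceClient(conn).Listen(context.Background(), &eventsv1.ListenRequest{
		EventTypes:   []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED},
		LabelFilters: []string{"/skills/AI"}, // substring match, per the field docs
	})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("event %s for %s", resp.GetEvent().GetId(), resp.GetEvent().GetResourceId())
	}
}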
-func (*ListenRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{0} -} - -func (x *ListenRequest) GetEventTypes() []EventType { - if x != nil { - return x.EventTypes - } - return nil -} - -func (x *ListenRequest) GetLabelFilters() []string { - if x != nil { - return x.LabelFilters - } - return nil -} - -func (x *ListenRequest) GetCidFilters() []string { - if x != nil { - return x.CidFilters - } - return nil -} - -// ListenResponse is the response message for the Listen RPC. -// Wraps the Event message to allow for future extensions without breaking the Event structure. -type ListenResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The event that occurred. - Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListenResponse) Reset() { - *x = ListenResponse{} - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListenResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListenResponse) ProtoMessage() {} - -func (x *ListenResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListenResponse.ProtoReflect.Descriptor instead. -func (*ListenResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListenResponse) GetEvent() *Event { - if x != nil { - return x.Event - } - return nil -} - -// Event represents a system event that occurred. -type Event struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique event identifier (generated by the system). - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Type of event that occurred. - Type EventType `protobuf:"varint,2,opt,name=type,proto3,enum=agntcy.dir.events.v1.EventType" json:"type,omitempty"` - // When the event occurred. - Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Resource identifier (CID for records, sync_id for syncs, etc.). - ResourceId string `protobuf:"bytes,4,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` - // Optional labels associated with the record (for record events). - Labels []string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"` - // Optional metadata for additional context. - // Used for flexible event-specific data that doesn't fit standard fields. 
- Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Event) Reset() { - *x = Event{} - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Event) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Event) ProtoMessage() {} - -func (x *Event) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Event.ProtoReflect.Descriptor instead. -func (*Event) Descriptor() ([]byte, []int) { - return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{2} -} - -func (x *Event) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Event) GetType() EventType { - if x != nil { - return x.Type - } - return EventType_EVENT_TYPE_UNSPECIFIED -} - -func (x *Event) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - -func (x *Event) GetResourceId() string { - if x != nil { - return x.ResourceId - } - return "" -} - -func (x *Event) GetLabels() []string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Event) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -var File_agntcy_dir_events_v1_event_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_events_v1_event_service_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x97, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x69, - 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x69, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x43, 0x0a, 0x0e, 0x4c, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, - 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x22, 0xc3, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, - 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0xbc, 0x02, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, - 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x50, 0x55, 0x53, 0x48, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1c, - 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, - 0x4f, 0x52, 0x44, 0x5f, 0x50, 0x55, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, - 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x1f, 0x0a, 0x1b, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, - 0x44, 0x5f, 0x55, 0x4e, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x05, 0x12, - 0x1b, 0x0a, 0x17, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x53, 0x59, - 0x4e, 0x43, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x1d, 0x0a, 0x19, - 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, - 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x1a, 0x0a, 0x16, 0x45, - 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x45, 0x44, 0x10, 0x09, 0x32, 0x65, 0x0a, 0x0c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, - 0x23, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, - 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0xc5, 0x01, 0x0a, - 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x45, 0xaa, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x56, 0x31, - 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x31, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, 0x41, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_events_v1_event_service_proto_rawDescOnce sync.Once - file_agntcy_dir_events_v1_event_service_proto_rawDescData []byte -) - -func file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_events_v1_event_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_events_v1_event_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_events_v1_event_service_proto_rawDesc), len(file_agntcy_dir_events_v1_event_service_proto_rawDesc))) - }) - return file_agntcy_dir_events_v1_event_service_proto_rawDescData -} - -var file_agntcy_dir_events_v1_event_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_agntcy_dir_events_v1_event_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_agntcy_dir_events_v1_event_service_proto_goTypes = []any{ - (EventType)(0), // 0: agntcy.dir.events.v1.EventType - (*ListenRequest)(nil), // 1: agntcy.dir.events.v1.ListenRequest - 
(*ListenResponse)(nil), // 2: agntcy.dir.events.v1.ListenResponse - (*Event)(nil), // 3: agntcy.dir.events.v1.Event - nil, // 4: agntcy.dir.events.v1.Event.MetadataEntry - (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp -} -var file_agntcy_dir_events_v1_event_service_proto_depIdxs = []int32{ - 0, // 0: agntcy.dir.events.v1.ListenRequest.event_types:type_name -> agntcy.dir.events.v1.EventType - 3, // 1: agntcy.dir.events.v1.ListenResponse.event:type_name -> agntcy.dir.events.v1.Event - 0, // 2: agntcy.dir.events.v1.Event.type:type_name -> agntcy.dir.events.v1.EventType - 5, // 3: agntcy.dir.events.v1.Event.timestamp:type_name -> google.protobuf.Timestamp - 4, // 4: agntcy.dir.events.v1.Event.metadata:type_name -> agntcy.dir.events.v1.Event.MetadataEntry - 1, // 5: agntcy.dir.events.v1.EventService.Listen:input_type -> agntcy.dir.events.v1.ListenRequest - 2, // 6: agntcy.dir.events.v1.EventService.Listen:output_type -> agntcy.dir.events.v1.ListenResponse - 6, // [6:7] is the sub-list for method output_type - 5, // [5:6] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_events_v1_event_service_proto_init() } -func file_agntcy_dir_events_v1_event_service_proto_init() { - if File_agntcy_dir_events_v1_event_service_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_events_v1_event_service_proto_rawDesc), len(file_agntcy_dir_events_v1_event_service_proto_rawDesc)), - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_events_v1_event_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_events_v1_event_service_proto_depIdxs, - EnumInfos: file_agntcy_dir_events_v1_event_service_proto_enumTypes, - MessageInfos: file_agntcy_dir_events_v1_event_service_proto_msgTypes, - }.Build() - File_agntcy_dir_events_v1_event_service_proto = out.File - file_agntcy_dir_events_v1_event_service_proto_goTypes = nil - file_agntcy_dir_events_v1_event_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/events/v1/event_service.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// EventType represents all valid event types in the system. +// Each value represents a specific operation that can occur. +// +// Supported Events: +// - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED +// - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED +// - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED +// - Sign: RECORD_SIGNED +type EventType int32 + +const ( + // Unknown/unspecified event type. 
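// --- Illustrative sketch: dispatching on the EventType constants declared in
// this block, bucketed by the groups listed in the enum docs (store, routing,
// sync, sign). The category strings and helper are hypothetical.
package eventsutil

import eventsv1 "github.com/agntcy/dir/api/events/v1"

func Category(ev *eventsv1.Event) string {
	switch ev.GetType() {
	case eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED,
		eventsv1.EventType_EVENT_TYPE_RECORD_PULLED,
		eventsv1.EventType_EVENT_TYPE_RECORD_DELETED:
		return "store"
	case eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED,
		eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED:
		return "routing"
	case eventsv1.EventType_EVENT_TYPE_SYNC_CREATED,
		eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED,
		eventsv1.EventType_EVENT_TYPE_SYNC_FAILED:
		return "sync"
	case eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED:
		return "sign"
	default:
		return "unknown"
	}
}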
+ EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 + // A record was pushed to local storage. + EventType_EVENT_TYPE_RECORD_PUSHED EventType = 1 + // A record was pulled from storage. + EventType_EVENT_TYPE_RECORD_PULLED EventType = 2 + // A record was deleted from storage. + EventType_EVENT_TYPE_RECORD_DELETED EventType = 3 + // A record was published/announced to the network. + EventType_EVENT_TYPE_RECORD_PUBLISHED EventType = 4 + // A record was unpublished from the network. + EventType_EVENT_TYPE_RECORD_UNPUBLISHED EventType = 5 + // A sync operation was created/initiated. + EventType_EVENT_TYPE_SYNC_CREATED EventType = 6 + // A sync operation completed successfully. + EventType_EVENT_TYPE_SYNC_COMPLETED EventType = 7 + // A sync operation failed. + EventType_EVENT_TYPE_SYNC_FAILED EventType = 8 + // A record was signed. + EventType_EVENT_TYPE_RECORD_SIGNED EventType = 9 +) + +// Enum value maps for EventType. +var ( + EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNSPECIFIED", + 1: "EVENT_TYPE_RECORD_PUSHED", + 2: "EVENT_TYPE_RECORD_PULLED", + 3: "EVENT_TYPE_RECORD_DELETED", + 4: "EVENT_TYPE_RECORD_PUBLISHED", + 5: "EVENT_TYPE_RECORD_UNPUBLISHED", + 6: "EVENT_TYPE_SYNC_CREATED", + 7: "EVENT_TYPE_SYNC_COMPLETED", + 8: "EVENT_TYPE_SYNC_FAILED", + 9: "EVENT_TYPE_RECORD_SIGNED", + } + EventType_value = map[string]int32{ + "EVENT_TYPE_UNSPECIFIED": 0, + "EVENT_TYPE_RECORD_PUSHED": 1, + "EVENT_TYPE_RECORD_PULLED": 2, + "EVENT_TYPE_RECORD_DELETED": 3, + "EVENT_TYPE_RECORD_PUBLISHED": 4, + "EVENT_TYPE_RECORD_UNPUBLISHED": 5, + "EVENT_TYPE_SYNC_CREATED": 6, + "EVENT_TYPE_SYNC_COMPLETED": 7, + "EVENT_TYPE_SYNC_FAILED": 8, + "EVENT_TYPE_RECORD_SIGNED": 9, + } +) + +func (x EventType) Enum() *EventType { + p := new(EventType) + *p = x + return p +} + +func (x EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EventType) Descriptor() protoreflect.EnumDescriptor { + return file_agntcy_dir_events_v1_event_service_proto_enumTypes[0].Descriptor() +} + +func (EventType) Type() protoreflect.EnumType { + return &file_agntcy_dir_events_v1_event_service_proto_enumTypes[0] +} + +func (x EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EventType.Descriptor instead. +func (EventType) EnumDescriptor() ([]byte, []int) { + return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{0} +} + +// ListenRequest specifies filters for event subscription. +type ListenRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Event types to subscribe to. + // If empty, subscribes to all event types. + EventTypes []EventType `protobuf:"varint,1,rep,packed,name=event_types,json=eventTypes,proto3,enum=agntcy.dir.events.v1.EventType" json:"event_types,omitempty"` + // Optional label filters (e.g., "/skills/AI", "/domains/research"). + // Only events for records matching these labels are delivered. + // Uses substring matching. + LabelFilters []string `protobuf:"bytes,2,rep,name=label_filters,json=labelFilters,proto3" json:"label_filters,omitempty"` + // Optional CID filters. + // Only events for specific CIDs are delivered. 
+ CidFilters []string `protobuf:"bytes,3,rep,name=cid_filters,json=cidFilters,proto3" json:"cid_filters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListenRequest) Reset() { + *x = ListenRequest{} + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenRequest) ProtoMessage() {} + +func (x *ListenRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenRequest.ProtoReflect.Descriptor instead. +func (*ListenRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ListenRequest) GetEventTypes() []EventType { + if x != nil { + return x.EventTypes + } + return nil +} + +func (x *ListenRequest) GetLabelFilters() []string { + if x != nil { + return x.LabelFilters + } + return nil +} + +func (x *ListenRequest) GetCidFilters() []string { + if x != nil { + return x.CidFilters + } + return nil +} + +// ListenResponse is the response message for the Listen RPC. +// Wraps the Event message to allow for future extensions without breaking the Event structure. +type ListenResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The event that occurred. + Event *Event `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListenResponse) Reset() { + *x = ListenResponse{} + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListenResponse) ProtoMessage() {} + +func (x *ListenResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListenResponse.ProtoReflect.Descriptor instead. +func (*ListenResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListenResponse) GetEvent() *Event { + if x != nil { + return x.Event + } + return nil +} + +// Event represents a system event that occurred. +type Event struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique event identifier (generated by the system). + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Type of event that occurred. + Type EventType `protobuf:"varint,2,opt,name=type,proto3,enum=agntcy.dir.events.v1.EventType" json:"type,omitempty"` + // When the event occurred. + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Resource identifier (CID for records, sync_id for syncs, etc.). 
+ ResourceId string `protobuf:"bytes,4,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + // Optional labels associated with the record (for record events). + Labels []string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"` + // Optional metadata for additional context. + // Used for flexible event-specific data that doesn't fit standard fields. + Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Event) Reset() { + *x = Event{} + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_events_v1_event_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP(), []int{2} +} + +func (x *Event) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Event) GetType() EventType { + if x != nil { + return x.Type + } + return EventType_EVENT_TYPE_UNSPECIFIED +} + +func (x *Event) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *Event) GetResourceId() string { + if x != nil { + return x.ResourceId + } + return "" +} + +func (x *Event) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Event) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +var File_agntcy_dir_events_v1_event_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_events_v1_event_service_proto_rawDesc = string([]byte{ + 0x0a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x97, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x46, 0x69, 0x6c, 
0x74, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x69, + 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x69, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x43, 0x0a, 0x0e, 0x4c, + 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x22, 0xc3, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, + 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0xbc, 0x02, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, + 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x50, 0x55, 0x53, 0x48, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1c, + 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, + 0x4f, 0x52, 0x44, 0x5f, 0x50, 0x55, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, + 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x1f, 0x0a, 0x1b, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 
0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, + 0x44, 0x5f, 0x55, 0x4e, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x45, 0x44, 0x10, 0x05, 0x12, + 0x1b, 0x0a, 0x17, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, + 0x4e, 0x43, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x06, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, + 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x1a, 0x0a, 0x16, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x45, 0x44, 0x10, 0x09, 0x32, 0x65, 0x0a, 0x0c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x12, + 0x23, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, + 0x72, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0xc5, 0x01, 0x0a, + 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x45, 0xaa, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x56, 0x31, + 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, 0x41, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_events_v1_event_service_proto_rawDescOnce sync.Once + file_agntcy_dir_events_v1_event_service_proto_rawDescData []byte +) + +func file_agntcy_dir_events_v1_event_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_events_v1_event_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_events_v1_event_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_events_v1_event_service_proto_rawDesc), len(file_agntcy_dir_events_v1_event_service_proto_rawDesc))) + }) + return 
file_agntcy_dir_events_v1_event_service_proto_rawDescData +} + +var file_agntcy_dir_events_v1_event_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_events_v1_event_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_agntcy_dir_events_v1_event_service_proto_goTypes = []any{ + (EventType)(0), // 0: agntcy.dir.events.v1.EventType + (*ListenRequest)(nil), // 1: agntcy.dir.events.v1.ListenRequest + (*ListenResponse)(nil), // 2: agntcy.dir.events.v1.ListenResponse + (*Event)(nil), // 3: agntcy.dir.events.v1.Event + nil, // 4: agntcy.dir.events.v1.Event.MetadataEntry + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_agntcy_dir_events_v1_event_service_proto_depIdxs = []int32{ + 0, // 0: agntcy.dir.events.v1.ListenRequest.event_types:type_name -> agntcy.dir.events.v1.EventType + 3, // 1: agntcy.dir.events.v1.ListenResponse.event:type_name -> agntcy.dir.events.v1.Event + 0, // 2: agntcy.dir.events.v1.Event.type:type_name -> agntcy.dir.events.v1.EventType + 5, // 3: agntcy.dir.events.v1.Event.timestamp:type_name -> google.protobuf.Timestamp + 4, // 4: agntcy.dir.events.v1.Event.metadata:type_name -> agntcy.dir.events.v1.Event.MetadataEntry + 1, // 5: agntcy.dir.events.v1.EventService.Listen:input_type -> agntcy.dir.events.v1.ListenRequest + 2, // 6: agntcy.dir.events.v1.EventService.Listen:output_type -> agntcy.dir.events.v1.ListenResponse + 6, // [6:7] is the sub-list for method output_type + 5, // [5:6] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_events_v1_event_service_proto_init() } +func file_agntcy_dir_events_v1_event_service_proto_init() { + if File_agntcy_dir_events_v1_event_service_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_events_v1_event_service_proto_rawDesc), len(file_agntcy_dir_events_v1_event_service_proto_rawDesc)), + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_events_v1_event_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_events_v1_event_service_proto_depIdxs, + EnumInfos: file_agntcy_dir_events_v1_event_service_proto_enumTypes, + MessageInfos: file_agntcy_dir_events_v1_event_service_proto_msgTypes, + }.Build() + File_agntcy_dir_events_v1_event_service_proto = out.File + file_agntcy_dir_events_v1_event_service_proto_goTypes = nil + file_agntcy_dir_events_v1_event_service_proto_depIdxs = nil +} diff --git a/api/events/v1/event_service_grpc.pb.go b/api/events/v1/event_service_grpc.pb.go index e3f872b9c..2bf469368 100644 --- a/api/events/v1/event_service_grpc.pb.go +++ b/api/events/v1/event_service_grpc.pb.go @@ -1,163 +1,163 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/events/v1/event_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - EventService_Listen_FullMethodName = "/agntcy.dir.events.v1.EventService/Listen" -) - -// EventServiceClient is the client API for EventService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// EventService provides real-time event streaming for all system operations. -// Events are delivered from subscription time forward with no history or replay. -// This service enables external applications to react to system changes in real-time. -type EventServiceClient interface { - // Listen establishes a streaming connection to receive events. - // Events are only delivered while the stream is active. - // On disconnect, missed events are not recoverable. - Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (EventService_ListenClient, error) -} - -type eventServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewEventServiceClient(cc grpc.ClientConnInterface) EventServiceClient { - return &eventServiceClient{cc} -} - -func (c *eventServiceClient) Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (EventService_ListenClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &EventService_ServiceDesc.Streams[0], EventService_Listen_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &eventServiceListenClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type EventService_ListenClient interface { - Recv() (*ListenResponse, error) - grpc.ClientStream -} - -type eventServiceListenClient struct { - grpc.ClientStream -} - -func (x *eventServiceListenClient) Recv() (*ListenResponse, error) { - m := new(ListenResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// EventServiceServer is the server API for EventService service. -// All implementations should embed UnimplementedEventServiceServer -// for forward compatibility. -// -// EventService provides real-time event streaming for all system operations. -// Events are delivered from subscription time forward with no history or replay. -// This service enables external applications to react to system changes in real-time. -type EventServiceServer interface { - // Listen establishes a streaming connection to receive events. - // Events are only delivered while the stream is active. - // On disconnect, missed events are not recoverable. - Listen(*ListenRequest, EventService_ListenServer) error -} - -// UnimplementedEventServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. 
-type UnimplementedEventServiceServer struct{} - -func (UnimplementedEventServiceServer) Listen(*ListenRequest, EventService_ListenServer) error { - return status.Errorf(codes.Unimplemented, "method Listen not implemented") -} -func (UnimplementedEventServiceServer) testEmbeddedByValue() {} - -// UnsafeEventServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to EventServiceServer will -// result in compilation errors. -type UnsafeEventServiceServer interface { - mustEmbedUnimplementedEventServiceServer() -} - -func RegisterEventServiceServer(s grpc.ServiceRegistrar, srv EventServiceServer) { - // If the following call pancis, it indicates UnimplementedEventServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&EventService_ServiceDesc, srv) -} - -func _EventService_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListenRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(EventServiceServer).Listen(m, &eventServiceListenServer{ServerStream: stream}) -} - -type EventService_ListenServer interface { - Send(*ListenResponse) error - grpc.ServerStream -} - -type eventServiceListenServer struct { - grpc.ServerStream -} - -func (x *eventServiceListenServer) Send(m *ListenResponse) error { - return x.ServerStream.SendMsg(m) -} - -// EventService_ServiceDesc is the grpc.ServiceDesc for EventService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var EventService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.events.v1.EventService", - HandlerType: (*EventServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Listen", - Handler: _EventService_Listen_Handler, - ServerStreams: true, - }, - }, - Metadata: "agntcy/dir/events/v1/event_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: agntcy/dir/events/v1/event_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + EventService_Listen_FullMethodName = "/agntcy.dir.events.v1.EventService/Listen" +) + +// EventServiceClient is the client API for EventService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// EventService provides real-time event streaming for all system operations. +// Events are delivered from subscription time forward with no history or replay. +// This service enables external applications to react to system changes in real-time. 
+type EventServiceClient interface {
+	// Listen establishes a streaming connection to receive events.
+	// Events are only delivered while the stream is active.
+	// On disconnect, missed events are not recoverable.
+	Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (EventService_ListenClient, error)
+}
+
+type eventServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewEventServiceClient(cc grpc.ClientConnInterface) EventServiceClient {
+	return &eventServiceClient{cc}
+}
+
+func (c *eventServiceClient) Listen(ctx context.Context, in *ListenRequest, opts ...grpc.CallOption) (EventService_ListenClient, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &EventService_ServiceDesc.Streams[0], EventService_Listen_FullMethodName, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &eventServiceListenClient{ClientStream: stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type EventService_ListenClient interface {
+	Recv() (*ListenResponse, error)
+	grpc.ClientStream
+}
+
+type eventServiceListenClient struct {
+	grpc.ClientStream
+}
+
+func (x *eventServiceListenClient) Recv() (*ListenResponse, error) {
+	m := new(ListenResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// EventServiceServer is the server API for EventService service.
+// All implementations should embed UnimplementedEventServiceServer
+// for forward compatibility.
+//
+// EventService provides real-time event streaming for all system operations.
+// Events are delivered from subscription time forward with no history or replay.
+// This service enables external applications to react to system changes in real-time.
+type EventServiceServer interface {
+	// Listen establishes a streaming connection to receive events.
+	// Events are only delivered while the stream is active.
+	// On disconnect, missed events are not recoverable.
+	Listen(*ListenRequest, EventService_ListenServer) error
+}
+
+// UnimplementedEventServiceServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedEventServiceServer struct{}
+
+func (UnimplementedEventServiceServer) Listen(*ListenRequest, EventService_ListenServer) error {
+	return status.Errorf(codes.Unimplemented, "method Listen not implemented")
+}
+func (UnimplementedEventServiceServer) testEmbeddedByValue() {}
+
+// UnsafeEventServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to EventServiceServer will
+// result in compilation errors.
+type UnsafeEventServiceServer interface {
+	mustEmbedUnimplementedEventServiceServer()
+}
+
+func RegisterEventServiceServer(s grpc.ServiceRegistrar, srv EventServiceServer) {
+	// If the following call panics, it indicates UnimplementedEventServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&EventService_ServiceDesc, srv) +} + +func _EventService_Listen_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EventServiceServer).Listen(m, &eventServiceListenServer{ServerStream: stream}) +} + +type EventService_ListenServer interface { + Send(*ListenResponse) error + grpc.ServerStream +} + +type eventServiceListenServer struct { + grpc.ServerStream +} + +func (x *eventServiceListenServer) Send(m *ListenResponse) error { + return x.ServerStream.SendMsg(m) +} + +// EventService_ServiceDesc is the grpc.ServiceDesc for EventService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var EventService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.events.v1.EventService", + HandlerType: (*EventServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Listen", + Handler: _EventService_Listen_Handler, + ServerStreams: true, + }, + }, + Metadata: "agntcy/dir/events/v1/event_service.proto", +} diff --git a/api/go.mod b/api/go.mod index 0afb789ea..afdba933b 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,38 +1,38 @@ -module github.com/agntcy/dir/api - -go 1.25.2 - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 - github.com/agntcy/oasf-sdk/pkg v0.0.14 - github.com/multiformats/go-multihash v0.2.3 - github.com/opencontainers/go-digest v1.0.0 - github.com/stretchr/testify v1.10.0 - google.golang.org/grpc v1.74.2 - google.golang.org/protobuf v1.36.10 -) - -require ( - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/ipfs/go-cid v0.5.0 - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/text v0.31.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.4.0 // indirect -) +module github.com/agntcy/dir/api + +go 1.25.2 + +require ( + buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 + buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 + github.com/agntcy/oasf-sdk/pkg v0.0.14 + github.com/multiformats/go-multihash v0.2.3 + github.com/opencontainers/go-digest v1.0.0 + github.com/stretchr/testify v1.10.0 + google.golang.org/grpc v1.74.2 + google.golang.org/protobuf v1.36.10 +) + +require ( + 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/ipfs/go-cid v0.5.0 + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/text v0.31.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/blake3 v1.4.0 // indirect +) diff --git a/api/go.sum b/api/go.sum index 510ffbaee..d26d53970 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,87 +1,87 @@ -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= -github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= -github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 
-github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= -go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= -go.opentelemetry.io/otel/metric v1.36.0 
h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= -go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= -go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= -go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= -go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= -go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= -go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= -go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= +github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= +google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +lukechampine.com/blake3 v1.4.0 
h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= diff --git a/api/routing/v1/peer.pb.go b/api/routing/v1/peer.pb.go index b1078a1e5..162e410e9 100644 --- a/api/routing/v1/peer.pb.go +++ b/api/routing/v1/peer.pb.go @@ -1,267 +1,267 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/routing/v1/peer.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type PeerConnectionType int32 - -const ( - // Sender does not have a connection to peer, and no extra information (default) - PeerConnectionType_PEER_CONNECTION_TYPE_NOT_CONNECTED PeerConnectionType = 0 - // Sender has a live connection to peer. - PeerConnectionType_PEER_CONNECTION_TYPE_CONNECTED PeerConnectionType = 1 - // Sender recently connected to peer. - PeerConnectionType_PEER_CONNECTION_TYPE_CAN_CONNECT PeerConnectionType = 2 - // Sender made strong effort to connect to peer repeatedly but failed. - PeerConnectionType_PEER_CONNECTION_TYPE_CANNOT_CONNECT PeerConnectionType = 3 -) - -// Enum value maps for PeerConnectionType. -var ( - PeerConnectionType_name = map[int32]string{ - 0: "PEER_CONNECTION_TYPE_NOT_CONNECTED", - 1: "PEER_CONNECTION_TYPE_CONNECTED", - 2: "PEER_CONNECTION_TYPE_CAN_CONNECT", - 3: "PEER_CONNECTION_TYPE_CANNOT_CONNECT", - } - PeerConnectionType_value = map[string]int32{ - "PEER_CONNECTION_TYPE_NOT_CONNECTED": 0, - "PEER_CONNECTION_TYPE_CONNECTED": 1, - "PEER_CONNECTION_TYPE_CAN_CONNECT": 2, - "PEER_CONNECTION_TYPE_CANNOT_CONNECT": 3, - } -) - -func (x PeerConnectionType) Enum() *PeerConnectionType { - p := new(PeerConnectionType) - *p = x - return p -} - -func (x PeerConnectionType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (PeerConnectionType) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_routing_v1_peer_proto_enumTypes[0].Descriptor() -} - -func (PeerConnectionType) Type() protoreflect.EnumType { - return &file_agntcy_dir_routing_v1_peer_proto_enumTypes[0] -} - -func (x PeerConnectionType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use PeerConnectionType.Descriptor instead. -func (PeerConnectionType) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP(), []int{0} -} - -type Peer struct { - state protoimpl.MessageState `protogen:"open.v1"` - // ID of a given peer, typically described by a protocol. - // For example: - // - SPIFFE: "spiffe://example.org/service/foo" - // - JWT: "jwt:sub=alice,iss=https://issuer.example.com" - // - Tor: "onion:abcdefghijklmno.onion" - // - DID: "did:example:123456789abcdefghi" - // - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Multiaddrs for a given peer. 
- // For example: - // - "/ip4/127.0.0.1/tcp/4001" - // - "/ip6/::1/tcp/4001" - // - "/dns4/example.com/tcp/443/https" - Addrs []string `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` - // Additional metadata about the peer. - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Used to signal the sender's connection capabilities to the peer. - Connection PeerConnectionType `protobuf:"varint,4,opt,name=connection,proto3,enum=agntcy.dir.routing.v1.PeerConnectionType" json:"connection,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Peer) Reset() { - *x = Peer{} - mi := &file_agntcy_dir_routing_v1_peer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Peer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Peer) ProtoMessage() {} - -func (x *Peer) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_peer_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Peer.ProtoReflect.Descriptor instead. -func (*Peer) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP(), []int{0} -} - -func (x *Peer) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Peer) GetAddrs() []string { - if x != nil { - return x.Addrs - } - return nil -} - -func (x *Peer) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -func (x *Peer) GetConnection() PeerConnectionType { - if x != nil { - return x.Connection - } - return PeerConnectionType_PEER_CONNECTION_TYPE_NOT_CONNECTED -} - -var File_agntcy_dir_routing_v1_peer_proto protoreflect.FileDescriptor - -var file_agntcy_dir_routing_v1_peer_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x22, 0x87, 0x02, 0x0a, 0x04, 0x50, 0x65, - 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 
0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x2a, 0xaf, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x22, 0x50, 0x45, - 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, - 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, - 0x41, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, - 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0xc3, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x31, 0x42, 0x09, 0x50, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, - 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_routing_v1_peer_proto_rawDescOnce sync.Once - file_agntcy_dir_routing_v1_peer_proto_rawDescData []byte -) - -func file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP() []byte { - file_agntcy_dir_routing_v1_peer_proto_rawDescOnce.Do(func() { - file_agntcy_dir_routing_v1_peer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_peer_proto_rawDesc), len(file_agntcy_dir_routing_v1_peer_proto_rawDesc))) - }) - return file_agntcy_dir_routing_v1_peer_proto_rawDescData -} - -var file_agntcy_dir_routing_v1_peer_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var 
file_agntcy_dir_routing_v1_peer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_agntcy_dir_routing_v1_peer_proto_goTypes = []any{ - (PeerConnectionType)(0), // 0: agntcy.dir.routing.v1.PeerConnectionType - (*Peer)(nil), // 1: agntcy.dir.routing.v1.Peer - nil, // 2: agntcy.dir.routing.v1.Peer.AnnotationsEntry -} -var file_agntcy_dir_routing_v1_peer_proto_depIdxs = []int32{ - 2, // 0: agntcy.dir.routing.v1.Peer.annotations:type_name -> agntcy.dir.routing.v1.Peer.AnnotationsEntry - 0, // 1: agntcy.dir.routing.v1.Peer.connection:type_name -> agntcy.dir.routing.v1.PeerConnectionType - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_routing_v1_peer_proto_init() } -func file_agntcy_dir_routing_v1_peer_proto_init() { - if File_agntcy_dir_routing_v1_peer_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_peer_proto_rawDesc), len(file_agntcy_dir_routing_v1_peer_proto_rawDesc)), - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_routing_v1_peer_proto_goTypes, - DependencyIndexes: file_agntcy_dir_routing_v1_peer_proto_depIdxs, - EnumInfos: file_agntcy_dir_routing_v1_peer_proto_enumTypes, - MessageInfos: file_agntcy_dir_routing_v1_peer_proto_msgTypes, - }.Build() - File_agntcy_dir_routing_v1_peer_proto = out.File - file_agntcy_dir_routing_v1_peer_proto_goTypes = nil - file_agntcy_dir_routing_v1_peer_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/routing/v1/peer.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PeerConnectionType int32 + +const ( + // Sender does not have a connection to peer, and no extra information (default) + PeerConnectionType_PEER_CONNECTION_TYPE_NOT_CONNECTED PeerConnectionType = 0 + // Sender has a live connection to peer. + PeerConnectionType_PEER_CONNECTION_TYPE_CONNECTED PeerConnectionType = 1 + // Sender recently connected to peer. + PeerConnectionType_PEER_CONNECTION_TYPE_CAN_CONNECT PeerConnectionType = 2 + // Sender made strong effort to connect to peer repeatedly but failed. + PeerConnectionType_PEER_CONNECTION_TYPE_CANNOT_CONNECT PeerConnectionType = 3 +) + +// Enum value maps for PeerConnectionType. 
+var ( + PeerConnectionType_name = map[int32]string{ + 0: "PEER_CONNECTION_TYPE_NOT_CONNECTED", + 1: "PEER_CONNECTION_TYPE_CONNECTED", + 2: "PEER_CONNECTION_TYPE_CAN_CONNECT", + 3: "PEER_CONNECTION_TYPE_CANNOT_CONNECT", + } + PeerConnectionType_value = map[string]int32{ + "PEER_CONNECTION_TYPE_NOT_CONNECTED": 0, + "PEER_CONNECTION_TYPE_CONNECTED": 1, + "PEER_CONNECTION_TYPE_CAN_CONNECT": 2, + "PEER_CONNECTION_TYPE_CANNOT_CONNECT": 3, + } +) + +func (x PeerConnectionType) Enum() *PeerConnectionType { + p := new(PeerConnectionType) + *p = x + return p +} + +func (x PeerConnectionType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PeerConnectionType) Descriptor() protoreflect.EnumDescriptor { + return file_agntcy_dir_routing_v1_peer_proto_enumTypes[0].Descriptor() +} + +func (PeerConnectionType) Type() protoreflect.EnumType { + return &file_agntcy_dir_routing_v1_peer_proto_enumTypes[0] +} + +func (x PeerConnectionType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PeerConnectionType.Descriptor instead. +func (PeerConnectionType) EnumDescriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP(), []int{0} +} + +type Peer struct { + state protoimpl.MessageState `protogen:"open.v1"` + // ID of a given peer, typically described by a protocol. + // For example: + // - SPIFFE: "spiffe://example.org/service/foo" + // - JWT: "jwt:sub=alice,iss=https://issuer.example.com" + // - Tor: "onion:abcdefghijklmno.onion" + // - DID: "did:example:123456789abcdefghi" + // - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Multiaddrs for a given peer. + // For example: + // - "/ip4/127.0.0.1/tcp/4001" + // - "/ip6/::1/tcp/4001" + // - "/dns4/example.com/tcp/443/https" + Addrs []string `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` + // Additional metadata about the peer. + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Used to signal the sender's connection capabilities to the peer. + Connection PeerConnectionType `protobuf:"varint,4,opt,name=connection,proto3,enum=agntcy.dir.routing.v1.PeerConnectionType" json:"connection,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Peer) Reset() { + *x = Peer{} + mi := &file_agntcy_dir_routing_v1_peer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Peer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Peer) ProtoMessage() {} + +func (x *Peer) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_peer_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Peer.ProtoReflect.Descriptor instead. 
+func (*Peer) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP(), []int{0} +} + +func (x *Peer) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Peer) GetAddrs() []string { + if x != nil { + return x.Addrs + } + return nil +} + +func (x *Peer) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Peer) GetConnection() PeerConnectionType { + if x != nil { + return x.Connection + } + return PeerConnectionType_PEER_CONNECTION_TYPE_NOT_CONNECTED +} + +var File_agntcy_dir_routing_v1_peer_proto protoreflect.FileDescriptor + +var file_agntcy_dir_routing_v1_peer_proto_rawDesc = string([]byte{ + 0x0a, 0x20, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x22, 0x87, 0x02, 0x0a, 0x04, 0x50, 0x65, + 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x4e, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x2a, 0xaf, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x22, 0x50, 0x45, + 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, + 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, + 0x41, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x02, 0x12, 0x27, 0x0a, 
0x23, + 0x50, 0x45, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x10, 0x03, 0x42, 0xc3, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x31, 0x42, 0x09, 0x50, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, + 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_routing_v1_peer_proto_rawDescOnce sync.Once + file_agntcy_dir_routing_v1_peer_proto_rawDescData []byte +) + +func file_agntcy_dir_routing_v1_peer_proto_rawDescGZIP() []byte { + file_agntcy_dir_routing_v1_peer_proto_rawDescOnce.Do(func() { + file_agntcy_dir_routing_v1_peer_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_peer_proto_rawDesc), len(file_agntcy_dir_routing_v1_peer_proto_rawDesc))) + }) + return file_agntcy_dir_routing_v1_peer_proto_rawDescData +} + +var file_agntcy_dir_routing_v1_peer_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_routing_v1_peer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_agntcy_dir_routing_v1_peer_proto_goTypes = []any{ + (PeerConnectionType)(0), // 0: agntcy.dir.routing.v1.PeerConnectionType + (*Peer)(nil), // 1: agntcy.dir.routing.v1.Peer + nil, // 2: agntcy.dir.routing.v1.Peer.AnnotationsEntry +} +var file_agntcy_dir_routing_v1_peer_proto_depIdxs = []int32{ + 2, // 0: agntcy.dir.routing.v1.Peer.annotations:type_name -> agntcy.dir.routing.v1.Peer.AnnotationsEntry + 0, // 1: agntcy.dir.routing.v1.Peer.connection:type_name -> agntcy.dir.routing.v1.PeerConnectionType + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_routing_v1_peer_proto_init() } +func file_agntcy_dir_routing_v1_peer_proto_init() { + if File_agntcy_dir_routing_v1_peer_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_peer_proto_rawDesc), len(file_agntcy_dir_routing_v1_peer_proto_rawDesc)), + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_agntcy_dir_routing_v1_peer_proto_goTypes, + DependencyIndexes: file_agntcy_dir_routing_v1_peer_proto_depIdxs, + EnumInfos: file_agntcy_dir_routing_v1_peer_proto_enumTypes, + MessageInfos: file_agntcy_dir_routing_v1_peer_proto_msgTypes, + }.Build() + File_agntcy_dir_routing_v1_peer_proto = out.File + file_agntcy_dir_routing_v1_peer_proto_goTypes = nil + file_agntcy_dir_routing_v1_peer_proto_depIdxs = nil +} diff --git a/api/routing/v1/publication_service.pb.go b/api/routing/v1/publication_service.pb.go index db7acbd19..6e312c357 100644 --- a/api/routing/v1/publication_service.pb.go +++ b/api/routing/v1/publication_service.pb.go @@ -1,553 +1,553 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/routing/v1/publication_service.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// PublicationStatus represents the current state of a publication request. -// Publications progress from pending to processing to completed or failed states. -type PublicationStatus int32 - -const ( - // Default/unset status - should not be used in practice - PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED PublicationStatus = 0 - // Sync operation has been created but not yet started - PublicationStatus_PUBLICATION_STATUS_PENDING PublicationStatus = 1 - // Sync operation is actively discovering and transferring objects - PublicationStatus_PUBLICATION_STATUS_IN_PROGRESS PublicationStatus = 2 - // Sync operation has been successfully completed - PublicationStatus_PUBLICATION_STATUS_COMPLETED PublicationStatus = 3 - // Sync operation encountered an error and stopped - PublicationStatus_PUBLICATION_STATUS_FAILED PublicationStatus = 4 -) - -// Enum value maps for PublicationStatus. -var ( - PublicationStatus_name = map[int32]string{ - 0: "PUBLICATION_STATUS_UNSPECIFIED", - 1: "PUBLICATION_STATUS_PENDING", - 2: "PUBLICATION_STATUS_IN_PROGRESS", - 3: "PUBLICATION_STATUS_COMPLETED", - 4: "PUBLICATION_STATUS_FAILED", - } - PublicationStatus_value = map[string]int32{ - "PUBLICATION_STATUS_UNSPECIFIED": 0, - "PUBLICATION_STATUS_PENDING": 1, - "PUBLICATION_STATUS_IN_PROGRESS": 2, - "PUBLICATION_STATUS_COMPLETED": 3, - "PUBLICATION_STATUS_FAILED": 4, - } -) - -func (x PublicationStatus) Enum() *PublicationStatus { - p := new(PublicationStatus) - *p = x - return p -} - -func (x PublicationStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (PublicationStatus) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_routing_v1_publication_service_proto_enumTypes[0].Descriptor() -} - -func (PublicationStatus) Type() protoreflect.EnumType { - return &file_agntcy_dir_routing_v1_publication_service_proto_enumTypes[0] -} - -func (x PublicationStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use PublicationStatus.Descriptor instead. 
-func (PublicationStatus) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{0} -} - -// CreatePublicationResponse returns the result of creating a publication request. -// This includes the publication ID and any relevant metadata. -type CreatePublicationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the publication operation. - PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreatePublicationResponse) Reset() { - *x = CreatePublicationResponse{} - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreatePublicationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreatePublicationResponse) ProtoMessage() {} - -func (x *CreatePublicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreatePublicationResponse.ProtoReflect.Descriptor instead. -func (*CreatePublicationResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{0} -} - -func (x *CreatePublicationResponse) GetPublicationId() string { - if x != nil { - return x.PublicationId - } - return "" -} - -// ListPublicationsRequest contains optional filters for listing publication requests. -type ListPublicationsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Optional limit on the number of results to return. - Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - // Optional offset for pagination of results. - Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListPublicationsRequest) Reset() { - *x = ListPublicationsRequest{} - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListPublicationsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPublicationsRequest) ProtoMessage() {} - -func (x *ListPublicationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPublicationsRequest.ProtoReflect.Descriptor instead. 
-func (*ListPublicationsRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{1} -} - -func (x *ListPublicationsRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -func (x *ListPublicationsRequest) GetOffset() uint32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -// ListPublicationsItem represents a single publication request in the list response. -// Contains publication details including ID, status, and creation timestamp. -type ListPublicationsItem struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the publication operation. - PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` - // Current status of the publication operation. - Status PublicationStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.routing.v1.PublicationStatus" json:"status,omitempty"` - // Timestamp when the publication operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - CreatedTime string `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` - // Timestamp of the most recent status update for this publication in the RFC3339 format. - LastUpdateTime string `protobuf:"bytes,4,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListPublicationsItem) Reset() { - *x = ListPublicationsItem{} - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListPublicationsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListPublicationsItem) ProtoMessage() {} - -func (x *ListPublicationsItem) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListPublicationsItem.ProtoReflect.Descriptor instead. -func (*ListPublicationsItem) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{2} -} - -func (x *ListPublicationsItem) GetPublicationId() string { - if x != nil { - return x.PublicationId - } - return "" -} - -func (x *ListPublicationsItem) GetStatus() PublicationStatus { - if x != nil { - return x.Status - } - return PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED -} - -func (x *ListPublicationsItem) GetCreatedTime() string { - if x != nil { - return x.CreatedTime - } - return "" -} - -func (x *ListPublicationsItem) GetLastUpdateTime() string { - if x != nil { - return x.LastUpdateTime - } - return "" -} - -// GetPublicationRequest specifies which publication to retrieve by its identifier. -type GetPublicationRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the publication operation to query. 
- PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetPublicationRequest) Reset() { - *x = GetPublicationRequest{} - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPublicationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPublicationRequest) ProtoMessage() {} - -func (x *GetPublicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPublicationRequest.ProtoReflect.Descriptor instead. -func (*GetPublicationRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{3} -} - -func (x *GetPublicationRequest) GetPublicationId() string { - if x != nil { - return x.PublicationId - } - return "" -} - -// GetPublicationResponse contains the full details of a specific publication request. -// Includes status, progress information, and any error details if applicable. -type GetPublicationResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the publication operation. - PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` - // Current status of the publication operation. - Status PublicationStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.routing.v1.PublicationStatus" json:"status,omitempty"` - // Timestamp when the publication operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - CreatedTime string `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` - // Timestamp of the most recent status update for this publication in the RFC3339 format. - LastUpdateTime string `protobuf:"bytes,4,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetPublicationResponse) Reset() { - *x = GetPublicationResponse{} - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetPublicationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetPublicationResponse) ProtoMessage() {} - -func (x *GetPublicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetPublicationResponse.ProtoReflect.Descriptor instead. 
-func (*GetPublicationResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{4} -} - -func (x *GetPublicationResponse) GetPublicationId() string { - if x != nil { - return x.PublicationId - } - return "" -} - -func (x *GetPublicationResponse) GetStatus() PublicationStatus { - if x != nil { - return x.Status - } - return PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED -} - -func (x *GetPublicationResponse) GetCreatedTime() string { - if x != nil { - return x.CreatedTime - } - return "" -} - -func (x *GetPublicationResponse) GetLastUpdateTime() string { - if x != nil { - return x.LastUpdateTime - } - return "" -} - -var File_agntcy_dir_routing_v1_publication_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_routing_v1_publication_service_proto_rawDesc = string([]byte{ - 0x0a, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x2b, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x17, 0x4c, 0x69, 0x73, - 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, - 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, - 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x22, 0xcc, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x64, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x22, 0x3e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x22, 0xce, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x2a, 0xbc, 0x01, 0x0a, 0x11, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, 0x49, - 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x50, - 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x50, - 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, - 0x20, 0x0a, 0x1c, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, - 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, - 0x32, 0xe4, 0x02, 0x0a, 0x12, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 
0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xd1, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x42, 0x17, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, - 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_routing_v1_publication_service_proto_rawDescOnce sync.Once - file_agntcy_dir_routing_v1_publication_service_proto_rawDescData []byte -) - -func file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_routing_v1_publication_service_proto_rawDescOnce.Do(func() { - 
file_agntcy_dir_routing_v1_publication_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc))) - }) - return file_agntcy_dir_routing_v1_publication_service_proto_rawDescData -} - -var file_agntcy_dir_routing_v1_publication_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_agntcy_dir_routing_v1_publication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_agntcy_dir_routing_v1_publication_service_proto_goTypes = []any{ - (PublicationStatus)(0), // 0: agntcy.dir.routing.v1.PublicationStatus - (*CreatePublicationResponse)(nil), // 1: agntcy.dir.routing.v1.CreatePublicationResponse - (*ListPublicationsRequest)(nil), // 2: agntcy.dir.routing.v1.ListPublicationsRequest - (*ListPublicationsItem)(nil), // 3: agntcy.dir.routing.v1.ListPublicationsItem - (*GetPublicationRequest)(nil), // 4: agntcy.dir.routing.v1.GetPublicationRequest - (*GetPublicationResponse)(nil), // 5: agntcy.dir.routing.v1.GetPublicationResponse - (*PublishRequest)(nil), // 6: agntcy.dir.routing.v1.PublishRequest -} -var file_agntcy_dir_routing_v1_publication_service_proto_depIdxs = []int32{ - 0, // 0: agntcy.dir.routing.v1.ListPublicationsItem.status:type_name -> agntcy.dir.routing.v1.PublicationStatus - 0, // 1: agntcy.dir.routing.v1.GetPublicationResponse.status:type_name -> agntcy.dir.routing.v1.PublicationStatus - 6, // 2: agntcy.dir.routing.v1.PublicationService.CreatePublication:input_type -> agntcy.dir.routing.v1.PublishRequest - 2, // 3: agntcy.dir.routing.v1.PublicationService.ListPublications:input_type -> agntcy.dir.routing.v1.ListPublicationsRequest - 4, // 4: agntcy.dir.routing.v1.PublicationService.GetPublication:input_type -> agntcy.dir.routing.v1.GetPublicationRequest - 1, // 5: agntcy.dir.routing.v1.PublicationService.CreatePublication:output_type -> agntcy.dir.routing.v1.CreatePublicationResponse - 3, // 6: agntcy.dir.routing.v1.PublicationService.ListPublications:output_type -> agntcy.dir.routing.v1.ListPublicationsItem - 5, // 7: agntcy.dir.routing.v1.PublicationService.GetPublication:output_type -> agntcy.dir.routing.v1.GetPublicationResponse - 5, // [5:8] is the sub-list for method output_type - 2, // [2:5] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_routing_v1_publication_service_proto_init() } -func file_agntcy_dir_routing_v1_publication_service_proto_init() { - if File_agntcy_dir_routing_v1_publication_service_proto != nil { - return - } - file_agntcy_dir_routing_v1_routing_service_proto_init() - file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc)), - NumEnums: 1, - NumMessages: 5, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_routing_v1_publication_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_routing_v1_publication_service_proto_depIdxs, - EnumInfos: file_agntcy_dir_routing_v1_publication_service_proto_enumTypes, - MessageInfos: 
file_agntcy_dir_routing_v1_publication_service_proto_msgTypes,
-	}.Build()
-	File_agntcy_dir_routing_v1_publication_service_proto = out.File
-	file_agntcy_dir_routing_v1_publication_service_proto_goTypes = nil
-	file_agntcy_dir_routing_v1_publication_service_proto_depIdxs = nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.5
+// protoc (unknown)
+// source: agntcy/dir/routing/v1/publication_service.proto
+
+package v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// PublicationStatus represents the current state of a publication request.
+// Publications progress from pending to in-progress to completed or failed states.
+type PublicationStatus int32
+
+const (
+	// Default/unset status - should not be used in practice
+	PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED PublicationStatus = 0
+	// Publication operation has been created but not yet started
+	PublicationStatus_PUBLICATION_STATUS_PENDING PublicationStatus = 1
+	// Publication operation is actively being processed
+	PublicationStatus_PUBLICATION_STATUS_IN_PROGRESS PublicationStatus = 2
+	// Publication operation has been successfully completed
+	PublicationStatus_PUBLICATION_STATUS_COMPLETED PublicationStatus = 3
+	// Publication operation encountered an error and stopped
+	PublicationStatus_PUBLICATION_STATUS_FAILED PublicationStatus = 4
+)
+
+// Enum value maps for PublicationStatus.
+var (
+	PublicationStatus_name = map[int32]string{
+		0: "PUBLICATION_STATUS_UNSPECIFIED",
+		1: "PUBLICATION_STATUS_PENDING",
+		2: "PUBLICATION_STATUS_IN_PROGRESS",
+		3: "PUBLICATION_STATUS_COMPLETED",
+		4: "PUBLICATION_STATUS_FAILED",
+	}
+	PublicationStatus_value = map[string]int32{
+		"PUBLICATION_STATUS_UNSPECIFIED": 0,
+		"PUBLICATION_STATUS_PENDING": 1,
+		"PUBLICATION_STATUS_IN_PROGRESS": 2,
+		"PUBLICATION_STATUS_COMPLETED": 3,
+		"PUBLICATION_STATUS_FAILED": 4,
+	}
+)
+
+func (x PublicationStatus) Enum() *PublicationStatus {
+	p := new(PublicationStatus)
+	*p = x
+	return p
+}
+
+func (x PublicationStatus) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PublicationStatus) Descriptor() protoreflect.EnumDescriptor {
+	return file_agntcy_dir_routing_v1_publication_service_proto_enumTypes[0].Descriptor()
+}
+
+func (PublicationStatus) Type() protoreflect.EnumType {
+	return &file_agntcy_dir_routing_v1_publication_service_proto_enumTypes[0]
+}
+
+func (x PublicationStatus) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PublicationStatus.Descriptor instead.
+func (PublicationStatus) EnumDescriptor() ([]byte, []int) {
+	return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{0}
+}
+
+// CreatePublicationResponse returns the result of creating a publication request.
+// This includes the publication ID and any relevant metadata.
+type CreatePublicationResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Unique identifier of the publication operation.
+ PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreatePublicationResponse) Reset() { + *x = CreatePublicationResponse{} + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreatePublicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreatePublicationResponse) ProtoMessage() {} + +func (x *CreatePublicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreatePublicationResponse.ProtoReflect.Descriptor instead. +func (*CreatePublicationResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreatePublicationResponse) GetPublicationId() string { + if x != nil { + return x.PublicationId + } + return "" +} + +// ListPublicationsRequest contains optional filters for listing publication requests. +type ListPublicationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Optional limit on the number of results to return. + Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + // Optional offset for pagination of results. + Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListPublicationsRequest) Reset() { + *x = ListPublicationsRequest{} + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListPublicationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPublicationsRequest) ProtoMessage() {} + +func (x *ListPublicationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPublicationsRequest.ProtoReflect.Descriptor instead. +func (*ListPublicationsRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{1} +} + +func (x *ListPublicationsRequest) GetLimit() uint32 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +func (x *ListPublicationsRequest) GetOffset() uint32 { + if x != nil && x.Offset != nil { + return *x.Offset + } + return 0 +} + +// ListPublicationsItem represents a single publication request in the list response. +// Contains publication details including ID, status, and creation timestamp. +type ListPublicationsItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the publication operation. + PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` + // Current status of the publication operation. 
+ Status PublicationStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.routing.v1.PublicationStatus" json:"status,omitempty"` + // Timestamp when the publication operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + CreatedTime string `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // Timestamp of the most recent status update for this publication in the RFC3339 format. + LastUpdateTime string `protobuf:"bytes,4,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListPublicationsItem) Reset() { + *x = ListPublicationsItem{} + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListPublicationsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListPublicationsItem) ProtoMessage() {} + +func (x *ListPublicationsItem) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListPublicationsItem.ProtoReflect.Descriptor instead. +func (*ListPublicationsItem) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ListPublicationsItem) GetPublicationId() string { + if x != nil { + return x.PublicationId + } + return "" +} + +func (x *ListPublicationsItem) GetStatus() PublicationStatus { + if x != nil { + return x.Status + } + return PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED +} + +func (x *ListPublicationsItem) GetCreatedTime() string { + if x != nil { + return x.CreatedTime + } + return "" +} + +func (x *ListPublicationsItem) GetLastUpdateTime() string { + if x != nil { + return x.LastUpdateTime + } + return "" +} + +// GetPublicationRequest specifies which publication to retrieve by its identifier. +type GetPublicationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the publication operation to query. + PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetPublicationRequest) Reset() { + *x = GetPublicationRequest{} + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPublicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPublicationRequest) ProtoMessage() {} + +func (x *GetPublicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPublicationRequest.ProtoReflect.Descriptor instead. 
+func (*GetPublicationRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{3} +} + +func (x *GetPublicationRequest) GetPublicationId() string { + if x != nil { + return x.PublicationId + } + return "" +} + +// GetPublicationResponse contains the full details of a specific publication request. +// Includes status, progress information, and any error details if applicable. +type GetPublicationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the publication operation. + PublicationId string `protobuf:"bytes,1,opt,name=publication_id,json=publicationId,proto3" json:"publication_id,omitempty"` + // Current status of the publication operation. + Status PublicationStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.routing.v1.PublicationStatus" json:"status,omitempty"` + // Timestamp when the publication operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + CreatedTime string `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // Timestamp of the most recent status update for this publication in the RFC3339 format. + LastUpdateTime string `protobuf:"bytes,4,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetPublicationResponse) Reset() { + *x = GetPublicationResponse{} + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPublicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPublicationResponse) ProtoMessage() {} + +func (x *GetPublicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPublicationResponse.ProtoReflect.Descriptor instead. 
+func (*GetPublicationResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP(), []int{4} +} + +func (x *GetPublicationResponse) GetPublicationId() string { + if x != nil { + return x.PublicationId + } + return "" +} + +func (x *GetPublicationResponse) GetStatus() PublicationStatus { + if x != nil { + return x.Status + } + return PublicationStatus_PUBLICATION_STATUS_UNSPECIFIED +} + +func (x *GetPublicationResponse) GetCreatedTime() string { + if x != nil { + return x.CreatedTime + } + return "" +} + +func (x *GetPublicationResponse) GetLastUpdateTime() string { + if x != nil { + return x.LastUpdateTime + } + return "" +} + +var File_agntcy_dir_routing_v1_publication_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_routing_v1_publication_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x2b, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x17, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, + 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x22, 0xcc, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x3e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x22, 0xce, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x2a, 0xbc, 0x01, 0x0a, 0x11, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x22, 0x0a, 0x1e, 0x50, 0x55, 0x42, 0x4c, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x50, + 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x22, 0x0a, 0x1e, 0x50, + 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, + 0x20, 0x0a, 0x1c, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, + 0x03, 0x12, 0x1d, 0x0a, 0x19, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, + 0x32, 0xe4, 0x02, 0x0a, 0x12, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 
0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x30, 0x01, 0x12, 0x6d, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xd1, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x42, 0x17, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, + 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_routing_v1_publication_service_proto_rawDescOnce sync.Once + file_agntcy_dir_routing_v1_publication_service_proto_rawDescData []byte +) + +func file_agntcy_dir_routing_v1_publication_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_routing_v1_publication_service_proto_rawDescOnce.Do(func() { + 
file_agntcy_dir_routing_v1_publication_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc))) + }) + return file_agntcy_dir_routing_v1_publication_service_proto_rawDescData +} + +var file_agntcy_dir_routing_v1_publication_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_routing_v1_publication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_agntcy_dir_routing_v1_publication_service_proto_goTypes = []any{ + (PublicationStatus)(0), // 0: agntcy.dir.routing.v1.PublicationStatus + (*CreatePublicationResponse)(nil), // 1: agntcy.dir.routing.v1.CreatePublicationResponse + (*ListPublicationsRequest)(nil), // 2: agntcy.dir.routing.v1.ListPublicationsRequest + (*ListPublicationsItem)(nil), // 3: agntcy.dir.routing.v1.ListPublicationsItem + (*GetPublicationRequest)(nil), // 4: agntcy.dir.routing.v1.GetPublicationRequest + (*GetPublicationResponse)(nil), // 5: agntcy.dir.routing.v1.GetPublicationResponse + (*PublishRequest)(nil), // 6: agntcy.dir.routing.v1.PublishRequest +} +var file_agntcy_dir_routing_v1_publication_service_proto_depIdxs = []int32{ + 0, // 0: agntcy.dir.routing.v1.ListPublicationsItem.status:type_name -> agntcy.dir.routing.v1.PublicationStatus + 0, // 1: agntcy.dir.routing.v1.GetPublicationResponse.status:type_name -> agntcy.dir.routing.v1.PublicationStatus + 6, // 2: agntcy.dir.routing.v1.PublicationService.CreatePublication:input_type -> agntcy.dir.routing.v1.PublishRequest + 2, // 3: agntcy.dir.routing.v1.PublicationService.ListPublications:input_type -> agntcy.dir.routing.v1.ListPublicationsRequest + 4, // 4: agntcy.dir.routing.v1.PublicationService.GetPublication:input_type -> agntcy.dir.routing.v1.GetPublicationRequest + 1, // 5: agntcy.dir.routing.v1.PublicationService.CreatePublication:output_type -> agntcy.dir.routing.v1.CreatePublicationResponse + 3, // 6: agntcy.dir.routing.v1.PublicationService.ListPublications:output_type -> agntcy.dir.routing.v1.ListPublicationsItem + 5, // 7: agntcy.dir.routing.v1.PublicationService.GetPublication:output_type -> agntcy.dir.routing.v1.GetPublicationResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_routing_v1_publication_service_proto_init() } +func file_agntcy_dir_routing_v1_publication_service_proto_init() { + if File_agntcy_dir_routing_v1_publication_service_proto != nil { + return + } + file_agntcy_dir_routing_v1_routing_service_proto_init() + file_agntcy_dir_routing_v1_publication_service_proto_msgTypes[1].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_publication_service_proto_rawDesc)), + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_routing_v1_publication_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_routing_v1_publication_service_proto_depIdxs, + EnumInfos: file_agntcy_dir_routing_v1_publication_service_proto_enumTypes, + MessageInfos: 
file_agntcy_dir_routing_v1_publication_service_proto_msgTypes, + }.Build() + File_agntcy_dir_routing_v1_publication_service_proto = out.File + file_agntcy_dir_routing_v1_publication_service_proto_goTypes = nil + file_agntcy_dir_routing_v1_publication_service_proto_depIdxs = nil +} diff --git a/api/routing/v1/publication_service_grpc.pb.go b/api/routing/v1/publication_service_grpc.pb.go index 347591b2e..a1e0b1376 100644 --- a/api/routing/v1/publication_service_grpc.pb.go +++ b/api/routing/v1/publication_service_grpc.pb.go @@ -1,256 +1,256 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/routing/v1/publication_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - PublicationService_CreatePublication_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/CreatePublication" - PublicationService_ListPublications_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/ListPublications" - PublicationService_GetPublication_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/GetPublication" -) - -// PublicationServiceClient is the client API for PublicationService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// PublicationService manages publication requests for announcing records to the DHT. -// -// Publications are stored in the database and processed by a worker that runs every hour. -// The publication workflow: -// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records -// 2. Publication requests are added to the database -// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published -// 4. PublicationWorker announces the records with these CIDs to the DHT -type PublicationServiceClient interface { - // CreatePublication creates a new publication request that will be processed by the PublicationWorker. - // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. - CreatePublication(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*CreatePublicationResponse, error) - // ListPublications returns a stream of all publication requests in the system. - // This allows monitoring of pending, processing, and completed publication requests. - ListPublications(ctx context.Context, in *ListPublicationsRequest, opts ...grpc.CallOption) (PublicationService_ListPublicationsClient, error) - // GetPublication retrieves details of a specific publication request by its identifier. - // This includes the current status and any associated metadata. 
- GetPublication(ctx context.Context, in *GetPublicationRequest, opts ...grpc.CallOption) (*GetPublicationResponse, error) -} - -type publicationServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewPublicationServiceClient(cc grpc.ClientConnInterface) PublicationServiceClient { - return &publicationServiceClient{cc} -} - -func (c *publicationServiceClient) CreatePublication(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*CreatePublicationResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreatePublicationResponse) - err := c.cc.Invoke(ctx, PublicationService_CreatePublication_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *publicationServiceClient) ListPublications(ctx context.Context, in *ListPublicationsRequest, opts ...grpc.CallOption) (PublicationService_ListPublicationsClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &PublicationService_ServiceDesc.Streams[0], PublicationService_ListPublications_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &publicationServiceListPublicationsClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type PublicationService_ListPublicationsClient interface { - Recv() (*ListPublicationsItem, error) - grpc.ClientStream -} - -type publicationServiceListPublicationsClient struct { - grpc.ClientStream -} - -func (x *publicationServiceListPublicationsClient) Recv() (*ListPublicationsItem, error) { - m := new(ListPublicationsItem) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *publicationServiceClient) GetPublication(ctx context.Context, in *GetPublicationRequest, opts ...grpc.CallOption) (*GetPublicationResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetPublicationResponse) - err := c.cc.Invoke(ctx, PublicationService_GetPublication_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// PublicationServiceServer is the server API for PublicationService service. -// All implementations should embed UnimplementedPublicationServiceServer -// for forward compatibility. -// -// PublicationService manages publication requests for announcing records to the DHT. -// -// Publications are stored in the database and processed by a worker that runs every hour. -// The publication workflow: -// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records -// 2. Publication requests are added to the database -// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published -// 4. PublicationWorker announces the records with these CIDs to the DHT -type PublicationServiceServer interface { - // CreatePublication creates a new publication request that will be processed by the PublicationWorker. - // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. - CreatePublication(context.Context, *PublishRequest) (*CreatePublicationResponse, error) - // ListPublications returns a stream of all publication requests in the system. 
- // This allows monitoring of pending, processing, and completed publication requests. - ListPublications(*ListPublicationsRequest, PublicationService_ListPublicationsServer) error - // GetPublication retrieves details of a specific publication request by its identifier. - // This includes the current status and any associated metadata. - GetPublication(context.Context, *GetPublicationRequest) (*GetPublicationResponse, error) -} - -// UnimplementedPublicationServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedPublicationServiceServer struct{} - -func (UnimplementedPublicationServiceServer) CreatePublication(context.Context, *PublishRequest) (*CreatePublicationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreatePublication not implemented") -} -func (UnimplementedPublicationServiceServer) ListPublications(*ListPublicationsRequest, PublicationService_ListPublicationsServer) error { - return status.Errorf(codes.Unimplemented, "method ListPublications not implemented") -} -func (UnimplementedPublicationServiceServer) GetPublication(context.Context, *GetPublicationRequest) (*GetPublicationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPublication not implemented") -} -func (UnimplementedPublicationServiceServer) testEmbeddedByValue() {} - -// UnsafePublicationServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to PublicationServiceServer will -// result in compilation errors. -type UnsafePublicationServiceServer interface { - mustEmbedUnimplementedPublicationServiceServer() -} - -func RegisterPublicationServiceServer(s grpc.ServiceRegistrar, srv PublicationServiceServer) { - // If the following call pancis, it indicates UnimplementedPublicationServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&PublicationService_ServiceDesc, srv) -} - -func _PublicationService_CreatePublication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PublishRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublicationServiceServer).CreatePublication(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PublicationService_CreatePublication_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublicationServiceServer).CreatePublication(ctx, req.(*PublishRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _PublicationService_ListPublications_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListPublicationsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(PublicationServiceServer).ListPublications(m, &publicationServiceListPublicationsServer{ServerStream: stream}) -} - -type PublicationService_ListPublicationsServer interface { - Send(*ListPublicationsItem) error - grpc.ServerStream -} - -type publicationServiceListPublicationsServer struct { - grpc.ServerStream -} - -func (x *publicationServiceListPublicationsServer) Send(m *ListPublicationsItem) error { - return x.ServerStream.SendMsg(m) -} - -func _PublicationService_GetPublication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPublicationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(PublicationServiceServer).GetPublication(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: PublicationService_GetPublication_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(PublicationServiceServer).GetPublication(ctx, req.(*GetPublicationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// PublicationService_ServiceDesc is the grpc.ServiceDesc for PublicationService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var PublicationService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.routing.v1.PublicationService", - HandlerType: (*PublicationServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreatePublication", - Handler: _PublicationService_CreatePublication_Handler, - }, - { - MethodName: "GetPublication", - Handler: _PublicationService_GetPublication_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListPublications", - Handler: _PublicationService_ListPublications_Handler, - ServerStreams: true, - }, - }, - Metadata: "agntcy/dir/routing/v1/publication_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
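The PublicationService workflow documented above is client-driven only at step 1; steps 2-4 run server-side, so a client creates the request and then polls for status. An illustrative usage sketch, not a definitive implementation: the address, the placeholder CID, the Cid field name on core/v1 RecordRef, and the polling cadence are all assumptions.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "github.com/agntcy/dir/api/core/v1"
	routingv1 "github.com/agntcy/dir/api/routing/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed address of a Directory API server.
	conn, err := grpc.NewClient("localhost:8888", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	client := routingv1.NewPublicationServiceClient(conn)
	ctx := context.Background()

	// Step 1 of the documented workflow: create a publication request,
	// here from an explicit list of record refs (a query would also work).
	created, err := client.CreatePublication(ctx, &routingv1.PublishRequest{
		Request: &routingv1.PublishRequest_RecordRefs{
			RecordRefs: &routingv1.RecordRefs{
				Refs: []*corev1.RecordRef{{Cid: "baguqeera..."}}, // placeholder CID, field name assumed
			},
		},
	})
	if err != nil {
		panic(err)
	}

	// Steps 2-4 happen server-side; the client can only poll for status.
	for {
		got, err := client.GetPublication(ctx, &routingv1.GetPublicationRequest{
			PublicationId: created.GetPublicationId(),
		})
		if err != nil {
			panic(err)
		}
		s := got.GetStatus()
		if s == routingv1.PublicationStatus_PUBLICATION_STATUS_COMPLETED ||
			s == routingv1.PublicationStatus_PUBLICATION_STATUS_FAILED {
			fmt.Println("final status:", s)
			return
		}
		time.Sleep(10 * time.Second) // the worker runs periodically, so poll slowly
	}
}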
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: agntcy/dir/routing/v1/publication_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + PublicationService_CreatePublication_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/CreatePublication" + PublicationService_ListPublications_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/ListPublications" + PublicationService_GetPublication_FullMethodName = "/agntcy.dir.routing.v1.PublicationService/GetPublication" +) + +// PublicationServiceClient is the client API for PublicationService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// PublicationService manages publication requests for announcing records to the DHT. +// +// Publications are stored in the database and processed by a worker that runs every hour. +// The publication workflow: +// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records +// 2. Publication requests are added to the database +// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published +// 4. PublicationWorker announces the records with these CIDs to the DHT +type PublicationServiceClient interface { + // CreatePublication creates a new publication request that will be processed by the PublicationWorker. + // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. + CreatePublication(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*CreatePublicationResponse, error) + // ListPublications returns a stream of all publication requests in the system. + // This allows monitoring of pending, processing, and completed publication requests. + ListPublications(ctx context.Context, in *ListPublicationsRequest, opts ...grpc.CallOption) (PublicationService_ListPublicationsClient, error) + // GetPublication retrieves details of a specific publication request by its identifier. + // This includes the current status and any associated metadata. + GetPublication(ctx context.Context, in *GetPublicationRequest, opts ...grpc.CallOption) (*GetPublicationResponse, error) +} + +type publicationServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewPublicationServiceClient(cc grpc.ClientConnInterface) PublicationServiceClient { + return &publicationServiceClient{cc} +} + +func (c *publicationServiceClient) CreatePublication(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*CreatePublicationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreatePublicationResponse) + err := c.cc.Invoke(ctx, PublicationService_CreatePublication_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publicationServiceClient) ListPublications(ctx context.Context, in *ListPublicationsRequest, opts ...grpc.CallOption) (PublicationService_ListPublicationsClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &PublicationService_ServiceDesc.Streams[0], PublicationService_ListPublications_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &publicationServiceListPublicationsClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type PublicationService_ListPublicationsClient interface { + Recv() (*ListPublicationsItem, error) + grpc.ClientStream +} + +type publicationServiceListPublicationsClient struct { + grpc.ClientStream +} + +func (x *publicationServiceListPublicationsClient) Recv() (*ListPublicationsItem, error) { + m := new(ListPublicationsItem) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *publicationServiceClient) GetPublication(ctx context.Context, in *GetPublicationRequest, opts ...grpc.CallOption) (*GetPublicationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPublicationResponse) + err := c.cc.Invoke(ctx, PublicationService_GetPublication_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PublicationServiceServer is the server API for PublicationService service. +// All implementations should embed UnimplementedPublicationServiceServer +// for forward compatibility. +// +// PublicationService manages publication requests for announcing records to the DHT. +// +// Publications are stored in the database and processed by a worker that runs every hour. +// The publication workflow: +// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records +// 2. Publication requests are added to the database +// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published +// 4. PublicationWorker announces the records with these CIDs to the DHT +type PublicationServiceServer interface { + // CreatePublication creates a new publication request that will be processed by the PublicationWorker. + // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. + CreatePublication(context.Context, *PublishRequest) (*CreatePublicationResponse, error) + // ListPublications returns a stream of all publication requests in the system. + // This allows monitoring of pending, processing, and completed publication requests. + ListPublications(*ListPublicationsRequest, PublicationService_ListPublicationsServer) error + // GetPublication retrieves details of a specific publication request by its identifier. + // This includes the current status and any associated metadata. + GetPublication(context.Context, *GetPublicationRequest) (*GetPublicationResponse, error) +} + +// UnimplementedPublicationServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
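To make the embed-by-value guidance concrete, a hand-written server might look like the minimal sketch below. Illustrative only: the fixed publication ID and listen address are assumptions, and only CreatePublication is overridden; the embedded struct supplies the remaining stubs.

package main

import (
	"context"
	"net"

	routingv1 "github.com/agntcy/dir/api/routing/v1"
	"google.golang.org/grpc"
)

// publicationServer embeds UnimplementedPublicationServiceServer by value,
// so the registration-time check passes and any method added to the
// service later returns codes.Unimplemented instead of panicking.
type publicationServer struct {
	routingv1.UnimplementedPublicationServiceServer
}

func (s *publicationServer) CreatePublication(ctx context.Context, in *routingv1.PublishRequest) (*routingv1.CreatePublicationResponse, error) {
	// A real implementation would persist the request for the worker;
	// the fixed ID below is a placeholder assumption.
	return &routingv1.CreatePublicationResponse{PublicationId: "example-id"}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8888")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()
	routingv1.RegisterPublicationServiceServer(s, &publicationServer{})
	_ = s.Serve(lis)
}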
+type UnimplementedPublicationServiceServer struct{}
+
+func (UnimplementedPublicationServiceServer) CreatePublication(context.Context, *PublishRequest) (*CreatePublicationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreatePublication not implemented")
+}
+func (UnimplementedPublicationServiceServer) ListPublications(*ListPublicationsRequest, PublicationService_ListPublicationsServer) error {
+ return status.Errorf(codes.Unimplemented, "method ListPublications not implemented")
+}
+func (UnimplementedPublicationServiceServer) GetPublication(context.Context, *GetPublicationRequest) (*GetPublicationResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetPublication not implemented")
+}
+func (UnimplementedPublicationServiceServer) testEmbeddedByValue() {}
+
+// UnsafePublicationServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to PublicationServiceServer will
+// result in compilation errors.
+type UnsafePublicationServiceServer interface {
+ mustEmbedUnimplementedPublicationServiceServer()
+}
+
+func RegisterPublicationServiceServer(s grpc.ServiceRegistrar, srv PublicationServiceServer) {
+ // If the following call panics, it indicates UnimplementedPublicationServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&PublicationService_ServiceDesc, srv)
+}
+
+func _PublicationService_CreatePublication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PublishRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicationServiceServer).CreatePublication(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: PublicationService_CreatePublication_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PublicationServiceServer).CreatePublication(ctx, req.(*PublishRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PublicationService_ListPublications_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(ListPublicationsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(PublicationServiceServer).ListPublications(m, &publicationServiceListPublicationsServer{ServerStream: stream})
+}
+
+type PublicationService_ListPublicationsServer interface {
+ Send(*ListPublicationsItem) error
+ grpc.ServerStream
+}
+
+type publicationServiceListPublicationsServer struct {
+ grpc.ServerStream
+}
+
+func (x *publicationServiceListPublicationsServer) Send(m *ListPublicationsItem) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _PublicationService_GetPublication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetPublicationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PublicationServiceServer).GetPublication(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: PublicationService_GetPublication_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublicationServiceServer).GetPublication(ctx, req.(*GetPublicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// PublicationService_ServiceDesc is the grpc.ServiceDesc for PublicationService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PublicationService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.routing.v1.PublicationService", + HandlerType: (*PublicationServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreatePublication", + Handler: _PublicationService_CreatePublication_Handler, + }, + { + MethodName: "GetPublication", + Handler: _PublicationService_GetPublication_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListPublications", + Handler: _PublicationService_ListPublications_Handler, + ServerStreams: true, + }, + }, + Metadata: "agntcy/dir/routing/v1/publication_service.proto", +} diff --git a/api/routing/v1/record_query.pb.go b/api/routing/v1/record_query.pb.go index 1333069cd..09c73d4af 100644 --- a/api/routing/v1/record_query.pb.go +++ b/api/routing/v1/record_query.pb.go @@ -1,239 +1,239 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/routing/v1/record_query.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Defines a list of supported record query types. -type RecordQueryType int32 - -const ( - // Unspecified query type. - RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED RecordQueryType = 0 - // Query for a skill name. - RecordQueryType_RECORD_QUERY_TYPE_SKILL RecordQueryType = 1 - // Query for a locator type. - RecordQueryType_RECORD_QUERY_TYPE_LOCATOR RecordQueryType = 2 - // Query for a domain name. - RecordQueryType_RECORD_QUERY_TYPE_DOMAIN RecordQueryType = 3 - // Query for a module name. - RecordQueryType_RECORD_QUERY_TYPE_MODULE RecordQueryType = 4 -) - -// Enum value maps for RecordQueryType. 
-var ( - RecordQueryType_name = map[int32]string{ - 0: "RECORD_QUERY_TYPE_UNSPECIFIED", - 1: "RECORD_QUERY_TYPE_SKILL", - 2: "RECORD_QUERY_TYPE_LOCATOR", - 3: "RECORD_QUERY_TYPE_DOMAIN", - 4: "RECORD_QUERY_TYPE_MODULE", - } - RecordQueryType_value = map[string]int32{ - "RECORD_QUERY_TYPE_UNSPECIFIED": 0, - "RECORD_QUERY_TYPE_SKILL": 1, - "RECORD_QUERY_TYPE_LOCATOR": 2, - "RECORD_QUERY_TYPE_DOMAIN": 3, - "RECORD_QUERY_TYPE_MODULE": 4, - } -) - -func (x RecordQueryType) Enum() *RecordQueryType { - p := new(RecordQueryType) - *p = x - return p -} - -func (x RecordQueryType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (RecordQueryType) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_routing_v1_record_query_proto_enumTypes[0].Descriptor() -} - -func (RecordQueryType) Type() protoreflect.EnumType { - return &file_agntcy_dir_routing_v1_record_query_proto_enumTypes[0] -} - -func (x RecordQueryType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use RecordQueryType.Descriptor instead. -func (RecordQueryType) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP(), []int{0} -} - -// A query to match the record against during discovery. -// For example: -// -// { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } -// { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } -// { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } -// { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } -type RecordQuery struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The type of the query to match against. - Type RecordQueryType `protobuf:"varint,1,opt,name=type,proto3,enum=agntcy.dir.routing.v1.RecordQueryType" json:"type,omitempty"` - // The query value to match against. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordQuery) Reset() { - *x = RecordQuery{} - mi := &file_agntcy_dir_routing_v1_record_query_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordQuery) ProtoMessage() {} - -func (x *RecordQuery) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_record_query_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordQuery.ProtoReflect.Descriptor instead. 
-func (*RecordQuery) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP(), []int{0} -} - -func (x *RecordQuery) GetType() RecordQueryType { - if x != nil { - return x.Type - } - return RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED -} - -func (x *RecordQuery) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -var File_agntcy_dir_routing_v1_record_query_proto protoreflect.FileDescriptor - -var file_agntcy_dir_routing_v1_record_query_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x22, 0x5f, 0x0a, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2a, 0xac, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x43, - 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x4f, 0x43, 0x41, - 0x54, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, - 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, 0x49, - 0x4e, 0x10, 0x03, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, - 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x10, - 0x04, 0x42, 0xca, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x42, - 0x10, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x50, 0x01, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, - 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, - 0x02, 0x21, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 
0x5c, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, - 0x72, 0x3a, 0x3a, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_routing_v1_record_query_proto_rawDescOnce sync.Once - file_agntcy_dir_routing_v1_record_query_proto_rawDescData []byte -) - -func file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP() []byte { - file_agntcy_dir_routing_v1_record_query_proto_rawDescOnce.Do(func() { - file_agntcy_dir_routing_v1_record_query_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_record_query_proto_rawDesc), len(file_agntcy_dir_routing_v1_record_query_proto_rawDesc))) - }) - return file_agntcy_dir_routing_v1_record_query_proto_rawDescData -} - -var file_agntcy_dir_routing_v1_record_query_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_agntcy_dir_routing_v1_record_query_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_agntcy_dir_routing_v1_record_query_proto_goTypes = []any{ - (RecordQueryType)(0), // 0: agntcy.dir.routing.v1.RecordQueryType - (*RecordQuery)(nil), // 1: agntcy.dir.routing.v1.RecordQuery -} -var file_agntcy_dir_routing_v1_record_query_proto_depIdxs = []int32{ - 0, // 0: agntcy.dir.routing.v1.RecordQuery.type:type_name -> agntcy.dir.routing.v1.RecordQueryType - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_routing_v1_record_query_proto_init() } -func file_agntcy_dir_routing_v1_record_query_proto_init() { - if File_agntcy_dir_routing_v1_record_query_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_record_query_proto_rawDesc), len(file_agntcy_dir_routing_v1_record_query_proto_rawDesc)), - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_routing_v1_record_query_proto_goTypes, - DependencyIndexes: file_agntcy_dir_routing_v1_record_query_proto_depIdxs, - EnumInfos: file_agntcy_dir_routing_v1_record_query_proto_enumTypes, - MessageInfos: file_agntcy_dir_routing_v1_record_query_proto_msgTypes, - }.Build() - File_agntcy_dir_routing_v1_record_query_proto = out.File - file_agntcy_dir_routing_v1_record_query_proto_goTypes = nil - file_agntcy_dir_routing_v1_record_query_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/routing/v1/record_query.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines a list of supported record query types. +type RecordQueryType int32 + +const ( + // Unspecified query type. + RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED RecordQueryType = 0 + // Query for a skill name. + RecordQueryType_RECORD_QUERY_TYPE_SKILL RecordQueryType = 1 + // Query for a locator type. + RecordQueryType_RECORD_QUERY_TYPE_LOCATOR RecordQueryType = 2 + // Query for a domain name. + RecordQueryType_RECORD_QUERY_TYPE_DOMAIN RecordQueryType = 3 + // Query for a module name. + RecordQueryType_RECORD_QUERY_TYPE_MODULE RecordQueryType = 4 +) + +// Enum value maps for RecordQueryType. +var ( + RecordQueryType_name = map[int32]string{ + 0: "RECORD_QUERY_TYPE_UNSPECIFIED", + 1: "RECORD_QUERY_TYPE_SKILL", + 2: "RECORD_QUERY_TYPE_LOCATOR", + 3: "RECORD_QUERY_TYPE_DOMAIN", + 4: "RECORD_QUERY_TYPE_MODULE", + } + RecordQueryType_value = map[string]int32{ + "RECORD_QUERY_TYPE_UNSPECIFIED": 0, + "RECORD_QUERY_TYPE_SKILL": 1, + "RECORD_QUERY_TYPE_LOCATOR": 2, + "RECORD_QUERY_TYPE_DOMAIN": 3, + "RECORD_QUERY_TYPE_MODULE": 4, + } +) + +func (x RecordQueryType) Enum() *RecordQueryType { + p := new(RecordQueryType) + *p = x + return p +} + +func (x RecordQueryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RecordQueryType) Descriptor() protoreflect.EnumDescriptor { + return file_agntcy_dir_routing_v1_record_query_proto_enumTypes[0].Descriptor() +} + +func (RecordQueryType) Type() protoreflect.EnumType { + return &file_agntcy_dir_routing_v1_record_query_proto_enumTypes[0] +} + +func (x RecordQueryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RecordQueryType.Descriptor instead. +func (RecordQueryType) EnumDescriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP(), []int{0} +} + +// A query to match the record against during discovery. +// For example: +// +// { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } +// { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } +// { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } +// { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } +type RecordQuery struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The type of the query to match against. + Type RecordQueryType `protobuf:"varint,1,opt,name=type,proto3,enum=agntcy.dir.routing.v1.RecordQueryType" json:"type,omitempty"` + // The query value to match against. + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordQuery) Reset() { + *x = RecordQuery{} + mi := &file_agntcy_dir_routing_v1_record_query_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordQuery) ProtoMessage() {} + +func (x *RecordQuery) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_record_query_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordQuery.ProtoReflect.Descriptor instead. 
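The example queries in the RecordQuery doc comment above map one-to-one onto the generated Go types. A small illustrative helper, assuming the hypothetical package and function names are ours:

package queryexample

import routingv1 "github.com/agntcy/dir/api/routing/v1"

// exampleQueries builds the four documented example queries.
func exampleQueries() []*routingv1.RecordQuery {
	return []*routingv1.RecordQuery{
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing"},
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, Value: "helm-chart"},
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, Value: "research"},
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, Value: "core/llm/model"},
	}
}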
+func (*RecordQuery) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP(), []int{0} +} + +func (x *RecordQuery) GetType() RecordQueryType { + if x != nil { + return x.Type + } + return RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED +} + +func (x *RecordQuery) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_agntcy_dir_routing_v1_record_query_proto protoreflect.FileDescriptor + +var file_agntcy_dir_routing_v1_record_query_proto_rawDesc = string([]byte{ + 0x0a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x22, 0x5f, 0x0a, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2a, 0xac, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x43, + 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x4f, 0x43, 0x41, + 0x54, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, + 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, 0x49, + 0x4e, 0x10, 0x03, 0x12, 0x1c, 0x0a, 0x18, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x10, + 0x04, 0x42, 0xca, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x42, + 0x10, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, + 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, + 0x02, 0x21, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 
0x5c, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, + 0x72, 0x3a, 0x3a, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_routing_v1_record_query_proto_rawDescOnce sync.Once + file_agntcy_dir_routing_v1_record_query_proto_rawDescData []byte +) + +func file_agntcy_dir_routing_v1_record_query_proto_rawDescGZIP() []byte { + file_agntcy_dir_routing_v1_record_query_proto_rawDescOnce.Do(func() { + file_agntcy_dir_routing_v1_record_query_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_record_query_proto_rawDesc), len(file_agntcy_dir_routing_v1_record_query_proto_rawDesc))) + }) + return file_agntcy_dir_routing_v1_record_query_proto_rawDescData +} + +var file_agntcy_dir_routing_v1_record_query_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_routing_v1_record_query_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_agntcy_dir_routing_v1_record_query_proto_goTypes = []any{ + (RecordQueryType)(0), // 0: agntcy.dir.routing.v1.RecordQueryType + (*RecordQuery)(nil), // 1: agntcy.dir.routing.v1.RecordQuery +} +var file_agntcy_dir_routing_v1_record_query_proto_depIdxs = []int32{ + 0, // 0: agntcy.dir.routing.v1.RecordQuery.type:type_name -> agntcy.dir.routing.v1.RecordQueryType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_routing_v1_record_query_proto_init() } +func file_agntcy_dir_routing_v1_record_query_proto_init() { + if File_agntcy_dir_routing_v1_record_query_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_record_query_proto_rawDesc), len(file_agntcy_dir_routing_v1_record_query_proto_rawDesc)), + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agntcy_dir_routing_v1_record_query_proto_goTypes, + DependencyIndexes: file_agntcy_dir_routing_v1_record_query_proto_depIdxs, + EnumInfos: file_agntcy_dir_routing_v1_record_query_proto_enumTypes, + MessageInfos: file_agntcy_dir_routing_v1_record_query_proto_msgTypes, + }.Build() + File_agntcy_dir_routing_v1_record_query_proto = out.File + file_agntcy_dir_routing_v1_record_query_proto_goTypes = nil + file_agntcy_dir_routing_v1_record_query_proto_depIdxs = nil +} diff --git a/api/routing/v1/routing_service.pb.go b/api/routing/v1/routing_service.pb.go index 1134c6c61..2c3fa6d78 100644 --- a/api/routing/v1/routing_service.pb.go +++ b/api/routing/v1/routing_service.pb.go @@ -1,753 +1,753 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/routing/v1/routing_service.proto - -package v1 - -import ( - v1 "github.com/agntcy/dir/api/core/v1" - v11 "github.com/agntcy/dir/api/search/v1" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type PublishRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Request: - // - // *PublishRequest_RecordRefs - // *PublishRequest_Queries - Request isPublishRequest_Request `protobuf_oneof:"request"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublishRequest) Reset() { - *x = PublishRequest{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublishRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublishRequest) ProtoMessage() {} - -func (x *PublishRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. -func (*PublishRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{0} -} - -func (x *PublishRequest) GetRequest() isPublishRequest_Request { - if x != nil { - return x.Request - } - return nil -} - -func (x *PublishRequest) GetRecordRefs() *RecordRefs { - if x != nil { - if x, ok := x.Request.(*PublishRequest_RecordRefs); ok { - return x.RecordRefs - } - } - return nil -} - -func (x *PublishRequest) GetQueries() *RecordQueries { - if x != nil { - if x, ok := x.Request.(*PublishRequest_Queries); ok { - return x.Queries - } - } - return nil -} - -type isPublishRequest_Request interface { - isPublishRequest_Request() -} - -type PublishRequest_RecordRefs struct { - // References to the records to be published. - RecordRefs *RecordRefs `protobuf:"bytes,1,opt,name=record_refs,json=recordRefs,proto3,oneof"` -} - -type PublishRequest_Queries struct { - // Queries to match against the records to be published. 
- Queries *RecordQueries `protobuf:"bytes,2,opt,name=queries,proto3,oneof"` -} - -func (*PublishRequest_RecordRefs) isPublishRequest_Request() {} - -func (*PublishRequest_Queries) isPublishRequest_Request() {} - -type UnpublishRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Request: - // - // *UnpublishRequest_RecordRefs - // *UnpublishRequest_Queries - Request isUnpublishRequest_Request `protobuf_oneof:"request"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UnpublishRequest) Reset() { - *x = UnpublishRequest{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UnpublishRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UnpublishRequest) ProtoMessage() {} - -func (x *UnpublishRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UnpublishRequest.ProtoReflect.Descriptor instead. -func (*UnpublishRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{1} -} - -func (x *UnpublishRequest) GetRequest() isUnpublishRequest_Request { - if x != nil { - return x.Request - } - return nil -} - -func (x *UnpublishRequest) GetRecordRefs() *RecordRefs { - if x != nil { - if x, ok := x.Request.(*UnpublishRequest_RecordRefs); ok { - return x.RecordRefs - } - } - return nil -} - -func (x *UnpublishRequest) GetQueries() *RecordQueries { - if x != nil { - if x, ok := x.Request.(*UnpublishRequest_Queries); ok { - return x.Queries - } - } - return nil -} - -type isUnpublishRequest_Request interface { - isUnpublishRequest_Request() -} - -type UnpublishRequest_RecordRefs struct { - // References to the records to be unpublished. - RecordRefs *RecordRefs `protobuf:"bytes,1,opt,name=record_refs,json=recordRefs,proto3,oneof"` -} - -type UnpublishRequest_Queries struct { - // Queries to match against the records to be unpublished. - Queries *RecordQueries `protobuf:"bytes,2,opt,name=queries,proto3,oneof"` -} - -func (*UnpublishRequest_RecordRefs) isUnpublishRequest_Request() {} - -func (*UnpublishRequest_Queries) isUnpublishRequest_Request() {} - -type RecordRefs struct { - state protoimpl.MessageState `protogen:"open.v1"` - Refs []*v1.RecordRef `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordRefs) Reset() { - *x = RecordRefs{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordRefs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordRefs) ProtoMessage() {} - -func (x *RecordRefs) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordRefs.ProtoReflect.Descriptor instead. 
-func (*RecordRefs) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{2} -} - -func (x *RecordRefs) GetRefs() []*v1.RecordRef { - if x != nil { - return x.Refs - } - return nil -} - -type RecordQueries struct { - state protoimpl.MessageState `protogen:"open.v1"` - Queries []*v11.RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordQueries) Reset() { - *x = RecordQueries{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordQueries) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordQueries) ProtoMessage() {} - -func (x *RecordQueries) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordQueries.ProtoReflect.Descriptor instead. -func (*RecordQueries) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{3} -} - -func (x *RecordQueries) GetQueries() []*v11.RecordQuery { - if x != nil { - return x.Queries - } - return nil -} - -type SearchRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // List of queries to match against the records. - Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - // Minimal target query match score. - // For example, if min_match_score=2, it will return records that match - // at least two of the queries. - // If not set, it will return records that match at least one query. - MinMatchScore *uint32 `protobuf:"varint,2,opt,name=min_match_score,json=minMatchScore,proto3,oneof" json:"min_match_score,omitempty"` - // Limit the number of results returned. - // If not set, it will return all discovered records. - // Note that this is a soft limit, as the search may return more results - // than the limit if there are multiple peers providing the same record. - Limit *uint32 `protobuf:"varint,3,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchRequest) Reset() { - *x = SearchRequest{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchRequest) ProtoMessage() {} - -func (x *SearchRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead. 
-func (*SearchRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{4} -} - -func (x *SearchRequest) GetQueries() []*RecordQuery { - if x != nil { - return x.Queries - } - return nil -} - -func (x *SearchRequest) GetMinMatchScore() uint32 { - if x != nil && x.MinMatchScore != nil { - return *x.MinMatchScore - } - return 0 -} - -func (x *SearchRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -type SearchResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The record that matches the search query. - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // The peer that provided the record. - Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3" json:"peer,omitempty"` - // The queries that were matched. - MatchQueries []*RecordQuery `protobuf:"bytes,3,rep,name=match_queries,json=matchQueries,proto3" json:"match_queries,omitempty"` - // The score of the search match. - MatchScore uint32 `protobuf:"varint,4,opt,name=match_score,json=matchScore,proto3" json:"match_score,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchResponse) Reset() { - *x = SearchResponse{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchResponse) ProtoMessage() {} - -func (x *SearchResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead. -func (*SearchResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{5} -} - -func (x *SearchResponse) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *SearchResponse) GetPeer() *Peer { - if x != nil { - return x.Peer - } - return nil -} - -func (x *SearchResponse) GetMatchQueries() []*RecordQuery { - if x != nil { - return x.MatchQueries - } - return nil -} - -func (x *SearchResponse) GetMatchScore() uint32 { - if x != nil { - return x.MatchScore - } - return 0 -} - -type ListRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // List of queries to match against the records. - // If set, all queries must match for the record to be returned. - Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - // Limit the number of results returned. - // If not set, it will return all records that this peer is providing. 
- Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListRequest) Reset() { - *x = ListRequest{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListRequest) ProtoMessage() {} - -func (x *ListRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. -func (*ListRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{6} -} - -func (x *ListRequest) GetQueries() []*RecordQuery { - if x != nil { - return x.Queries - } - return nil -} - -func (x *ListRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -type ListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The record that matches the list queries. - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // Labels associated with this record (skills, domains, modules) - // Derived from the record content for CLI display purposes - Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListResponse) Reset() { - *x = ListResponse{} - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListResponse) ProtoMessage() {} - -func (x *ListResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. 
-func (*ListResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{7} -} - -func (x *ListResponse) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *ListResponse) GetLabels() []string { - if x != nil { - return x.Labels - } - return nil -} - -var File_agntcy_dir_routing_v1_routing_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_routing_v1_routing_service_proto_rawDesc = string([]byte{ - 0x0a, 0x2b, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, - 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, - 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, - 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x66, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, - 0x12, 0x40, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa5, 0x01, - 0x0a, 0x10, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x72, 
0x65, - 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x48, - 0x00, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x66, 0x73, 0x12, 0x31, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, - 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x22, 0x4c, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, - 0x0d, 0x6d, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x88, 0x01, - 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x48, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, - 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, - 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe9, 0x01, 0x0a, 0x0e, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, - 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, - 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x2f, 0x0a, 0x04, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0d, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x72, 0x6f, 
0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x70, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, - 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x32, 0xd4, - 0x02, 0x0a, 0x0e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x48, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x25, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x09, 0x55, - 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, - 0x2e, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, 0x0a, 0x06, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x12, 0x51, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x22, 0x2e, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x76, 0x31, 0x42, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, - 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, - 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, - 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, - 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_routing_v1_routing_service_proto_rawDescOnce sync.Once - file_agntcy_dir_routing_v1_routing_service_proto_rawDescData []byte -) - -func file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_routing_v1_routing_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_routing_v1_routing_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc))) - }) - return file_agntcy_dir_routing_v1_routing_service_proto_rawDescData -} - -var file_agntcy_dir_routing_v1_routing_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_agntcy_dir_routing_v1_routing_service_proto_goTypes = []any{ - (*PublishRequest)(nil), // 0: agntcy.dir.routing.v1.PublishRequest - (*UnpublishRequest)(nil), // 1: agntcy.dir.routing.v1.UnpublishRequest - (*RecordRefs)(nil), // 2: agntcy.dir.routing.v1.RecordRefs - (*RecordQueries)(nil), // 3: agntcy.dir.routing.v1.RecordQueries - (*SearchRequest)(nil), // 4: agntcy.dir.routing.v1.SearchRequest - (*SearchResponse)(nil), // 5: agntcy.dir.routing.v1.SearchResponse - (*ListRequest)(nil), // 6: agntcy.dir.routing.v1.ListRequest - (*ListResponse)(nil), // 7: agntcy.dir.routing.v1.ListResponse - (*v1.RecordRef)(nil), // 8: agntcy.dir.core.v1.RecordRef - (*v11.RecordQuery)(nil), // 9: agntcy.dir.search.v1.RecordQuery - (*RecordQuery)(nil), // 10: agntcy.dir.routing.v1.RecordQuery - (*Peer)(nil), // 11: agntcy.dir.routing.v1.Peer - (*emptypb.Empty)(nil), // 12: google.protobuf.Empty -} -var file_agntcy_dir_routing_v1_routing_service_proto_depIdxs = []int32{ - 2, // 0: agntcy.dir.routing.v1.PublishRequest.record_refs:type_name -> agntcy.dir.routing.v1.RecordRefs - 3, // 1: agntcy.dir.routing.v1.PublishRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQueries - 2, // 2: 
agntcy.dir.routing.v1.UnpublishRequest.record_refs:type_name -> agntcy.dir.routing.v1.RecordRefs - 3, // 3: agntcy.dir.routing.v1.UnpublishRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQueries - 8, // 4: agntcy.dir.routing.v1.RecordRefs.refs:type_name -> agntcy.dir.core.v1.RecordRef - 9, // 5: agntcy.dir.routing.v1.RecordQueries.queries:type_name -> agntcy.dir.search.v1.RecordQuery - 10, // 6: agntcy.dir.routing.v1.SearchRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQuery - 8, // 7: agntcy.dir.routing.v1.SearchResponse.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 11, // 8: agntcy.dir.routing.v1.SearchResponse.peer:type_name -> agntcy.dir.routing.v1.Peer - 10, // 9: agntcy.dir.routing.v1.SearchResponse.match_queries:type_name -> agntcy.dir.routing.v1.RecordQuery - 10, // 10: agntcy.dir.routing.v1.ListRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQuery - 8, // 11: agntcy.dir.routing.v1.ListResponse.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 0, // 12: agntcy.dir.routing.v1.RoutingService.Publish:input_type -> agntcy.dir.routing.v1.PublishRequest - 1, // 13: agntcy.dir.routing.v1.RoutingService.Unpublish:input_type -> agntcy.dir.routing.v1.UnpublishRequest - 4, // 14: agntcy.dir.routing.v1.RoutingService.Search:input_type -> agntcy.dir.routing.v1.SearchRequest - 6, // 15: agntcy.dir.routing.v1.RoutingService.List:input_type -> agntcy.dir.routing.v1.ListRequest - 12, // 16: agntcy.dir.routing.v1.RoutingService.Publish:output_type -> google.protobuf.Empty - 12, // 17: agntcy.dir.routing.v1.RoutingService.Unpublish:output_type -> google.protobuf.Empty - 5, // 18: agntcy.dir.routing.v1.RoutingService.Search:output_type -> agntcy.dir.routing.v1.SearchResponse - 7, // 19: agntcy.dir.routing.v1.RoutingService.List:output_type -> agntcy.dir.routing.v1.ListResponse - 16, // [16:20] is the sub-list for method output_type - 12, // [12:16] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_routing_v1_routing_service_proto_init() } -func file_agntcy_dir_routing_v1_routing_service_proto_init() { - if File_agntcy_dir_routing_v1_routing_service_proto != nil { - return - } - file_agntcy_dir_routing_v1_peer_proto_init() - file_agntcy_dir_routing_v1_record_query_proto_init() - file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0].OneofWrappers = []any{ - (*PublishRequest_RecordRefs)(nil), - (*PublishRequest_Queries)(nil), - } - file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1].OneofWrappers = []any{ - (*UnpublishRequest_RecordRefs)(nil), - (*UnpublishRequest_Queries)(nil), - } - file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4].OneofWrappers = []any{} - file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc)), - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_routing_v1_routing_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_routing_v1_routing_service_proto_depIdxs, - MessageInfos: file_agntcy_dir_routing_v1_routing_service_proto_msgTypes, - }.Build() - 
File_agntcy_dir_routing_v1_routing_service_proto = out.File - file_agntcy_dir_routing_v1_routing_service_proto_goTypes = nil - file_agntcy_dir_routing_v1_routing_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/routing/v1/routing_service.proto + +package v1 + +import ( + v1 "github.com/agntcy/dir/api/core/v1" + v11 "github.com/agntcy/dir/api/search/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PublishRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Request: + // + // *PublishRequest_RecordRefs + // *PublishRequest_Queries + Request isPublishRequest_Request `protobuf_oneof:"request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{0} +} + +func (x *PublishRequest) GetRequest() isPublishRequest_Request { + if x != nil { + return x.Request + } + return nil +} + +func (x *PublishRequest) GetRecordRefs() *RecordRefs { + if x != nil { + if x, ok := x.Request.(*PublishRequest_RecordRefs); ok { + return x.RecordRefs + } + } + return nil +} + +func (x *PublishRequest) GetQueries() *RecordQueries { + if x != nil { + if x, ok := x.Request.(*PublishRequest_Queries); ok { + return x.Queries + } + } + return nil +} + +type isPublishRequest_Request interface { + isPublishRequest_Request() +} + +type PublishRequest_RecordRefs struct { + // References to the records to be published. + RecordRefs *RecordRefs `protobuf:"bytes,1,opt,name=record_refs,json=recordRefs,proto3,oneof"` +} + +type PublishRequest_Queries struct { + // Queries to match against the records to be published. 
+ Queries *RecordQueries `protobuf:"bytes,2,opt,name=queries,proto3,oneof"` +} + +func (*PublishRequest_RecordRefs) isPublishRequest_Request() {} + +func (*PublishRequest_Queries) isPublishRequest_Request() {} + +type UnpublishRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Request: + // + // *UnpublishRequest_RecordRefs + // *UnpublishRequest_Queries + Request isUnpublishRequest_Request `protobuf_oneof:"request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UnpublishRequest) Reset() { + *x = UnpublishRequest{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UnpublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnpublishRequest) ProtoMessage() {} + +func (x *UnpublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnpublishRequest.ProtoReflect.Descriptor instead. +func (*UnpublishRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{1} +} + +func (x *UnpublishRequest) GetRequest() isUnpublishRequest_Request { + if x != nil { + return x.Request + } + return nil +} + +func (x *UnpublishRequest) GetRecordRefs() *RecordRefs { + if x != nil { + if x, ok := x.Request.(*UnpublishRequest_RecordRefs); ok { + return x.RecordRefs + } + } + return nil +} + +func (x *UnpublishRequest) GetQueries() *RecordQueries { + if x != nil { + if x, ok := x.Request.(*UnpublishRequest_Queries); ok { + return x.Queries + } + } + return nil +} + +type isUnpublishRequest_Request interface { + isUnpublishRequest_Request() +} + +type UnpublishRequest_RecordRefs struct { + // References to the records to be unpublished. + RecordRefs *RecordRefs `protobuf:"bytes,1,opt,name=record_refs,json=recordRefs,proto3,oneof"` +} + +type UnpublishRequest_Queries struct { + // Queries to match against the records to be unpublished. + Queries *RecordQueries `protobuf:"bytes,2,opt,name=queries,proto3,oneof"` +} + +func (*UnpublishRequest_RecordRefs) isUnpublishRequest_Request() {} + +func (*UnpublishRequest_Queries) isUnpublishRequest_Request() {} + +type RecordRefs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Refs []*v1.RecordRef `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordRefs) Reset() { + *x = RecordRefs{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordRefs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordRefs) ProtoMessage() {} + +func (x *RecordRefs) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordRefs.ProtoReflect.Descriptor instead. 
+func (*RecordRefs) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{2} +} + +func (x *RecordRefs) GetRefs() []*v1.RecordRef { + if x != nil { + return x.Refs + } + return nil +} + +type RecordQueries struct { + state protoimpl.MessageState `protogen:"open.v1"` + Queries []*v11.RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordQueries) Reset() { + *x = RecordQueries{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordQueries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordQueries) ProtoMessage() {} + +func (x *RecordQueries) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordQueries.ProtoReflect.Descriptor instead. +func (*RecordQueries) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{3} +} + +func (x *RecordQueries) GetQueries() []*v11.RecordQuery { + if x != nil { + return x.Queries + } + return nil +} + +type SearchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // List of queries to match against the records. + Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // Minimal target query match score. + // For example, if min_match_score=2, it will return records that match + // at least two of the queries. + // If not set, it will return records that match at least one query. + MinMatchScore *uint32 `protobuf:"varint,2,opt,name=min_match_score,json=minMatchScore,proto3,oneof" json:"min_match_score,omitempty"` + // Limit the number of results returned. + // If not set, it will return all discovered records. + // Note that this is a soft limit, as the search may return more results + // than the limit if there are multiple peers providing the same record. + Limit *uint32 `protobuf:"varint,3,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchRequest) Reset() { + *x = SearchRequest{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRequest) ProtoMessage() {} + +func (x *SearchRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRequest.ProtoReflect.Descriptor instead. 
+func (*SearchRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{4} +} + +func (x *SearchRequest) GetQueries() []*RecordQuery { + if x != nil { + return x.Queries + } + return nil +} + +func (x *SearchRequest) GetMinMatchScore() uint32 { + if x != nil && x.MinMatchScore != nil { + return *x.MinMatchScore + } + return 0 +} + +func (x *SearchRequest) GetLimit() uint32 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +type SearchResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The record that matches the search query. + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // The peer that provided the record. + Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3" json:"peer,omitempty"` + // The queries that were matched. + MatchQueries []*RecordQuery `protobuf:"bytes,3,rep,name=match_queries,json=matchQueries,proto3" json:"match_queries,omitempty"` + // The score of the search match. + MatchScore uint32 `protobuf:"varint,4,opt,name=match_score,json=matchScore,proto3" json:"match_score,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchResponse) Reset() { + *x = SearchResponse{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchResponse) ProtoMessage() {} + +func (x *SearchResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchResponse.ProtoReflect.Descriptor instead. +func (*SearchResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{5} +} + +func (x *SearchResponse) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *SearchResponse) GetPeer() *Peer { + if x != nil { + return x.Peer + } + return nil +} + +func (x *SearchResponse) GetMatchQueries() []*RecordQuery { + if x != nil { + return x.MatchQueries + } + return nil +} + +func (x *SearchResponse) GetMatchScore() uint32 { + if x != nil { + return x.MatchScore + } + return 0 +} + +type ListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // List of queries to match against the records. + // If set, all queries must match for the record to be returned. + Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // Limit the number of results returned. + // If not set, it will return all records that this peer is providing. 
+ Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListRequest) Reset() { + *x = ListRequest{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRequest) ProtoMessage() {} + +func (x *ListRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. +func (*ListRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{6} +} + +func (x *ListRequest) GetQueries() []*RecordQuery { + if x != nil { + return x.Queries + } + return nil +} + +func (x *ListRequest) GetLimit() uint32 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +type ListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The record that matches the list queries. + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // Labels associated with this record (skills, domains, modules) + // Derived from the record content for CLI display purposes + Labels []string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListResponse) Reset() { + *x = ListResponse{} + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResponse) ProtoMessage() {} + +func (x *ListResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. 
+func (*ListResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP(), []int{7} +} + +func (x *ListResponse) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *ListResponse) GetLabels() []string { + if x != nil { + return x.Labels + } + return nil +} + +var File_agntcy_dir_routing_v1_routing_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_routing_v1_routing_service_proto_rawDesc = string([]byte{ + 0x0a, 0x2b, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, + 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x65, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, + 0x64, 0x69, 0x72, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x72, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x66, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, + 0x12, 0x40, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa5, 0x01, + 0x0a, 0x10, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x72, 
0x65, + 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x48, + 0x00, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, + 0x65, 0x66, 0x73, 0x12, 0x31, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, + 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x22, 0x4c, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, + 0x0d, 0x6d, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x88, 0x01, + 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x48, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x12, 0x0a, 0x10, + 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0xe9, 0x01, 0x0a, 0x0e, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, + 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, + 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x2f, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x72, 0x6f, 
0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x51, 0x75, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x70, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x32, 0xd4, + 0x02, 0x0a, 0x0e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x48, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x25, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x09, 0x55, + 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, 0x0a, 0x06, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x12, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, + 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x12, 0x51, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x22, 0x2e, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x76, 0x31, 0x42, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, + 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, + 0xa2, 0x02, 0x03, 0x41, 0x44, 0x52, 0xaa, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x44, 0x69, 0x72, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x31, 0xca, 0x02, + 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x21, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, + 0x44, 0x69, 0x72, 0x5c, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x18, 0x41, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_routing_v1_routing_service_proto_rawDescOnce sync.Once + file_agntcy_dir_routing_v1_routing_service_proto_rawDescData []byte +) + +func file_agntcy_dir_routing_v1_routing_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_routing_v1_routing_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_routing_v1_routing_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc))) + }) + return file_agntcy_dir_routing_v1_routing_service_proto_rawDescData +} + +var file_agntcy_dir_routing_v1_routing_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_agntcy_dir_routing_v1_routing_service_proto_goTypes = []any{ + (*PublishRequest)(nil), // 0: agntcy.dir.routing.v1.PublishRequest + (*UnpublishRequest)(nil), // 1: agntcy.dir.routing.v1.UnpublishRequest + (*RecordRefs)(nil), // 2: agntcy.dir.routing.v1.RecordRefs + (*RecordQueries)(nil), // 3: agntcy.dir.routing.v1.RecordQueries + (*SearchRequest)(nil), // 4: agntcy.dir.routing.v1.SearchRequest + (*SearchResponse)(nil), // 5: agntcy.dir.routing.v1.SearchResponse + (*ListRequest)(nil), // 6: agntcy.dir.routing.v1.ListRequest + (*ListResponse)(nil), // 7: agntcy.dir.routing.v1.ListResponse + (*v1.RecordRef)(nil), // 8: agntcy.dir.core.v1.RecordRef + (*v11.RecordQuery)(nil), // 9: agntcy.dir.search.v1.RecordQuery + (*RecordQuery)(nil), // 10: agntcy.dir.routing.v1.RecordQuery + (*Peer)(nil), // 11: agntcy.dir.routing.v1.Peer + (*emptypb.Empty)(nil), // 12: google.protobuf.Empty +} +var file_agntcy_dir_routing_v1_routing_service_proto_depIdxs = []int32{ + 2, // 0: agntcy.dir.routing.v1.PublishRequest.record_refs:type_name -> agntcy.dir.routing.v1.RecordRefs + 3, // 1: agntcy.dir.routing.v1.PublishRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQueries + 2, // 2: 
agntcy.dir.routing.v1.UnpublishRequest.record_refs:type_name -> agntcy.dir.routing.v1.RecordRefs + 3, // 3: agntcy.dir.routing.v1.UnpublishRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQueries + 8, // 4: agntcy.dir.routing.v1.RecordRefs.refs:type_name -> agntcy.dir.core.v1.RecordRef + 9, // 5: agntcy.dir.routing.v1.RecordQueries.queries:type_name -> agntcy.dir.search.v1.RecordQuery + 10, // 6: agntcy.dir.routing.v1.SearchRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQuery + 8, // 7: agntcy.dir.routing.v1.SearchResponse.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 11, // 8: agntcy.dir.routing.v1.SearchResponse.peer:type_name -> agntcy.dir.routing.v1.Peer + 10, // 9: agntcy.dir.routing.v1.SearchResponse.match_queries:type_name -> agntcy.dir.routing.v1.RecordQuery + 10, // 10: agntcy.dir.routing.v1.ListRequest.queries:type_name -> agntcy.dir.routing.v1.RecordQuery + 8, // 11: agntcy.dir.routing.v1.ListResponse.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 0, // 12: agntcy.dir.routing.v1.RoutingService.Publish:input_type -> agntcy.dir.routing.v1.PublishRequest + 1, // 13: agntcy.dir.routing.v1.RoutingService.Unpublish:input_type -> agntcy.dir.routing.v1.UnpublishRequest + 4, // 14: agntcy.dir.routing.v1.RoutingService.Search:input_type -> agntcy.dir.routing.v1.SearchRequest + 6, // 15: agntcy.dir.routing.v1.RoutingService.List:input_type -> agntcy.dir.routing.v1.ListRequest + 12, // 16: agntcy.dir.routing.v1.RoutingService.Publish:output_type -> google.protobuf.Empty + 12, // 17: agntcy.dir.routing.v1.RoutingService.Unpublish:output_type -> google.protobuf.Empty + 5, // 18: agntcy.dir.routing.v1.RoutingService.Search:output_type -> agntcy.dir.routing.v1.SearchResponse + 7, // 19: agntcy.dir.routing.v1.RoutingService.List:output_type -> agntcy.dir.routing.v1.ListResponse + 16, // [16:20] is the sub-list for method output_type + 12, // [12:16] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_routing_v1_routing_service_proto_init() } +func file_agntcy_dir_routing_v1_routing_service_proto_init() { + if File_agntcy_dir_routing_v1_routing_service_proto != nil { + return + } + file_agntcy_dir_routing_v1_peer_proto_init() + file_agntcy_dir_routing_v1_record_query_proto_init() + file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[0].OneofWrappers = []any{ + (*PublishRequest_RecordRefs)(nil), + (*PublishRequest_Queries)(nil), + } + file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[1].OneofWrappers = []any{ + (*UnpublishRequest_RecordRefs)(nil), + (*UnpublishRequest_Queries)(nil), + } + file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[4].OneofWrappers = []any{} + file_agntcy_dir_routing_v1_routing_service_proto_msgTypes[6].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc), len(file_agntcy_dir_routing_v1_routing_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_routing_v1_routing_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_routing_v1_routing_service_proto_depIdxs, + MessageInfos: file_agntcy_dir_routing_v1_routing_service_proto_msgTypes, + }.Build() + 
File_agntcy_dir_routing_v1_routing_service_proto = out.File + file_agntcy_dir_routing_v1_routing_service_proto_goTypes = nil + file_agntcy_dir_routing_v1_routing_service_proto_depIdxs = nil +} diff --git a/api/routing/v1/routing_service_grpc.pb.go b/api/routing/v1/routing_service_grpc.pb.go index ea8d38e3a..e71387089 100644 --- a/api/routing/v1/routing_service_grpc.pb.go +++ b/api/routing/v1/routing_service_grpc.pb.go @@ -1,344 +1,344 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/routing/v1/routing_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - RoutingService_Publish_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Publish" - RoutingService_Unpublish_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Unpublish" - RoutingService_Search_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Search" - RoutingService_List_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/List" -) - -// RoutingServiceClient is the client API for RoutingService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// Defines an interface for announcement and discovery -// of records across interconnected network. -// -// Middleware should be used to control who can perform these RPCs. -// Policies for the middleware can be handled via separate service. -type RoutingServiceClient interface { - // Announce to the network that this peer is providing a given record. - // This enables other peers to discover this record and retrieve it - // from this peer. Listeners can use this event to perform custom operations, - // for example by cloning the record. - // - // Items need to be periodically republished (eg. 24h) to the network - // to avoid stale data. Republication should be done in the background. - Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Stop serving this record to the network. If other peers try - // to retrieve this record, the peer will refuse the request. - Unpublish(ctx context.Context, in *UnpublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Search records based on the request across the network. - // This will search the network for the record with the given parameters. - // - // It is possible that the records are stale or that they do not exist. - // Some records may be provided by multiple peers. - // - // Results from the search can be used as an input - // to Pull operation to retrieve the records. - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (RoutingService_SearchClient, error) - // List all records that this peer is currently providing - // that match the given parameters. - // This operation does not interact with the network. 
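The RoutingService client surface in this hunk exposes Search and List as server-streaming RPCs. A minimal consumer sketch, assuming a local Directory endpoint on localhost:8888 and an insecure channel (both illustrative, not from this repository; grpc.NewClient needs gRPC-Go v1.63+, older versions can use grpc.Dial):

package main

import (
	"context"
	"errors"
	"io"
	"log"

	routingv1 "github.com/agntcy/dir/api/routing/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Address and credentials are assumptions for the sketch.
	conn, err := grpc.NewClient("localhost:8888",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := routingv1.NewRoutingServiceClient(conn)

	// List is server-streaming: call once, then Recv until io.EOF.
	stream, err := client.List(context.Background(), &routingv1.ListRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("providing %v labels=%v", resp.GetRecordRef(), resp.GetLabels())
	}
}

Search follows the same Recv-until-EOF pattern, yielding SearchResponse messages instead of ListResponse.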
- List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (RoutingService_ListClient, error) -} - -type routingServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewRoutingServiceClient(cc grpc.ClientConnInterface) RoutingServiceClient { - return &routingServiceClient{cc} -} - -func (c *routingServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, RoutingService_Publish_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routingServiceClient) Unpublish(ctx context.Context, in *UnpublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, RoutingService_Unpublish_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routingServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (RoutingService_SearchClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &RoutingService_ServiceDesc.Streams[0], RoutingService_Search_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &routingServiceSearchClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type RoutingService_SearchClient interface { - Recv() (*SearchResponse, error) - grpc.ClientStream -} - -type routingServiceSearchClient struct { - grpc.ClientStream -} - -func (x *routingServiceSearchClient) Recv() (*SearchResponse, error) { - m := new(SearchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routingServiceClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (RoutingService_ListClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &RoutingService_ServiceDesc.Streams[1], RoutingService_List_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &routingServiceListClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type RoutingService_ListClient interface { - Recv() (*ListResponse, error) - grpc.ClientStream -} - -type routingServiceListClient struct { - grpc.ClientStream -} - -func (x *routingServiceListClient) Recv() (*ListResponse, error) { - m := new(ListResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// RoutingServiceServer is the server API for RoutingService service. -// All implementations should embed UnimplementedRoutingServiceServer -// for forward compatibility. -// -// Defines an interface for announcement and discovery -// of records across interconnected network. -// -// Middleware should be used to control who can perform these RPCs. -// Policies for the middleware can be handled via separate service. -type RoutingServiceServer interface { - // Announce to the network that this peer is providing a given record. 
- // This enables other peers to discover this record and retrieve it - // from this peer. Listeners can use this event to perform custom operations, - // for example by cloning the record. - // - // Items need to be periodically republished (eg. 24h) to the network - // to avoid stale data. Republication should be done in the background. - Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) - // Stop serving this record to the network. If other peers try - // to retrieve this record, the peer will refuse the request. - Unpublish(context.Context, *UnpublishRequest) (*emptypb.Empty, error) - // Search records based on the request across the network. - // This will search the network for the record with the given parameters. - // - // It is possible that the records are stale or that they do not exist. - // Some records may be provided by multiple peers. - // - // Results from the search can be used as an input - // to Pull operation to retrieve the records. - Search(*SearchRequest, RoutingService_SearchServer) error - // List all records that this peer is currently providing - // that match the given parameters. - // This operation does not interact with the network. - List(*ListRequest, RoutingService_ListServer) error -} - -// UnimplementedRoutingServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedRoutingServiceServer struct{} - -func (UnimplementedRoutingServiceServer) Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") -} -func (UnimplementedRoutingServiceServer) Unpublish(context.Context, *UnpublishRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Unpublish not implemented") -} -func (UnimplementedRoutingServiceServer) Search(*SearchRequest, RoutingService_SearchServer) error { - return status.Errorf(codes.Unimplemented, "method Search not implemented") -} -func (UnimplementedRoutingServiceServer) List(*ListRequest, RoutingService_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (UnimplementedRoutingServiceServer) testEmbeddedByValue() {} - -// UnsafeRoutingServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to RoutingServiceServer will -// result in compilation errors. -type UnsafeRoutingServiceServer interface { - mustEmbedUnimplementedRoutingServiceServer() -} - -func RegisterRoutingServiceServer(s grpc.ServiceRegistrar, srv RoutingServiceServer) { - // If the following call pancis, it indicates UnimplementedRoutingServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&RoutingService_ServiceDesc, srv) -} - -func _RoutingService_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PublishRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RoutingServiceServer).Publish(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RoutingService_Publish_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RoutingServiceServer).Publish(ctx, req.(*PublishRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RoutingService_Unpublish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UnpublishRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RoutingServiceServer).Unpublish(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: RoutingService_Unpublish_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RoutingServiceServer).Unpublish(ctx, req.(*UnpublishRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _RoutingService_Search_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SearchRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RoutingServiceServer).Search(m, &routingServiceSearchServer{ServerStream: stream}) -} - -type RoutingService_SearchServer interface { - Send(*SearchResponse) error - grpc.ServerStream -} - -type routingServiceSearchServer struct { - grpc.ServerStream -} - -func (x *routingServiceSearchServer) Send(m *SearchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _RoutingService_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RoutingServiceServer).List(m, &routingServiceListServer{ServerStream: stream}) -} - -type RoutingService_ListServer interface { - Send(*ListResponse) error - grpc.ServerStream -} - -type routingServiceListServer struct { - grpc.ServerStream -} - -func (x *routingServiceListServer) Send(m *ListResponse) error { - return x.ServerStream.SendMsg(m) -} - -// RoutingService_ServiceDesc is the grpc.ServiceDesc for RoutingService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var RoutingService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.routing.v1.RoutingService", - HandlerType: (*RoutingServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Publish", - Handler: _RoutingService_Publish_Handler, - }, - { - MethodName: "Unpublish", - Handler: _RoutingService_Unpublish_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Search", - Handler: _RoutingService_Search_Handler, - ServerStreams: true, - }, - { - StreamName: "List", - Handler: _RoutingService_List_Handler, - ServerStreams: true, - }, - }, - Metadata: "agntcy/dir/routing/v1/routing_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: agntcy/dir/routing/v1/routing_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + RoutingService_Publish_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Publish" + RoutingService_Unpublish_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Unpublish" + RoutingService_Search_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/Search" + RoutingService_List_FullMethodName = "/agntcy.dir.routing.v1.RoutingService/List" +) + +// RoutingServiceClient is the client API for RoutingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Defines an interface for announcement and discovery +// of records across interconnected network. +// +// Middleware should be used to control who can perform these RPCs. +// Policies for the middleware can be handled via separate service. +type RoutingServiceClient interface { + // Announce to the network that this peer is providing a given record. + // This enables other peers to discover this record and retrieve it + // from this peer. Listeners can use this event to perform custom operations, + // for example by cloning the record. + // + // Items need to be periodically republished (eg. 24h) to the network + // to avoid stale data. Republication should be done in the background. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Stop serving this record to the network. If other peers try + // to retrieve this record, the peer will refuse the request. + Unpublish(ctx context.Context, in *UnpublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Search records based on the request across the network. + // This will search the network for the record with the given parameters. + // + // It is possible that the records are stale or that they do not exist. + // Some records may be provided by multiple peers. + // + // Results from the search can be used as an input + // to Pull operation to retrieve the records. + Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (RoutingService_SearchClient, error) + // List all records that this peer is currently providing + // that match the given parameters. + // This operation does not interact with the network. + List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (RoutingService_ListClient, error) +} + +type routingServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewRoutingServiceClient(cc grpc.ClientConnInterface) RoutingServiceClient { + return &routingServiceClient{cc} +} + +func (c *routingServiceClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, RoutingService_Publish_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *routingServiceClient) Unpublish(ctx context.Context, in *UnpublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, RoutingService_Unpublish_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routingServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (RoutingService_SearchClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &RoutingService_ServiceDesc.Streams[0], RoutingService_Search_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &routingServiceSearchClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RoutingService_SearchClient interface { + Recv() (*SearchResponse, error) + grpc.ClientStream +} + +type routingServiceSearchClient struct { + grpc.ClientStream +} + +func (x *routingServiceSearchClient) Recv() (*SearchResponse, error) { + m := new(SearchResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *routingServiceClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (RoutingService_ListClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &RoutingService_ServiceDesc.Streams[1], RoutingService_List_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &routingServiceListClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RoutingService_ListClient interface { + Recv() (*ListResponse, error) + grpc.ClientStream +} + +type routingServiceListClient struct { + grpc.ClientStream +} + +func (x *routingServiceListClient) Recv() (*ListResponse, error) { + m := new(ListResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// RoutingServiceServer is the server API for RoutingService service. +// All implementations should embed UnimplementedRoutingServiceServer +// for forward compatibility. +// +// Defines an interface for announcement and discovery +// of records across interconnected network. +// +// Middleware should be used to control who can perform these RPCs. +// Policies for the middleware can be handled via separate service. +type RoutingServiceServer interface { + // Announce to the network that this peer is providing a given record. + // This enables other peers to discover this record and retrieve it + // from this peer. Listeners can use this event to perform custom operations, + // for example by cloning the record. + // + // Items need to be periodically republished (eg. 24h) to the network + // to avoid stale data. Republication should be done in the background. + Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) + // Stop serving this record to the network. If other peers try + // to retrieve this record, the peer will refuse the request. 
+ Unpublish(context.Context, *UnpublishRequest) (*emptypb.Empty, error) + // Search records based on the request across the network. + // This will search the network for the record with the given parameters. + // + // It is possible that the records are stale or that they do not exist. + // Some records may be provided by multiple peers. + // + // Results from the search can be used as an input + // to Pull operation to retrieve the records. + Search(*SearchRequest, RoutingService_SearchServer) error + // List all records that this peer is currently providing + // that match the given parameters. + // This operation does not interact with the network. + List(*ListRequest, RoutingService_ListServer) error +} + +// UnimplementedRoutingServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRoutingServiceServer struct{} + +func (UnimplementedRoutingServiceServer) Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedRoutingServiceServer) Unpublish(context.Context, *UnpublishRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Unpublish not implemented") +} +func (UnimplementedRoutingServiceServer) Search(*SearchRequest, RoutingService_SearchServer) error { + return status.Errorf(codes.Unimplemented, "method Search not implemented") +} +func (UnimplementedRoutingServiceServer) List(*ListRequest, RoutingService_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedRoutingServiceServer) testEmbeddedByValue() {} + +// UnsafeRoutingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RoutingServiceServer will +// result in compilation errors. +type UnsafeRoutingServiceServer interface { + mustEmbedUnimplementedRoutingServiceServer() +} + +func RegisterRoutingServiceServer(s grpc.ServiceRegistrar, srv RoutingServiceServer) { + // If the following call panics, it indicates UnimplementedRoutingServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
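The embed-by-value requirement the comment above enforces looks like this in practice; a minimal, hypothetical server sketch (myRouter, the single label, and the :8888 listen address are assumptions, not code from this repository):

package main

import (
	"log"
	"net"

	routingv1 "github.com/agntcy/dir/api/routing/v1"
	"google.golang.org/grpc"
)

type myRouter struct {
	// Embedded by value, so the testEmbeddedByValue probe in
	// RegisterRoutingServiceServer succeeds and any RPC left
	// unimplemented returns codes.Unimplemented instead of panicking.
	routingv1.UnimplementedRoutingServiceServer
}

// List overrides one streaming method; Publish, Unpublish, and Search
// fall through to the embedded defaults.
func (s *myRouter) List(req *routingv1.ListRequest, stream routingv1.RoutingService_ListServer) error {
	// Illustrative single response; a real implementation would walk
	// the peer's local provider state.
	return stream.Send(&routingv1.ListResponse{Labels: []string{"/skills/example"}})
}

func main() {
	lis, err := net.Listen("tcp", ":8888")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	routingv1.RegisterRoutingServiceServer(srv, &myRouter{})
	log.Fatal(srv.Serve(lis))
}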
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&RoutingService_ServiceDesc, srv) +} + +func _RoutingService_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RoutingServiceServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RoutingService_Publish_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RoutingServiceServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RoutingService_Unpublish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnpublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RoutingServiceServer).Unpublish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: RoutingService_Unpublish_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RoutingServiceServer).Unpublish(ctx, req.(*UnpublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RoutingService_Search_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SearchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RoutingServiceServer).Search(m, &routingServiceSearchServer{ServerStream: stream}) +} + +type RoutingService_SearchServer interface { + Send(*SearchResponse) error + grpc.ServerStream +} + +type routingServiceSearchServer struct { + grpc.ServerStream +} + +func (x *routingServiceSearchServer) Send(m *SearchResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _RoutingService_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RoutingServiceServer).List(m, &routingServiceListServer{ServerStream: stream}) +} + +type RoutingService_ListServer interface { + Send(*ListResponse) error + grpc.ServerStream +} + +type routingServiceListServer struct { + grpc.ServerStream +} + +func (x *routingServiceListServer) Send(m *ListResponse) error { + return x.ServerStream.SendMsg(m) +} + +// RoutingService_ServiceDesc is the grpc.ServiceDesc for RoutingService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var RoutingService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.routing.v1.RoutingService", + HandlerType: (*RoutingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _RoutingService_Publish_Handler, + }, + { + MethodName: "Unpublish", + Handler: _RoutingService_Unpublish_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Search", + Handler: _RoutingService_Search_Handler, + ServerStreams: true, + }, + { + StreamName: "List", + Handler: _RoutingService_List_Handler, + ServerStreams: true, + }, + }, + Metadata: "agntcy/dir/routing/v1/routing_service.proto", +} diff --git a/api/search/v1/record_query.go b/api/search/v1/record_query.go index 272d17542..acbdf3104 100644 --- a/api/search/v1/record_query.go +++ b/api/search/v1/record_query.go @@ -1,57 +1,57 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:mnd -package v1 - -var ValidQueryTypes []string - -func init() { - // Override allowed names for RecordQueryType - RecordQueryType_name = map[int32]string{ - 0: "unspecified", - 1: "name", - 2: "version", - 3: "skill-id", - 4: "skill-name", - 5: "locator", - 6: "module-name", - 7: "domain-id", - 8: "domain-name", - 9: "created-at", - 10: "author", - 11: "schema-version", - 12: "module-id", - } - RecordQueryType_value = map[string]int32{ - "": 0, - "unspecified": 0, - "name": 1, - "version": 2, - "skill-id": 3, - "skill-name": 4, - "locator": 5, - "module-name": 6, - "domain-id": 7, - "domain-name": 8, - "created-at": 9, - "author": 10, - "schema-version": 11, - "module-id": 12, - } - - ValidQueryTypes = []string{ - "name", - "version", - "skill-id", - "skill-name", - "locator", - "module-name", - "domain-id", - "domain-name", - "created-at", - "author", - "schema-version", - "module-id", - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:mnd +package v1 + +var ValidQueryTypes []string + +func init() { + // Override allowed names for RecordQueryType + RecordQueryType_name = map[int32]string{ + 0: "unspecified", + 1: "name", + 2: "version", + 3: "skill-id", + 4: "skill-name", + 5: "locator", + 6: "module-name", + 7: "domain-id", + 8: "domain-name", + 9: "created-at", + 10: "author", + 11: "schema-version", + 12: "module-id", + } + RecordQueryType_value = map[string]int32{ + "": 0, + "unspecified": 0, + "name": 1, + "version": 2, + "skill-id": 3, + "skill-name": 4, + "locator": 5, + "module-name": 6, + "domain-id": 7, + "domain-name": 8, + "created-at": 9, + "author": 10, + "schema-version": 11, + "module-id": 12, + } + + ValidQueryTypes = []string{ + "name", + "version", + "skill-id", + "skill-name", + "locator", + "module-name", + "domain-id", + "domain-name", + "created-at", + "author", + "schema-version", + "module-id", + } +} diff --git a/api/search/v1/record_query.pb.go b/api/search/v1/record_query.pb.go index fdd96454f..84263d7fe 100644 --- a/api/search/v1/record_query.pb.go +++ b/api/search/v1/record_query.pb.go @@ -1,306 +1,306 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. 
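The init() override in record_query.go above rewrites the generated RecordQueryType_name/RecordQueryType_value maps so user code can resolve short, CLI-friendly names without the RECORD_QUERY_TYPE_ prefix. A small usage sketch under that assumption:

package main

import (
	"fmt"

	searchv1 "github.com/agntcy/dir/api/search/v1"
)

func main() {
	// The overridden map resolves the short name to the enum value.
	v, ok := searchv1.RecordQueryType_value["skill-name"]
	fmt.Println(ok, searchv1.RecordQueryType(v) == searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME) // true true

	// ValidQueryTypes enumerates every accepted name except "unspecified".
	for _, t := range searchv1.ValidQueryTypes {
		fmt.Println(t)
	}
}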
-// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/search/v1/record_query.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Defines a list of supported record query types. -type RecordQueryType int32 - -const ( - // Unspecified query type. - RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED RecordQueryType = 0 - // Query for a record name. - // Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" - RecordQueryType_RECORD_QUERY_TYPE_NAME RecordQueryType = 1 - // Query for a record version. - // Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" - RecordQueryType_RECORD_QUERY_TYPE_VERSION RecordQueryType = 2 - // Query for a skill ID. - // Numeric field - exact match only, no wildcard support. - RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID RecordQueryType = 3 - // Query for a skill name. - // Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" - RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME RecordQueryType = 4 - // Query for a locator type. - // Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" - RecordQueryType_RECORD_QUERY_TYPE_LOCATOR RecordQueryType = 5 - // Query for a module name. - // Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" - RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME RecordQueryType = 6 - // Query for a domain ID. - // Numeric field - exact match only, no wildcard support. - RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID RecordQueryType = 7 - // Query for a domain name. - // Supports wildcard patterns: "*education*", "healthcare/*", "*technology" - RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME RecordQueryType = 8 - // Query for a record's created_at timestamp. - // Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" - RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT RecordQueryType = 9 - // Query for a record author. - // Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" - RecordQueryType_RECORD_QUERY_TYPE_AUTHOR RecordQueryType = 10 - // Query for a schema version. - // Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" - RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION RecordQueryType = 11 - // Query for a module ID. - // Numeric field - exact match only, no wildcard support. - RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID RecordQueryType = 12 -) - -// Enum value maps for RecordQueryType. 
-var ( - RecordQueryType_name = map[int32]string{ - 0: "RECORD_QUERY_TYPE_UNSPECIFIED", - 1: "RECORD_QUERY_TYPE_NAME", - 2: "RECORD_QUERY_TYPE_VERSION", - 3: "RECORD_QUERY_TYPE_SKILL_ID", - 4: "RECORD_QUERY_TYPE_SKILL_NAME", - 5: "RECORD_QUERY_TYPE_LOCATOR", - 6: "RECORD_QUERY_TYPE_MODULE_NAME", - 7: "RECORD_QUERY_TYPE_DOMAIN_ID", - 8: "RECORD_QUERY_TYPE_DOMAIN_NAME", - 9: "RECORD_QUERY_TYPE_CREATED_AT", - 10: "RECORD_QUERY_TYPE_AUTHOR", - 11: "RECORD_QUERY_TYPE_SCHEMA_VERSION", - 12: "RECORD_QUERY_TYPE_MODULE_ID", - } - RecordQueryType_value = map[string]int32{ - "RECORD_QUERY_TYPE_UNSPECIFIED": 0, - "RECORD_QUERY_TYPE_NAME": 1, - "RECORD_QUERY_TYPE_VERSION": 2, - "RECORD_QUERY_TYPE_SKILL_ID": 3, - "RECORD_QUERY_TYPE_SKILL_NAME": 4, - "RECORD_QUERY_TYPE_LOCATOR": 5, - "RECORD_QUERY_TYPE_MODULE_NAME": 6, - "RECORD_QUERY_TYPE_DOMAIN_ID": 7, - "RECORD_QUERY_TYPE_DOMAIN_NAME": 8, - "RECORD_QUERY_TYPE_CREATED_AT": 9, - "RECORD_QUERY_TYPE_AUTHOR": 10, - "RECORD_QUERY_TYPE_SCHEMA_VERSION": 11, - "RECORD_QUERY_TYPE_MODULE_ID": 12, - } -) - -func (x RecordQueryType) Enum() *RecordQueryType { - p := new(RecordQueryType) - *p = x - return p -} - -func (x RecordQueryType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (RecordQueryType) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_search_v1_record_query_proto_enumTypes[0].Descriptor() -} - -func (RecordQueryType) Type() protoreflect.EnumType { - return &file_agntcy_dir_search_v1_record_query_proto_enumTypes[0] -} - -func (x RecordQueryType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use RecordQueryType.Descriptor instead. -func (RecordQueryType) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP(), []int{0} -} - -// A query to match the record against during discovery. -// For example: -// -// Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } -// Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } -// Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } -// Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } -// List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } -// Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } -type RecordQuery struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The type of the query to match against. - Type RecordQueryType `protobuf:"varint,1,opt,name=type,proto3,enum=agntcy.dir.search.v1.RecordQueryType" json:"type,omitempty"` - // The query value to match against. - // Supports wildcard patterns: - // - // '*' - matches zero or more characters - // '?' 
- matches exactly one character - // '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RecordQuery) Reset() { - *x = RecordQuery{} - mi := &file_agntcy_dir_search_v1_record_query_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RecordQuery) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordQuery) ProtoMessage() {} - -func (x *RecordQuery) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_search_v1_record_query_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordQuery.ProtoReflect.Descriptor instead. -func (*RecordQuery) Descriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP(), []int{0} -} - -func (x *RecordQuery) GetType() RecordQueryType { - if x != nil { - return x.Type - } - return RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED -} - -func (x *RecordQuery) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -var File_agntcy_dir_search_v1_record_query_proto protoreflect.FileDescriptor - -var file_agntcy_dir_search_v1_record_query_proto_rawDesc = string([]byte{ - 0x0a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x22, - 0x5e, 0x0a, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x39, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, - 0xbe, 0x03, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, - 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, - 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, - 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, - 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, - 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, - 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x4c, 0x4c, 0x5f, 0x49, 0x44, 0x10, - 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, - 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x4c, 0x4c, 0x5f, 
0x4e, 0x41, 0x4d, - 0x45, 0x10, 0x04, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, - 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x4f, 0x52, - 0x10, 0x05, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, - 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x4e, - 0x41, 0x4d, 0x45, 0x10, 0x06, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, - 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, 0x49, - 0x4e, 0x5f, 0x49, 0x44, 0x10, 0x07, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, - 0x49, 0x4e, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x45, 0x43, - 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x54, 0x10, 0x09, 0x12, 0x1c, 0x0a, 0x18, 0x52, - 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x10, 0x0a, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x45, 0x43, - 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x0b, 0x12, - 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x49, 0x44, 0x10, 0x0c, - 0x42, 0xc4, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x14, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, - 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, 0x41, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, - 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, - 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_search_v1_record_query_proto_rawDescOnce sync.Once - file_agntcy_dir_search_v1_record_query_proto_rawDescData []byte -) - -func file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP() []byte { - file_agntcy_dir_search_v1_record_query_proto_rawDescOnce.Do(func() { - file_agntcy_dir_search_v1_record_query_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_record_query_proto_rawDesc), len(file_agntcy_dir_search_v1_record_query_proto_rawDesc))) - }) - return file_agntcy_dir_search_v1_record_query_proto_rawDescData 
-} - -var file_agntcy_dir_search_v1_record_query_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_agntcy_dir_search_v1_record_query_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_agntcy_dir_search_v1_record_query_proto_goTypes = []any{ - (RecordQueryType)(0), // 0: agntcy.dir.search.v1.RecordQueryType - (*RecordQuery)(nil), // 1: agntcy.dir.search.v1.RecordQuery -} -var file_agntcy_dir_search_v1_record_query_proto_depIdxs = []int32{ - 0, // 0: agntcy.dir.search.v1.RecordQuery.type:type_name -> agntcy.dir.search.v1.RecordQueryType - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_search_v1_record_query_proto_init() } -func file_agntcy_dir_search_v1_record_query_proto_init() { - if File_agntcy_dir_search_v1_record_query_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_record_query_proto_rawDesc), len(file_agntcy_dir_search_v1_record_query_proto_rawDesc)), - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_search_v1_record_query_proto_goTypes, - DependencyIndexes: file_agntcy_dir_search_v1_record_query_proto_depIdxs, - EnumInfos: file_agntcy_dir_search_v1_record_query_proto_enumTypes, - MessageInfos: file_agntcy_dir_search_v1_record_query_proto_msgTypes, - }.Build() - File_agntcy_dir_search_v1_record_query_proto = out.File - file_agntcy_dir_search_v1_record_query_proto_goTypes = nil - file_agntcy_dir_search_v1_record_query_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/search/v1/record_query.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines a list of supported record query types. +type RecordQueryType int32 + +const ( + // Unspecified query type. + RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED RecordQueryType = 0 + // Query for a record name. + // Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" + RecordQueryType_RECORD_QUERY_TYPE_NAME RecordQueryType = 1 + // Query for a record version. + // Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" + RecordQueryType_RECORD_QUERY_TYPE_VERSION RecordQueryType = 2 + // Query for a skill ID. + // Numeric field - exact match only, no wildcard support. + RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID RecordQueryType = 3 + // Query for a skill name. + // Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" + RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME RecordQueryType = 4 + // Query for a locator type. 
+ // Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" + RecordQueryType_RECORD_QUERY_TYPE_LOCATOR RecordQueryType = 5 + // Query for a module name. + // Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" + RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME RecordQueryType = 6 + // Query for a domain ID. + // Numeric field - exact match only, no wildcard support. + RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID RecordQueryType = 7 + // Query for a domain name. + // Supports wildcard patterns: "*education*", "healthcare/*", "*technology" + RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME RecordQueryType = 8 + // Query for a record's created_at timestamp. + // Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" + RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT RecordQueryType = 9 + // Query for a record author. + // Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" + RecordQueryType_RECORD_QUERY_TYPE_AUTHOR RecordQueryType = 10 + // Query for a schema version. + // Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" + RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION RecordQueryType = 11 + // Query for a module ID. + // Numeric field - exact match only, no wildcard support. + RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID RecordQueryType = 12 +) + +// Enum value maps for RecordQueryType. +var ( + RecordQueryType_name = map[int32]string{ + 0: "RECORD_QUERY_TYPE_UNSPECIFIED", + 1: "RECORD_QUERY_TYPE_NAME", + 2: "RECORD_QUERY_TYPE_VERSION", + 3: "RECORD_QUERY_TYPE_SKILL_ID", + 4: "RECORD_QUERY_TYPE_SKILL_NAME", + 5: "RECORD_QUERY_TYPE_LOCATOR", + 6: "RECORD_QUERY_TYPE_MODULE_NAME", + 7: "RECORD_QUERY_TYPE_DOMAIN_ID", + 8: "RECORD_QUERY_TYPE_DOMAIN_NAME", + 9: "RECORD_QUERY_TYPE_CREATED_AT", + 10: "RECORD_QUERY_TYPE_AUTHOR", + 11: "RECORD_QUERY_TYPE_SCHEMA_VERSION", + 12: "RECORD_QUERY_TYPE_MODULE_ID", + } + RecordQueryType_value = map[string]int32{ + "RECORD_QUERY_TYPE_UNSPECIFIED": 0, + "RECORD_QUERY_TYPE_NAME": 1, + "RECORD_QUERY_TYPE_VERSION": 2, + "RECORD_QUERY_TYPE_SKILL_ID": 3, + "RECORD_QUERY_TYPE_SKILL_NAME": 4, + "RECORD_QUERY_TYPE_LOCATOR": 5, + "RECORD_QUERY_TYPE_MODULE_NAME": 6, + "RECORD_QUERY_TYPE_DOMAIN_ID": 7, + "RECORD_QUERY_TYPE_DOMAIN_NAME": 8, + "RECORD_QUERY_TYPE_CREATED_AT": 9, + "RECORD_QUERY_TYPE_AUTHOR": 10, + "RECORD_QUERY_TYPE_SCHEMA_VERSION": 11, + "RECORD_QUERY_TYPE_MODULE_ID": 12, + } +) + +func (x RecordQueryType) Enum() *RecordQueryType { + p := new(RecordQueryType) + *p = x + return p +} + +func (x RecordQueryType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RecordQueryType) Descriptor() protoreflect.EnumDescriptor { + return file_agntcy_dir_search_v1_record_query_proto_enumTypes[0].Descriptor() +} + +func (RecordQueryType) Type() protoreflect.EnumType { + return &file_agntcy_dir_search_v1_record_query_proto_enumTypes[0] +} + +func (x RecordQueryType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RecordQueryType.Descriptor instead. +func (RecordQueryType) EnumDescriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP(), []int{0} +} + +// A query to match the record against during discovery. 
+// For example: +// +// Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } +// Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } +// Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } +// Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } +// List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } +// Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } +type RecordQuery struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The type of the query to match against. + Type RecordQueryType `protobuf:"varint,1,opt,name=type,proto3,enum=agntcy.dir.search.v1.RecordQueryType" json:"type,omitempty"` + // The query value to match against. + // Supports wildcard patterns: + // + // '*' - matches zero or more characters + // '?' - matches exactly one character + // '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordQuery) Reset() { + *x = RecordQuery{} + mi := &file_agntcy_dir_search_v1_record_query_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RecordQuery) ProtoMessage() {} + +func (x *RecordQuery) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_search_v1_record_query_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RecordQuery.ProtoReflect.Descriptor instead. 
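Putting the wildcard grammar documented above ('*', '?', and '[]' character classes) to use, a hypothetical pair of queries (the values are illustrative):

package main

import (
	"fmt"

	searchv1 "github.com/agntcy/dir/api/search/v1"
)

func main() {
	queries := []*searchv1.RecordQuery{
		// Exact match on a record name.
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, Value: "my-agent"},
		// '*' matches zero or more characters anywhere in the skill name.
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, Value: "*machine*learning*"},
	}
	for _, q := range queries {
		fmt.Printf("%s -> %q\n", q.GetType(), q.GetValue())
	}
}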
+func (*RecordQuery) Descriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP(), []int{0} +} + +func (x *RecordQuery) GetType() RecordQueryType { + if x != nil { + return x.Type + } + return RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED +} + +func (x *RecordQuery) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_agntcy_dir_search_v1_record_query_proto protoreflect.FileDescriptor + +var file_agntcy_dir_search_v1_record_query_proto_rawDesc = string([]byte{ + 0x0a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x22, + 0x5e, 0x0a, 0x0b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x39, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, + 0xbe, 0x03, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, + 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, + 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, + 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, + 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x4c, 0x4c, 0x5f, 0x49, 0x44, 0x10, + 0x03, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, + 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x4c, 0x4c, 0x5f, 0x4e, 0x41, 0x4d, + 0x45, 0x10, 0x04, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, + 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x54, 0x4f, 0x52, + 0x10, 0x05, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, + 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x4e, + 0x41, 0x4d, 0x45, 0x10, 0x06, 0x12, 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, + 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, 0x49, + 0x4e, 0x5f, 0x49, 0x44, 0x10, 0x07, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x4d, 0x41, + 0x49, 0x4e, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x08, 0x12, 0x20, 0x0a, 0x1c, 0x52, 0x45, 0x43, + 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 
0x45, 0x5f, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x54, 0x10, 0x09, 0x12, 0x1c, 0x0a, 0x18, 0x52, + 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x41, 0x55, 0x54, 0x48, 0x4f, 0x52, 0x10, 0x0a, 0x12, 0x24, 0x0a, 0x20, 0x52, 0x45, 0x43, + 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x43, 0x48, 0x45, 0x4d, 0x41, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x0b, 0x12, + 0x1f, 0x0a, 0x1b, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, 0x51, 0x55, 0x45, 0x52, 0x59, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x5f, 0x49, 0x44, 0x10, 0x0c, + 0x42, 0xc4, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x14, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, + 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, 0x41, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, + 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, + 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_search_v1_record_query_proto_rawDescOnce sync.Once + file_agntcy_dir_search_v1_record_query_proto_rawDescData []byte +) + +func file_agntcy_dir_search_v1_record_query_proto_rawDescGZIP() []byte { + file_agntcy_dir_search_v1_record_query_proto_rawDescOnce.Do(func() { + file_agntcy_dir_search_v1_record_query_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_record_query_proto_rawDesc), len(file_agntcy_dir_search_v1_record_query_proto_rawDesc))) + }) + return file_agntcy_dir_search_v1_record_query_proto_rawDescData +} + +var file_agntcy_dir_search_v1_record_query_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_search_v1_record_query_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_agntcy_dir_search_v1_record_query_proto_goTypes = []any{ + (RecordQueryType)(0), // 0: agntcy.dir.search.v1.RecordQueryType + (*RecordQuery)(nil), // 1: agntcy.dir.search.v1.RecordQuery +} +var file_agntcy_dir_search_v1_record_query_proto_depIdxs = []int32{ + 0, // 0: agntcy.dir.search.v1.RecordQuery.type:type_name -> agntcy.dir.search.v1.RecordQueryType + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_search_v1_record_query_proto_init() } +func file_agntcy_dir_search_v1_record_query_proto_init() { + if 
File_agntcy_dir_search_v1_record_query_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_record_query_proto_rawDesc), len(file_agntcy_dir_search_v1_record_query_proto_rawDesc)), + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agntcy_dir_search_v1_record_query_proto_goTypes, + DependencyIndexes: file_agntcy_dir_search_v1_record_query_proto_depIdxs, + EnumInfos: file_agntcy_dir_search_v1_record_query_proto_enumTypes, + MessageInfos: file_agntcy_dir_search_v1_record_query_proto_msgTypes, + }.Build() + File_agntcy_dir_search_v1_record_query_proto = out.File + file_agntcy_dir_search_v1_record_query_proto_goTypes = nil + file_agntcy_dir_search_v1_record_query_proto_depIdxs = nil +} diff --git a/api/search/v1/search_service.pb.go b/api/search/v1/search_service.pb.go index c26b2931d..470b48b60 100644 --- a/api/search/v1/search_service.pb.go +++ b/api/search/v1/search_service.pb.go @@ -1,375 +1,375 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/search/v1/search_service.proto - -package v1 - -import ( - v1 "github.com/agntcy/dir/api/core/v1" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SearchCIDsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // List of queries to match against the records. - Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - // Optional limit on the number of results to return. - Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - // Optional offset for pagination of results. - Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchCIDsRequest) Reset() { - *x = SearchCIDsRequest{} - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchCIDsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchCIDsRequest) ProtoMessage() {} - -func (x *SearchCIDsRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchCIDsRequest.ProtoReflect.Descriptor instead. 
-func (*SearchCIDsRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{0} -} - -func (x *SearchCIDsRequest) GetQueries() []*RecordQuery { - if x != nil { - return x.Queries - } - return nil -} - -func (x *SearchCIDsRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -func (x *SearchCIDsRequest) GetOffset() uint32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -type SearchRecordsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // List of queries to match against the records. - Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` - // Optional limit on the number of results to return. - Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - // Optional offset for pagination of results. - Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchRecordsRequest) Reset() { - *x = SearchRecordsRequest{} - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchRecordsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchRecordsRequest) ProtoMessage() {} - -func (x *SearchRecordsRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchRecordsRequest.ProtoReflect.Descriptor instead. -func (*SearchRecordsRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{1} -} - -func (x *SearchRecordsRequest) GetQueries() []*RecordQuery { - if x != nil { - return x.Queries - } - return nil -} - -func (x *SearchRecordsRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -func (x *SearchRecordsRequest) GetOffset() uint32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -type SearchCIDsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The CID of the record that matches the search criteria. - RecordCid string `protobuf:"bytes,1,opt,name=record_cid,json=recordCid,proto3" json:"record_cid,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchCIDsResponse) Reset() { - *x = SearchCIDsResponse{} - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchCIDsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchCIDsResponse) ProtoMessage() {} - -func (x *SearchCIDsResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchCIDsResponse.ProtoReflect.Descriptor instead. 
-func (*SearchCIDsResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{2} -} - -func (x *SearchCIDsResponse) GetRecordCid() string { - if x != nil { - return x.RecordCid - } - return "" -} - -type SearchRecordsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The full record that matches the search criteria. - Record *v1.Record `protobuf:"bytes,1,opt,name=record,proto3" json:"record,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SearchRecordsResponse) Reset() { - *x = SearchRecordsResponse{} - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SearchRecordsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SearchRecordsResponse) ProtoMessage() {} - -func (x *SearchRecordsResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SearchRecordsResponse.ProtoReflect.Descriptor instead. -func (*SearchRecordsResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{3} -} - -func (x *SearchRecordsResponse) GetRecord() *v1.Record { - if x != nil { - return x.Record - } - return nil -} - -var File_agntcy_dir_search_v1_search_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_search_v1_search_service_proto_rawDesc = string([]byte{ - 0x0a, 0x29, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, - 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, - 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x11, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x19, - 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x42, 0x09, 0x0a, 0x07, 
0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x14, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, - 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x33, - 0x0a, 0x12, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x63, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x43, 0x69, 0x64, 0x22, 0x4b, 0x0a, 0x15, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x06, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x32, 0xde, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, - 0x12, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, - 0x44, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x6a, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, - 0x01, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x12, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, - 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, - 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, - 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0xea, 0x02, 0x17, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_search_v1_search_service_proto_rawDescOnce sync.Once - file_agntcy_dir_search_v1_search_service_proto_rawDescData []byte -) - -func file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_search_v1_search_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_search_v1_search_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_search_service_proto_rawDesc), len(file_agntcy_dir_search_v1_search_service_proto_rawDesc))) - }) - return file_agntcy_dir_search_v1_search_service_proto_rawDescData -} - -var file_agntcy_dir_search_v1_search_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_agntcy_dir_search_v1_search_service_proto_goTypes = []any{ - (*SearchCIDsRequest)(nil), // 0: agntcy.dir.search.v1.SearchCIDsRequest - (*SearchRecordsRequest)(nil), // 1: agntcy.dir.search.v1.SearchRecordsRequest - (*SearchCIDsResponse)(nil), // 2: agntcy.dir.search.v1.SearchCIDsResponse - (*SearchRecordsResponse)(nil), // 3: agntcy.dir.search.v1.SearchRecordsResponse - (*RecordQuery)(nil), // 4: agntcy.dir.search.v1.RecordQuery - (*v1.Record)(nil), // 5: agntcy.dir.core.v1.Record -} -var file_agntcy_dir_search_v1_search_service_proto_depIdxs = []int32{ - 4, // 0: agntcy.dir.search.v1.SearchCIDsRequest.queries:type_name -> agntcy.dir.search.v1.RecordQuery - 4, // 1: agntcy.dir.search.v1.SearchRecordsRequest.queries:type_name -> agntcy.dir.search.v1.RecordQuery - 5, // 2: agntcy.dir.search.v1.SearchRecordsResponse.record:type_name -> agntcy.dir.core.v1.Record - 0, // 3: agntcy.dir.search.v1.SearchService.SearchCIDs:input_type -> agntcy.dir.search.v1.SearchCIDsRequest - 1, // 4: agntcy.dir.search.v1.SearchService.SearchRecords:input_type -> agntcy.dir.search.v1.SearchRecordsRequest - 2, // 5: agntcy.dir.search.v1.SearchService.SearchCIDs:output_type -> agntcy.dir.search.v1.SearchCIDsResponse - 3, // 6: agntcy.dir.search.v1.SearchService.SearchRecords:output_type -> agntcy.dir.search.v1.SearchRecordsResponse - 5, // [5:7] is the sub-list for method output_type - 3, // [3:5] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_search_v1_search_service_proto_init() } -func file_agntcy_dir_search_v1_search_service_proto_init() { - if File_agntcy_dir_search_v1_search_service_proto != nil { - return - } - file_agntcy_dir_search_v1_record_query_proto_init() - 
file_agntcy_dir_search_v1_search_service_proto_msgTypes[0].OneofWrappers = []any{} - file_agntcy_dir_search_v1_search_service_proto_msgTypes[1].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_search_service_proto_rawDesc), len(file_agntcy_dir_search_v1_search_service_proto_rawDesc)), - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_search_v1_search_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_search_v1_search_service_proto_depIdxs, - MessageInfos: file_agntcy_dir_search_v1_search_service_proto_msgTypes, - }.Build() - File_agntcy_dir_search_v1_search_service_proto = out.File - file_agntcy_dir_search_v1_search_service_proto_goTypes = nil - file_agntcy_dir_search_v1_search_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/search/v1/search_service.proto + +package v1 + +import ( + v1 "github.com/agntcy/dir/api/core/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SearchCIDsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // List of queries to match against the records. + Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // Optional limit on the number of results to return. + Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + // Optional offset for pagination of results. + Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchCIDsRequest) Reset() { + *x = SearchCIDsRequest{} + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchCIDsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchCIDsRequest) ProtoMessage() {} + +func (x *SearchCIDsRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchCIDsRequest.ProtoReflect.Descriptor instead. 
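+//
+// Illustrative sketch (not generated code): Limit and Offset are optional
+// proto3 fields, so they are set through pointers. Assuming the
+// google.golang.org/protobuf/proto helper package, a paginated request can
+// be built as:
+//
+//	req := &SearchCIDsRequest{
+//		Queries: []*RecordQuery{{
+//			Type:  RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME,
+//			Value: "text-generation", // placeholder skill name
+//		}},
+//		Limit:  proto.Uint32(10), // page size
+//		Offset: proto.Uint32(20), // skip the first twenty matches
+//	}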
+func (*SearchCIDsRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{0} +} + +func (x *SearchCIDsRequest) GetQueries() []*RecordQuery { + if x != nil { + return x.Queries + } + return nil +} + +func (x *SearchCIDsRequest) GetLimit() uint32 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +func (x *SearchCIDsRequest) GetOffset() uint32 { + if x != nil && x.Offset != nil { + return *x.Offset + } + return 0 +} + +type SearchRecordsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // List of queries to match against the records. + Queries []*RecordQuery `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // Optional limit on the number of results to return. + Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + // Optional offset for pagination of results. + Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchRecordsRequest) Reset() { + *x = SearchRecordsRequest{} + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchRecordsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRecordsRequest) ProtoMessage() {} + +func (x *SearchRecordsRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRecordsRequest.ProtoReflect.Descriptor instead. +func (*SearchRecordsRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{1} +} + +func (x *SearchRecordsRequest) GetQueries() []*RecordQuery { + if x != nil { + return x.Queries + } + return nil +} + +func (x *SearchRecordsRequest) GetLimit() uint32 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +func (x *SearchRecordsRequest) GetOffset() uint32 { + if x != nil && x.Offset != nil { + return *x.Offset + } + return 0 +} + +type SearchCIDsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The CID of the record that matches the search criteria. + RecordCid string `protobuf:"bytes,1,opt,name=record_cid,json=recordCid,proto3" json:"record_cid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchCIDsResponse) Reset() { + *x = SearchCIDsResponse{} + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchCIDsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchCIDsResponse) ProtoMessage() {} + +func (x *SearchCIDsResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchCIDsResponse.ProtoReflect.Descriptor instead. 
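+//
+// Illustrative sketch (not generated code): SearchCIDs is a server-streaming
+// RPC, so matches are read with Recv until io.EOF. Assuming an established
+// *grpc.ClientConn named conn, a context ctx, and a req built as above:
+//
+//	cli := NewSearchServiceClient(conn)
+//	stream, err := cli.SearchCIDs(ctx, req)
+//	if err != nil {
+//		log.Fatalf("search failed: %v", err)
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break // server has finished streaming matches
+//		}
+//		if err != nil {
+//			log.Fatalf("recv failed: %v", err)
+//		}
+//		fmt.Println(resp.GetRecordCid())
+//	}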
+func (*SearchCIDsResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{2} +} + +func (x *SearchCIDsResponse) GetRecordCid() string { + if x != nil { + return x.RecordCid + } + return "" +} + +type SearchRecordsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The full record that matches the search criteria. + Record *v1.Record `protobuf:"bytes,1,opt,name=record,proto3" json:"record,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SearchRecordsResponse) Reset() { + *x = SearchRecordsResponse{} + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SearchRecordsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SearchRecordsResponse) ProtoMessage() {} + +func (x *SearchRecordsResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_search_v1_search_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SearchRecordsResponse.ProtoReflect.Descriptor instead. +func (*SearchRecordsResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP(), []int{3} +} + +func (x *SearchRecordsResponse) GetRecord() *v1.Record { + if x != nil { + return x.Record + } + return nil +} + +var File_agntcy_dir_search_v1_search_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_search_v1_search_service_proto_rawDesc = string([]byte{ + 0x0a, 0x29, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, + 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x01, 0x0a, 0x11, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x19, + 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x09, 0x0a, 0x07, 
0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x14, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, + 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x33, + 0x0a, 0x12, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x63, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x43, 0x69, 0x64, 0x22, 0x4b, 0x0a, 0x15, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x32, 0xde, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, + 0x12, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, + 0x44, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x43, 0x49, 0x44, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x6a, 0x0a, 0x0d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2a, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, + 0x61, 0x72, 0x63, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, + 0x01, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x12, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, + 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x14, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, + 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x20, + 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x65, 0x61, 0x72, 0x63, + 0x68, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x17, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, + 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_search_v1_search_service_proto_rawDescOnce sync.Once + file_agntcy_dir_search_v1_search_service_proto_rawDescData []byte +) + +func file_agntcy_dir_search_v1_search_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_search_v1_search_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_search_v1_search_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_search_service_proto_rawDesc), len(file_agntcy_dir_search_v1_search_service_proto_rawDesc))) + }) + return file_agntcy_dir_search_v1_search_service_proto_rawDescData +} + +var file_agntcy_dir_search_v1_search_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_agntcy_dir_search_v1_search_service_proto_goTypes = []any{ + (*SearchCIDsRequest)(nil), // 0: agntcy.dir.search.v1.SearchCIDsRequest + (*SearchRecordsRequest)(nil), // 1: agntcy.dir.search.v1.SearchRecordsRequest + (*SearchCIDsResponse)(nil), // 2: agntcy.dir.search.v1.SearchCIDsResponse + (*SearchRecordsResponse)(nil), // 3: agntcy.dir.search.v1.SearchRecordsResponse + (*RecordQuery)(nil), // 4: agntcy.dir.search.v1.RecordQuery + (*v1.Record)(nil), // 5: agntcy.dir.core.v1.Record +} +var file_agntcy_dir_search_v1_search_service_proto_depIdxs = []int32{ + 4, // 0: agntcy.dir.search.v1.SearchCIDsRequest.queries:type_name -> agntcy.dir.search.v1.RecordQuery + 4, // 1: agntcy.dir.search.v1.SearchRecordsRequest.queries:type_name -> agntcy.dir.search.v1.RecordQuery + 5, // 2: agntcy.dir.search.v1.SearchRecordsResponse.record:type_name -> agntcy.dir.core.v1.Record + 0, // 3: agntcy.dir.search.v1.SearchService.SearchCIDs:input_type -> agntcy.dir.search.v1.SearchCIDsRequest + 1, // 4: agntcy.dir.search.v1.SearchService.SearchRecords:input_type -> agntcy.dir.search.v1.SearchRecordsRequest + 2, // 5: agntcy.dir.search.v1.SearchService.SearchCIDs:output_type -> agntcy.dir.search.v1.SearchCIDsResponse + 3, // 6: agntcy.dir.search.v1.SearchService.SearchRecords:output_type -> agntcy.dir.search.v1.SearchRecordsResponse + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_search_v1_search_service_proto_init() } +func file_agntcy_dir_search_v1_search_service_proto_init() { + if File_agntcy_dir_search_v1_search_service_proto != nil { + return + } + file_agntcy_dir_search_v1_record_query_proto_init() + 
file_agntcy_dir_search_v1_search_service_proto_msgTypes[0].OneofWrappers = []any{} + file_agntcy_dir_search_v1_search_service_proto_msgTypes[1].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_search_v1_search_service_proto_rawDesc), len(file_agntcy_dir_search_v1_search_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_search_v1_search_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_search_v1_search_service_proto_depIdxs, + MessageInfos: file_agntcy_dir_search_v1_search_service_proto_msgTypes, + }.Build() + File_agntcy_dir_search_v1_search_service_proto = out.File + file_agntcy_dir_search_v1_search_service_proto_goTypes = nil + file_agntcy_dir_search_v1_search_service_proto_depIdxs = nil +} diff --git a/api/search/v1/search_service_grpc.pb.go b/api/search/v1/search_service_grpc.pb.go index 559f84659..23bc4ca0e 100644 --- a/api/search/v1/search_service_grpc.pb.go +++ b/api/search/v1/search_service_grpc.pb.go @@ -1,226 +1,226 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/search/v1/search_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - SearchService_SearchCIDs_FullMethodName = "/agntcy.dir.search.v1.SearchService/SearchCIDs" - SearchService_SearchRecords_FullMethodName = "/agntcy.dir.search.v1.SearchService/SearchRecords" -) - -// SearchServiceClient is the client API for SearchService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SearchServiceClient interface { - // Search for record CIDs that match the given parameters. - // Returns only CIDs for efficient lookups and piping to other commands. - // This operation does not interact with the network. - SearchCIDs(ctx context.Context, in *SearchCIDsRequest, opts ...grpc.CallOption) (SearchService_SearchCIDsClient, error) - // Search for full records that match the given parameters. - // Returns complete record data including all metadata, skills, domains, etc. - // This operation does not interact with the network. - SearchRecords(ctx context.Context, in *SearchRecordsRequest, opts ...grpc.CallOption) (SearchService_SearchRecordsClient, error) -} - -type searchServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewSearchServiceClient(cc grpc.ClientConnInterface) SearchServiceClient { - return &searchServiceClient{cc} -} - -func (c *searchServiceClient) SearchCIDs(ctx context.Context, in *SearchCIDsRequest, opts ...grpc.CallOption) (SearchService_SearchCIDsClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], SearchService_SearchCIDs_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &searchServiceSearchCIDsClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type SearchService_SearchCIDsClient interface { - Recv() (*SearchCIDsResponse, error) - grpc.ClientStream -} - -type searchServiceSearchCIDsClient struct { - grpc.ClientStream -} - -func (x *searchServiceSearchCIDsClient) Recv() (*SearchCIDsResponse, error) { - m := new(SearchCIDsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *searchServiceClient) SearchRecords(ctx context.Context, in *SearchRecordsRequest, opts ...grpc.CallOption) (SearchService_SearchRecordsClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[1], SearchService_SearchRecords_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &searchServiceSearchRecordsClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type SearchService_SearchRecordsClient interface { - Recv() (*SearchRecordsResponse, error) - grpc.ClientStream -} - -type searchServiceSearchRecordsClient struct { - grpc.ClientStream -} - -func (x *searchServiceSearchRecordsClient) Recv() (*SearchRecordsResponse, error) { - m := new(SearchRecordsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// SearchServiceServer is the server API for SearchService service. -// All implementations should embed UnimplementedSearchServiceServer -// for forward compatibility. -type SearchServiceServer interface { - // Search for record CIDs that match the given parameters. - // Returns only CIDs for efficient lookups and piping to other commands. - // This operation does not interact with the network. - SearchCIDs(*SearchCIDsRequest, SearchService_SearchCIDsServer) error - // Search for full records that match the given parameters. - // Returns complete record data including all metadata, skills, domains, etc. - // This operation does not interact with the network. - SearchRecords(*SearchRecordsRequest, SearchService_SearchRecordsServer) error -} - -// UnimplementedSearchServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSearchServiceServer struct{} - -func (UnimplementedSearchServiceServer) SearchCIDs(*SearchCIDsRequest, SearchService_SearchCIDsServer) error { - return status.Errorf(codes.Unimplemented, "method SearchCIDs not implemented") -} -func (UnimplementedSearchServiceServer) SearchRecords(*SearchRecordsRequest, SearchService_SearchRecordsServer) error { - return status.Errorf(codes.Unimplemented, "method SearchRecords not implemented") -} -func (UnimplementedSearchServiceServer) testEmbeddedByValue() {} - -// UnsafeSearchServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SearchServiceServer will -// result in compilation errors. 
-type UnsafeSearchServiceServer interface {
- mustEmbedUnimplementedSearchServiceServer()
-}
-
-func RegisterSearchServiceServer(s grpc.ServiceRegistrar, srv SearchServiceServer) {
- // If the following call panics, it indicates UnimplementedSearchServiceServer was
- // embedded by pointer and is nil. This will cause panics if an
- // unimplemented method is ever invoked, so we test this at initialization
- // time to prevent it from happening at runtime later due to I/O.
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
- t.testEmbeddedByValue()
- }
- s.RegisterService(&SearchService_ServiceDesc, srv)
-}
-
-func _SearchService_SearchCIDs_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SearchCIDsRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(SearchServiceServer).SearchCIDs(m, &searchServiceSearchCIDsServer{ServerStream: stream})
-}
-
-type SearchService_SearchCIDsServer interface {
- Send(*SearchCIDsResponse) error
- grpc.ServerStream
-}
-
-type searchServiceSearchCIDsServer struct {
- grpc.ServerStream
-}
-
-func (x *searchServiceSearchCIDsServer) Send(m *SearchCIDsResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _SearchService_SearchRecords_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SearchRecordsRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(SearchServiceServer).SearchRecords(m, &searchServiceSearchRecordsServer{ServerStream: stream})
-}
-
-type SearchService_SearchRecordsServer interface {
- Send(*SearchRecordsResponse) error
- grpc.ServerStream
-}
-
-type searchServiceSearchRecordsServer struct {
- grpc.ServerStream
-}
-
-func (x *searchServiceSearchRecordsServer) Send(m *SearchRecordsResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-// SearchService_ServiceDesc is the grpc.ServiceDesc for SearchService service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var SearchService_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "agntcy.dir.search.v1.SearchService",
- HandlerType: (*SearchServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "SearchCIDs",
- Handler: _SearchService_SearchCIDs_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "SearchRecords",
- Handler: _SearchService_SearchRecords_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "agntcy/dir/search/v1/search_service.proto",
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc (unknown)
+// source: agntcy/dir/search/v1/search_service.proto
+
+package v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.62.0 or later.
+const _ = grpc.SupportPackageIsVersion8
+
+const (
+ SearchService_SearchCIDs_FullMethodName = "/agntcy.dir.search.v1.SearchService/SearchCIDs"
+ SearchService_SearchRecords_FullMethodName = "/agntcy.dir.search.v1.SearchService/SearchRecords"
+)
+
+// SearchServiceClient is the client API for SearchService service.
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SearchServiceClient interface { + // Search for record CIDs that match the given parameters. + // Returns only CIDs for efficient lookups and piping to other commands. + // This operation does not interact with the network. + SearchCIDs(ctx context.Context, in *SearchCIDsRequest, opts ...grpc.CallOption) (SearchService_SearchCIDsClient, error) + // Search for full records that match the given parameters. + // Returns complete record data including all metadata, skills, domains, etc. + // This operation does not interact with the network. + SearchRecords(ctx context.Context, in *SearchRecordsRequest, opts ...grpc.CallOption) (SearchService_SearchRecordsClient, error) +} + +type searchServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSearchServiceClient(cc grpc.ClientConnInterface) SearchServiceClient { + return &searchServiceClient{cc} +} + +func (c *searchServiceClient) SearchCIDs(ctx context.Context, in *SearchCIDsRequest, opts ...grpc.CallOption) (SearchService_SearchCIDsClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], SearchService_SearchCIDs_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &searchServiceSearchCIDsClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SearchService_SearchCIDsClient interface { + Recv() (*SearchCIDsResponse, error) + grpc.ClientStream +} + +type searchServiceSearchCIDsClient struct { + grpc.ClientStream +} + +func (x *searchServiceSearchCIDsClient) Recv() (*SearchCIDsResponse, error) { + m := new(SearchCIDsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *searchServiceClient) SearchRecords(ctx context.Context, in *SearchRecordsRequest, opts ...grpc.CallOption) (SearchService_SearchRecordsClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[1], SearchService_SearchRecords_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &searchServiceSearchRecordsClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SearchService_SearchRecordsClient interface { + Recv() (*SearchRecordsResponse, error) + grpc.ClientStream +} + +type searchServiceSearchRecordsClient struct { + grpc.ClientStream +} + +func (x *searchServiceSearchRecordsClient) Recv() (*SearchRecordsResponse, error) { + m := new(SearchRecordsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SearchServiceServer is the server API for SearchService service. +// All implementations should embed UnimplementedSearchServiceServer +// for forward compatibility. +type SearchServiceServer interface { + // Search for record CIDs that match the given parameters. + // Returns only CIDs for efficient lookups and piping to other commands. + // This operation does not interact with the network. 
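+	//
+	// Illustrative sketch (not generated code): a concrete server embeds
+	// UnimplementedSearchServiceServer by value and writes each match to the
+	// stream, e.g.
+	//
+	//	type server struct{ UnimplementedSearchServiceServer }
+	//
+	//	func (s *server) SearchCIDs(req *SearchCIDsRequest, stream SearchService_SearchCIDsServer) error {
+	//		for _, cid := range s.lookup(req.GetQueries()) { // lookup is a hypothetical helper
+	//			if err := stream.Send(&SearchCIDsResponse{RecordCid: cid}); err != nil {
+	//				return err
+	//			}
+	//		}
+	//		return nil
+	//	}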
+ SearchCIDs(*SearchCIDsRequest, SearchService_SearchCIDsServer) error
+ // Search for full records that match the given parameters.
+ // Returns complete record data including all metadata, skills, domains, etc.
+ // This operation does not interact with the network.
+ SearchRecords(*SearchRecordsRequest, SearchService_SearchRecordsServer) error
+}
+
+// UnimplementedSearchServiceServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedSearchServiceServer struct{}
+
+func (UnimplementedSearchServiceServer) SearchCIDs(*SearchCIDsRequest, SearchService_SearchCIDsServer) error {
+ return status.Errorf(codes.Unimplemented, "method SearchCIDs not implemented")
+}
+func (UnimplementedSearchServiceServer) SearchRecords(*SearchRecordsRequest, SearchService_SearchRecordsServer) error {
+ return status.Errorf(codes.Unimplemented, "method SearchRecords not implemented")
+}
+func (UnimplementedSearchServiceServer) testEmbeddedByValue() {}
+
+// UnsafeSearchServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to SearchServiceServer will
+// result in compilation errors.
+type UnsafeSearchServiceServer interface {
+ mustEmbedUnimplementedSearchServiceServer()
+}
+
+func RegisterSearchServiceServer(s grpc.ServiceRegistrar, srv SearchServiceServer) {
+ // If the following call panics, it indicates UnimplementedSearchServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&SearchService_ServiceDesc, srv)
+}
+
+func _SearchService_SearchCIDs_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SearchCIDsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SearchServiceServer).SearchCIDs(m, &searchServiceSearchCIDsServer{ServerStream: stream})
+}
+
+type SearchService_SearchCIDsServer interface {
+ Send(*SearchCIDsResponse) error
+ grpc.ServerStream
+}
+
+type searchServiceSearchCIDsServer struct {
+ grpc.ServerStream
+}
+
+func (x *searchServiceSearchCIDsServer) Send(m *SearchCIDsResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _SearchService_SearchRecords_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SearchRecordsRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(SearchServiceServer).SearchRecords(m, &searchServiceSearchRecordsServer{ServerStream: stream})
+}
+
+type SearchService_SearchRecordsServer interface {
+ Send(*SearchRecordsResponse) error
+ grpc.ServerStream
+}
+
+type searchServiceSearchRecordsServer struct {
+ grpc.ServerStream
+}
+
+func (x *searchServiceSearchRecordsServer) Send(m *SearchRecordsResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// SearchService_ServiceDesc is the grpc.ServiceDesc for SearchService service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SearchService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.search.v1.SearchService", + HandlerType: (*SearchServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SearchCIDs", + Handler: _SearchService_SearchCIDs_Handler, + ServerStreams: true, + }, + { + StreamName: "SearchRecords", + Handler: _SearchService_SearchRecords_Handler, + ServerStreams: true, + }, + }, + Metadata: "agntcy/dir/search/v1/search_service.proto", +} diff --git a/api/sign/v1/public_key.go b/api/sign/v1/public_key.go index e315be6d3..0339b6c95 100644 --- a/api/sign/v1/public_key.go +++ b/api/sign/v1/public_key.go @@ -1,53 +1,53 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -// ReferrerType returns the referrer type for PublicKey. -func (p *PublicKey) ReferrerType() string { - return string((&PublicKey{}).ProtoReflect().Descriptor().FullName()) -} - -// MarshalReferrer exports the PublicKey into a RecordReferrer. -func (p *PublicKey) MarshalReferrer() (*corev1.RecordReferrer, error) { - if p == nil { - return nil, errors.New("public key is nil") - } - - // Use decoder to convert proto message to structpb - data, err := decoder.StructToProto(p) - if err != nil { - return nil, fmt.Errorf("failed to convert public key to struct: %w", err) - } - - return &corev1.RecordReferrer{ - Type: p.ReferrerType(), - Data: data, - }, nil -} - -// UnmarshalReferrer loads the PublicKey from a RecordReferrer. -func (p *PublicKey) UnmarshalReferrer(ref *corev1.RecordReferrer) error { - if ref == nil || ref.GetData() == nil { - return errors.New("referrer or data is nil") - } - - // Use decoder to convert structpb to proto message - decoded, err := decoder.ProtoToStruct[PublicKey](ref.GetData()) - if err != nil { - return fmt.Errorf("failed to decode public key from referrer: %w", err) - } - - // Copy fields individually to avoid copying the lock - p.Key = decoded.GetKey() - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +// ReferrerType returns the referrer type for PublicKey. +func (p *PublicKey) ReferrerType() string { + return string((&PublicKey{}).ProtoReflect().Descriptor().FullName()) +} + +// MarshalReferrer exports the PublicKey into a RecordReferrer. +func (p *PublicKey) MarshalReferrer() (*corev1.RecordReferrer, error) { + if p == nil { + return nil, errors.New("public key is nil") + } + + // Use decoder to convert proto message to structpb + data, err := decoder.StructToProto(p) + if err != nil { + return nil, fmt.Errorf("failed to convert public key to struct: %w", err) + } + + return &corev1.RecordReferrer{ + Type: p.ReferrerType(), + Data: data, + }, nil +} + +// UnmarshalReferrer loads the PublicKey from a RecordReferrer. 
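+//
+// Illustrative round-trip sketch (not part of this file): a key exported with
+// MarshalReferrer can be restored into a fresh PublicKey as follows, where
+// pemString is a placeholder PEM-encoded key:
+//
+//	src := &PublicKey{Key: pemString}
+//	ref, err := src.MarshalReferrer()
+//	if err != nil {
+//		log.Fatalf("marshal failed: %v", err)
+//	}
+//	var dst PublicKey
+//	if err := dst.UnmarshalReferrer(ref); err != nil {
+//		log.Fatalf("unmarshal failed: %v", err)
+//	}
+//	fmt.Println(dst.GetKey() == src.GetKey()) // prints true on success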
+func (p *PublicKey) UnmarshalReferrer(ref *corev1.RecordReferrer) error { + if ref == nil || ref.GetData() == nil { + return errors.New("referrer or data is nil") + } + + // Use decoder to convert structpb to proto message + decoded, err := decoder.ProtoToStruct[PublicKey](ref.GetData()) + if err != nil { + return fmt.Errorf("failed to decode public key from referrer: %w", err) + } + + // Copy fields individually to avoid copying the lock + p.Key = decoded.GetKey() + + return nil +} diff --git a/api/sign/v1/public_key.pb.go b/api/sign/v1/public_key.pb.go index ef38bae0a..ecc51886d 100644 --- a/api/sign/v1/public_key.pb.go +++ b/api/sign/v1/public_key.pb.go @@ -1,143 +1,143 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/sign/v1/public_key.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// PublicKey is the public key data associated with a Record. -// Multiple public keys can be associated with a single Record. -type PublicKey struct { - state protoimpl.MessageState `protogen:"open.v1"` - // PEM-encoded public key string. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PublicKey) Reset() { - *x = PublicKey{} - mi := &file_agntcy_dir_sign_v1_public_key_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PublicKey) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PublicKey) ProtoMessage() {} - -func (x *PublicKey) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_public_key_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
-func (*PublicKey) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_public_key_proto_rawDescGZIP(), []int{0} -} - -func (x *PublicKey) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -var File_agntcy_dir_sign_v1_public_key_proto protoreflect.FileDescriptor - -var file_agntcy_dir_sign_v1_public_key_proto_rawDesc = string([]byte{ - 0x0a, 0x23, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, - 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0x1d, 0x0a, 0x09, 0x50, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0xb6, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, - 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, - 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, - 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x3a, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_sign_v1_public_key_proto_rawDescOnce sync.Once - file_agntcy_dir_sign_v1_public_key_proto_rawDescData []byte -) - -func file_agntcy_dir_sign_v1_public_key_proto_rawDescGZIP() []byte { - file_agntcy_dir_sign_v1_public_key_proto_rawDescOnce.Do(func() { - file_agntcy_dir_sign_v1_public_key_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_public_key_proto_rawDesc), len(file_agntcy_dir_sign_v1_public_key_proto_rawDesc))) - }) - return file_agntcy_dir_sign_v1_public_key_proto_rawDescData -} - -var file_agntcy_dir_sign_v1_public_key_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_agntcy_dir_sign_v1_public_key_proto_goTypes = []any{ - (*PublicKey)(nil), // 0: agntcy.dir.sign.v1.PublicKey -} -var file_agntcy_dir_sign_v1_public_key_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_sign_v1_public_key_proto_init() } -func file_agntcy_dir_sign_v1_public_key_proto_init() { - if File_agntcy_dir_sign_v1_public_key_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_public_key_proto_rawDesc), len(file_agntcy_dir_sign_v1_public_key_proto_rawDesc)), - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_sign_v1_public_key_proto_goTypes, - DependencyIndexes: file_agntcy_dir_sign_v1_public_key_proto_depIdxs, - MessageInfos: file_agntcy_dir_sign_v1_public_key_proto_msgTypes, - }.Build() - File_agntcy_dir_sign_v1_public_key_proto = out.File - file_agntcy_dir_sign_v1_public_key_proto_goTypes = nil - file_agntcy_dir_sign_v1_public_key_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/sign/v1/public_key.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// PublicKey is the public key data associated with a Record. +// Multiple public keys can be associated with a single Record. +type PublicKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // PEM-encoded public key string. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PublicKey) Reset() { + *x = PublicKey{} + mi := &file_agntcy_dir_sign_v1_public_key_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublicKey) ProtoMessage() {} + +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_public_key_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_public_key_proto_rawDescGZIP(), []int{0} +} + +func (x *PublicKey) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +var File_agntcy_dir_sign_v1_public_key_proto protoreflect.FileDescriptor + +var file_agntcy_dir_sign_v1_public_key_proto_rawDesc = string([]byte{ + 0x0a, 0x23, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, + 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0x1d, 0x0a, 0x09, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0xb6, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, + 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, + 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x69, 0x67, 0x6e, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, + 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x3a, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_sign_v1_public_key_proto_rawDescOnce sync.Once + file_agntcy_dir_sign_v1_public_key_proto_rawDescData []byte +) + +func file_agntcy_dir_sign_v1_public_key_proto_rawDescGZIP() []byte { + file_agntcy_dir_sign_v1_public_key_proto_rawDescOnce.Do(func() { + file_agntcy_dir_sign_v1_public_key_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_public_key_proto_rawDesc), len(file_agntcy_dir_sign_v1_public_key_proto_rawDesc))) + }) + return file_agntcy_dir_sign_v1_public_key_proto_rawDescData +} + +var file_agntcy_dir_sign_v1_public_key_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_agntcy_dir_sign_v1_public_key_proto_goTypes = []any{ + (*PublicKey)(nil), // 0: agntcy.dir.sign.v1.PublicKey +} +var file_agntcy_dir_sign_v1_public_key_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_sign_v1_public_key_proto_init() } +func file_agntcy_dir_sign_v1_public_key_proto_init() { + if File_agntcy_dir_sign_v1_public_key_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_public_key_proto_rawDesc), len(file_agntcy_dir_sign_v1_public_key_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agntcy_dir_sign_v1_public_key_proto_goTypes, + DependencyIndexes: file_agntcy_dir_sign_v1_public_key_proto_depIdxs, + MessageInfos: file_agntcy_dir_sign_v1_public_key_proto_msgTypes, + }.Build() + File_agntcy_dir_sign_v1_public_key_proto = out.File + file_agntcy_dir_sign_v1_public_key_proto_goTypes = nil + file_agntcy_dir_sign_v1_public_key_proto_depIdxs = nil +} diff --git a/api/sign/v1/sign_service.pb.go b/api/sign/v1/sign_service.pb.go index 0ebc29543..e4cf05595 100644 --- a/api/sign/v1/sign_service.pb.go +++ b/api/sign/v1/sign_service.pb.go @@ -1,664 +1,664 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/sign/v1/sign_service.proto - -package v1 - -import ( - v1 "github.com/agntcy/dir/api/core/v1" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SignRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Record reference to be signed - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // Signing provider to use - Provider *SignRequestProvider `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignRequest) Reset() { - *x = SignRequest{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignRequest) ProtoMessage() {} - -func (x *SignRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignRequest.ProtoReflect.Descriptor instead. 
-func (*SignRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{0} -} - -func (x *SignRequest) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *SignRequest) GetProvider() *SignRequestProvider { - if x != nil { - return x.Provider - } - return nil -} - -type SignRequestProvider struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Request: - // - // *SignRequestProvider_Oidc - // *SignRequestProvider_Key - Request isSignRequestProvider_Request `protobuf_oneof:"request"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignRequestProvider) Reset() { - *x = SignRequestProvider{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignRequestProvider) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignRequestProvider) ProtoMessage() {} - -func (x *SignRequestProvider) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignRequestProvider.ProtoReflect.Descriptor instead. -func (*SignRequestProvider) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{1} -} - -func (x *SignRequestProvider) GetRequest() isSignRequestProvider_Request { - if x != nil { - return x.Request - } - return nil -} - -func (x *SignRequestProvider) GetOidc() *SignWithOIDC { - if x != nil { - if x, ok := x.Request.(*SignRequestProvider_Oidc); ok { - return x.Oidc - } - } - return nil -} - -func (x *SignRequestProvider) GetKey() *SignWithKey { - if x != nil { - if x, ok := x.Request.(*SignRequestProvider_Key); ok { - return x.Key - } - } - return nil -} - -type isSignRequestProvider_Request interface { - isSignRequestProvider_Request() -} - -type SignRequestProvider_Oidc struct { - // Sign with OIDC provider - Oidc *SignWithOIDC `protobuf:"bytes,1,opt,name=oidc,proto3,oneof"` -} - -type SignRequestProvider_Key struct { - // Sign with PEM-encoded public key - Key *SignWithKey `protobuf:"bytes,2,opt,name=key,proto3,oneof"` -} - -func (*SignRequestProvider_Oidc) isSignRequestProvider_Request() {} - -func (*SignRequestProvider_Key) isSignRequestProvider_Request() {} - -type SignWithOIDC struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Token for OIDC provider - IdToken string `protobuf:"bytes,1,opt,name=id_token,json=idToken,proto3" json:"id_token,omitempty"` - // Signing options for OIDC - Options *SignWithOIDC_SignOpts `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignWithOIDC) Reset() { - *x = SignWithOIDC{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignWithOIDC) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignWithOIDC) ProtoMessage() {} - -func (x *SignWithOIDC) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[2] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignWithOIDC.ProtoReflect.Descriptor instead. -func (*SignWithOIDC) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{2} -} - -func (x *SignWithOIDC) GetIdToken() string { - if x != nil { - return x.IdToken - } - return "" -} - -func (x *SignWithOIDC) GetOptions() *SignWithOIDC_SignOpts { - if x != nil { - return x.Options - } - return nil -} - -type SignWithKey struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Private key used for signing - PrivateKey []byte `protobuf:"bytes,1,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` - // Password to unlock the private key - Password []byte `protobuf:"bytes,2,opt,name=password,proto3,oneof" json:"password,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignWithKey) Reset() { - *x = SignWithKey{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignWithKey) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignWithKey) ProtoMessage() {} - -func (x *SignWithKey) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignWithKey.ProtoReflect.Descriptor instead. -func (*SignWithKey) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{3} -} - -func (x *SignWithKey) GetPrivateKey() []byte { - if x != nil { - return x.PrivateKey - } - return nil -} - -func (x *SignWithKey) GetPassword() []byte { - if x != nil { - return x.Password - } - return nil -} - -type SignResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Cryptographic signature of the record - Signature *Signature `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignResponse) Reset() { - *x = SignResponse{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignResponse) ProtoMessage() {} - -func (x *SignResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignResponse.ProtoReflect.Descriptor instead. 
-func (*SignResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{4} -} - -func (x *SignResponse) GetSignature() *Signature { - if x != nil { - return x.Signature - } - return nil -} - -type VerifyRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Record reference to be verified - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VerifyRequest) Reset() { - *x = VerifyRequest{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VerifyRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VerifyRequest) ProtoMessage() {} - -func (x *VerifyRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VerifyRequest.ProtoReflect.Descriptor instead. -func (*VerifyRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{5} -} - -func (x *VerifyRequest) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -type VerifyResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The verify process result - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - // Optional error message if verification failed - ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *VerifyResponse) Reset() { - *x = VerifyResponse{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *VerifyResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VerifyResponse) ProtoMessage() {} - -func (x *VerifyResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VerifyResponse.ProtoReflect.Descriptor instead. 
-func (*VerifyResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{6} -} - -func (x *VerifyResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *VerifyResponse) GetErrorMessage() string { - if x != nil && x.ErrorMessage != nil { - return *x.ErrorMessage - } - return "" -} - -// List of sign options for OIDC -type SignWithOIDC_SignOpts struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Fulcio authority access URL (default value: https://fulcio.sigstage.dev) - FulcioUrl *string `protobuf:"bytes,1,opt,name=fulcio_url,json=fulcioUrl,proto3,oneof" json:"fulcio_url,omitempty"` - // Rekor validator access URL (default value: https://rekor.sigstage.dev) - RekorUrl *string `protobuf:"bytes,2,opt,name=rekor_url,json=rekorUrl,proto3,oneof" json:"rekor_url,omitempty"` - // Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp) - TimestampUrl *string `protobuf:"bytes,3,opt,name=timestamp_url,json=timestampUrl,proto3,oneof" json:"timestamp_url,omitempty"` - // OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth) - OidcProviderUrl *string `protobuf:"bytes,4,opt,name=oidc_provider_url,json=oidcProviderUrl,proto3,oneof" json:"oidc_provider_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SignWithOIDC_SignOpts) Reset() { - *x = SignWithOIDC_SignOpts{} - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SignWithOIDC_SignOpts) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SignWithOIDC_SignOpts) ProtoMessage() {} - -func (x *SignWithOIDC_SignOpts) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SignWithOIDC_SignOpts.ProtoReflect.Descriptor instead. 
-func (*SignWithOIDC_SignOpts) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *SignWithOIDC_SignOpts) GetFulcioUrl() string { - if x != nil && x.FulcioUrl != nil { - return *x.FulcioUrl - } - return "" -} - -func (x *SignWithOIDC_SignOpts) GetRekorUrl() string { - if x != nil && x.RekorUrl != nil { - return *x.RekorUrl - } - return "" -} - -func (x *SignWithOIDC_SignOpts) GetTimestampUrl() string { - if x != nil && x.TimestampUrl != nil { - return *x.TimestampUrl - } - return "" -} - -func (x *SignWithOIDC_SignOpts) GetOidcProviderUrl() string { - if x != nil && x.OidcProviderUrl != nil { - return *x.OidcProviderUrl - } - return "" -} - -var File_agntcy_dir_sign_v1_sign_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_sign_v1_sign_service_proto_rawDesc = string([]byte{ - 0x0a, 0x25, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x90, 0x01, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, - 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x43, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, - 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x6f, - 0x69, 0x64, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x49, 0x44, 0x43, 0x48, 0x00, 0x52, 0x04, 0x6f, - 0x69, 0x64, 0x63, 0x12, 0x33, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, - 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4b, 0x65, - 0x79, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0xe1, 0x02, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, - 0x4f, 0x49, 0x44, 0x43, 0x12, 
0x19, 0x0a, 0x08, 0x69, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x69, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, - 0x43, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, - 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x49, - 0x44, 0x43, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xf0, 0x01, 0x0a, 0x08, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x70, 0x74, - 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x55, - 0x72, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x08, 0x72, 0x65, 0x6b, 0x6f, - 0x72, 0x55, 0x72, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, - 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x55, 0x72, 0x6c, 0x88, 0x01, - 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, - 0x6f, 0x69, 0x64, 0x63, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x88, - 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x5f, 0x75, 0x72, - 0x6c, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x42, - 0x10, 0x0a, 0x0e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x75, 0x72, - 0x6c, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x22, 0x5c, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x57, - 0x69, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, - 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x22, 0x4d, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, - 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 
0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x66, 0x22, 0x66, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, - 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xa9, 0x01, 0x0a, 0x0b, 0x53, 0x69, - 0x67, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x53, 0x69, 0x67, - 0x6e, 0x12, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, - 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x06, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x21, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, - 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xb8, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, - 0x42, 0x10, 0x53, 0x69, 0x67, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x12, - 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x2e, - 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, - 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x3a, 0x56, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_sign_v1_sign_service_proto_rawDescOnce sync.Once - file_agntcy_dir_sign_v1_sign_service_proto_rawDescData []byte -) - -func file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_sign_v1_sign_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_sign_v1_sign_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc), len(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc))) - }) - return file_agntcy_dir_sign_v1_sign_service_proto_rawDescData -} - -var 
file_agntcy_dir_sign_v1_sign_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_agntcy_dir_sign_v1_sign_service_proto_goTypes = []any{ - (*SignRequest)(nil), // 0: agntcy.dir.sign.v1.SignRequest - (*SignRequestProvider)(nil), // 1: agntcy.dir.sign.v1.SignRequestProvider - (*SignWithOIDC)(nil), // 2: agntcy.dir.sign.v1.SignWithOIDC - (*SignWithKey)(nil), // 3: agntcy.dir.sign.v1.SignWithKey - (*SignResponse)(nil), // 4: agntcy.dir.sign.v1.SignResponse - (*VerifyRequest)(nil), // 5: agntcy.dir.sign.v1.VerifyRequest - (*VerifyResponse)(nil), // 6: agntcy.dir.sign.v1.VerifyResponse - (*SignWithOIDC_SignOpts)(nil), // 7: agntcy.dir.sign.v1.SignWithOIDC.SignOpts - (*v1.RecordRef)(nil), // 8: agntcy.dir.core.v1.RecordRef - (*Signature)(nil), // 9: agntcy.dir.sign.v1.Signature -} -var file_agntcy_dir_sign_v1_sign_service_proto_depIdxs = []int32{ - 8, // 0: agntcy.dir.sign.v1.SignRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 1, // 1: agntcy.dir.sign.v1.SignRequest.provider:type_name -> agntcy.dir.sign.v1.SignRequestProvider - 2, // 2: agntcy.dir.sign.v1.SignRequestProvider.oidc:type_name -> agntcy.dir.sign.v1.SignWithOIDC - 3, // 3: agntcy.dir.sign.v1.SignRequestProvider.key:type_name -> agntcy.dir.sign.v1.SignWithKey - 7, // 4: agntcy.dir.sign.v1.SignWithOIDC.options:type_name -> agntcy.dir.sign.v1.SignWithOIDC.SignOpts - 9, // 5: agntcy.dir.sign.v1.SignResponse.signature:type_name -> agntcy.dir.sign.v1.Signature - 8, // 6: agntcy.dir.sign.v1.VerifyRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 0, // 7: agntcy.dir.sign.v1.SignService.Sign:input_type -> agntcy.dir.sign.v1.SignRequest - 5, // 8: agntcy.dir.sign.v1.SignService.Verify:input_type -> agntcy.dir.sign.v1.VerifyRequest - 4, // 9: agntcy.dir.sign.v1.SignService.Sign:output_type -> agntcy.dir.sign.v1.SignResponse - 6, // 10: agntcy.dir.sign.v1.SignService.Verify:output_type -> agntcy.dir.sign.v1.VerifyResponse - 9, // [9:11] is the sub-list for method output_type - 7, // [7:9] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_sign_v1_sign_service_proto_init() } -func file_agntcy_dir_sign_v1_sign_service_proto_init() { - if File_agntcy_dir_sign_v1_sign_service_proto != nil { - return - } - file_agntcy_dir_sign_v1_signature_proto_init() - file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1].OneofWrappers = []any{ - (*SignRequestProvider_Oidc)(nil), - (*SignRequestProvider_Key)(nil), - } - file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3].OneofWrappers = []any{} - file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6].OneofWrappers = []any{} - file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc), len(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc)), - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_sign_v1_sign_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_sign_v1_sign_service_proto_depIdxs, - MessageInfos: file_agntcy_dir_sign_v1_sign_service_proto_msgTypes, - }.Build() - File_agntcy_dir_sign_v1_sign_service_proto = out.File - file_agntcy_dir_sign_v1_sign_service_proto_goTypes = nil - 
file_agntcy_dir_sign_v1_sign_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/sign/v1/sign_service.proto + +package v1 + +import ( + v1 "github.com/agntcy/dir/api/core/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Record reference to be signed + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // Signing provider to use + Provider *SignRequestProvider `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignRequest) Reset() { + *x = SignRequest{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignRequest) ProtoMessage() {} + +func (x *SignRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignRequest.ProtoReflect.Descriptor instead. +func (*SignRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{0} +} + +func (x *SignRequest) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *SignRequest) GetProvider() *SignRequestProvider { + if x != nil { + return x.Provider + } + return nil +} + +type SignRequestProvider struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Request: + // + // *SignRequestProvider_Oidc + // *SignRequestProvider_Key + Request isSignRequestProvider_Request `protobuf_oneof:"request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignRequestProvider) Reset() { + *x = SignRequestProvider{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignRequestProvider) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignRequestProvider) ProtoMessage() {} + +func (x *SignRequestProvider) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignRequestProvider.ProtoReflect.Descriptor instead. 
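+//
+// Editor sketch (hedged): the provider is a oneof, so a key-based request is
+// built through the SignRequestProvider_Key wrapper; recordRef is an assumed,
+// previously obtained *v1.RecordRef and pemKey a hypothetical private key:
+//
+//	req := &SignRequest{
+//		RecordRef: recordRef,
+//		Provider: &SignRequestProvider{
+//			Request: &SignRequestProvider_Key{
+//				Key: &SignWithKey{PrivateKey: pemKey},
+//			},
+//		},
+//	}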
+func (*SignRequestProvider) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{1} +} + +func (x *SignRequestProvider) GetRequest() isSignRequestProvider_Request { + if x != nil { + return x.Request + } + return nil +} + +func (x *SignRequestProvider) GetOidc() *SignWithOIDC { + if x != nil { + if x, ok := x.Request.(*SignRequestProvider_Oidc); ok { + return x.Oidc + } + } + return nil +} + +func (x *SignRequestProvider) GetKey() *SignWithKey { + if x != nil { + if x, ok := x.Request.(*SignRequestProvider_Key); ok { + return x.Key + } + } + return nil +} + +type isSignRequestProvider_Request interface { + isSignRequestProvider_Request() +} + +type SignRequestProvider_Oidc struct { + // Sign with OIDC provider + Oidc *SignWithOIDC `protobuf:"bytes,1,opt,name=oidc,proto3,oneof"` +} + +type SignRequestProvider_Key struct { + // Sign with PEM-encoded public key + Key *SignWithKey `protobuf:"bytes,2,opt,name=key,proto3,oneof"` +} + +func (*SignRequestProvider_Oidc) isSignRequestProvider_Request() {} + +func (*SignRequestProvider_Key) isSignRequestProvider_Request() {} + +type SignWithOIDC struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Token for OIDC provider + IdToken string `protobuf:"bytes,1,opt,name=id_token,json=idToken,proto3" json:"id_token,omitempty"` + // Signing options for OIDC + Options *SignWithOIDC_SignOpts `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignWithOIDC) Reset() { + *x = SignWithOIDC{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignWithOIDC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignWithOIDC) ProtoMessage() {} + +func (x *SignWithOIDC) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignWithOIDC.ProtoReflect.Descriptor instead. 
+func (*SignWithOIDC) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{2} +} + +func (x *SignWithOIDC) GetIdToken() string { + if x != nil { + return x.IdToken + } + return "" +} + +func (x *SignWithOIDC) GetOptions() *SignWithOIDC_SignOpts { + if x != nil { + return x.Options + } + return nil +} + +type SignWithKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Private key used for signing + PrivateKey []byte `protobuf:"bytes,1,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"` + // Password to unlock the private key + Password []byte `protobuf:"bytes,2,opt,name=password,proto3,oneof" json:"password,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignWithKey) Reset() { + *x = SignWithKey{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignWithKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignWithKey) ProtoMessage() {} + +func (x *SignWithKey) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignWithKey.ProtoReflect.Descriptor instead. +func (*SignWithKey) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{3} +} + +func (x *SignWithKey) GetPrivateKey() []byte { + if x != nil { + return x.PrivateKey + } + return nil +} + +func (x *SignWithKey) GetPassword() []byte { + if x != nil { + return x.Password + } + return nil +} + +type SignResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Cryptographic signature of the record + Signature *Signature `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignResponse) Reset() { + *x = SignResponse{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignResponse) ProtoMessage() {} + +func (x *SignResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignResponse.ProtoReflect.Descriptor instead. 
+func (*SignResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{4} +} + +func (x *SignResponse) GetSignature() *Signature { + if x != nil { + return x.Signature + } + return nil +} + +type VerifyRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Record reference to be verified + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifyRequest) Reset() { + *x = VerifyRequest{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyRequest) ProtoMessage() {} + +func (x *VerifyRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyRequest.ProtoReflect.Descriptor instead. +func (*VerifyRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{5} +} + +func (x *VerifyRequest) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +type VerifyResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The verify process result + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + // Optional error message if verification failed + ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifyResponse) Reset() { + *x = VerifyResponse{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyResponse) ProtoMessage() {} + +func (x *VerifyResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyResponse.ProtoReflect.Descriptor instead. 
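+//
+// Editor sketch (hedged): Success and the optional ErrorMessage are the only
+// fields, so callers typically branch like this (log import assumed):
+//
+//	if !resp.GetSuccess() {
+//		log.Printf("verification failed: %s", resp.GetErrorMessage())
+//	}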
+func (*VerifyResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{6} +} + +func (x *VerifyResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *VerifyResponse) GetErrorMessage() string { + if x != nil && x.ErrorMessage != nil { + return *x.ErrorMessage + } + return "" +} + +// List of sign options for OIDC +type SignWithOIDC_SignOpts struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Fulcio authority access URL (default value: https://fulcio.sigstage.dev) + FulcioUrl *string `protobuf:"bytes,1,opt,name=fulcio_url,json=fulcioUrl,proto3,oneof" json:"fulcio_url,omitempty"` + // Rekor validator access URL (default value: https://rekor.sigstage.dev) + RekorUrl *string `protobuf:"bytes,2,opt,name=rekor_url,json=rekorUrl,proto3,oneof" json:"rekor_url,omitempty"` + // Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp) + TimestampUrl *string `protobuf:"bytes,3,opt,name=timestamp_url,json=timestampUrl,proto3,oneof" json:"timestamp_url,omitempty"` + // OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth) + OidcProviderUrl *string `protobuf:"bytes,4,opt,name=oidc_provider_url,json=oidcProviderUrl,proto3,oneof" json:"oidc_provider_url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignWithOIDC_SignOpts) Reset() { + *x = SignWithOIDC_SignOpts{} + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignWithOIDC_SignOpts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignWithOIDC_SignOpts) ProtoMessage() {} + +func (x *SignWithOIDC_SignOpts) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignWithOIDC_SignOpts.ProtoReflect.Descriptor instead. 
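+//
+// Editor sketch (hedged): all four URLs are optional strings, so overriding
+// the documented sigstage defaults requires pointers; strPtr is a hypothetical
+// helper returning &s, and the production sigstore endpoints shown are an
+// assumption, not part of this API:
+//
+//	opts := &SignWithOIDC_SignOpts{
+//		FulcioUrl: strPtr("https://fulcio.sigstore.dev"),
+//		RekorUrl:  strPtr("https://rekor.sigstore.dev"),
+//	}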
+func (*SignWithOIDC_SignOpts) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SignWithOIDC_SignOpts) GetFulcioUrl() string { + if x != nil && x.FulcioUrl != nil { + return *x.FulcioUrl + } + return "" +} + +func (x *SignWithOIDC_SignOpts) GetRekorUrl() string { + if x != nil && x.RekorUrl != nil { + return *x.RekorUrl + } + return "" +} + +func (x *SignWithOIDC_SignOpts) GetTimestampUrl() string { + if x != nil && x.TimestampUrl != nil { + return *x.TimestampUrl + } + return "" +} + +func (x *SignWithOIDC_SignOpts) GetOidcProviderUrl() string { + if x != nil && x.OidcProviderUrl != nil { + return *x.OidcProviderUrl + } + return "" +} + +var File_agntcy_dir_sign_v1_sign_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_sign_v1_sign_service_proto_rawDesc = string([]byte{ + 0x0a, 0x25, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x90, 0x01, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x43, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, + 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x6f, + 0x69, 0x64, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x49, 0x44, 0x43, 0x48, 0x00, 0x52, 0x04, 0x6f, + 0x69, 0x64, 0x63, 0x12, 0x33, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, + 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4b, 0x65, + 0x79, 0x48, 0x00, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xe1, 0x02, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, + 0x4f, 0x49, 0x44, 0x43, 0x12, 
0x19, 0x0a, 0x08, 0x69, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x69, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x43, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, + 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x49, + 0x44, 0x43, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x70, 0x74, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xf0, 0x01, 0x0a, 0x08, 0x53, 0x69, 0x67, 0x6e, 0x4f, 0x70, 0x74, + 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x55, + 0x72, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x08, 0x72, 0x65, 0x6b, 0x6f, + 0x72, 0x55, 0x72, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, + 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x55, 0x72, 0x6c, 0x88, 0x01, + 0x01, 0x12, 0x2f, 0x0a, 0x11, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x0f, + 0x6f, 0x69, 0x64, 0x63, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x55, 0x72, 0x6c, 0x88, + 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x66, 0x75, 0x6c, 0x63, 0x69, 0x6f, 0x5f, 0x75, 0x72, + 0x6c, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x6b, 0x6f, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x42, + 0x10, 0x0a, 0x0e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x75, 0x72, + 0x6c, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x6f, 0x69, 0x64, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x22, 0x5c, 0x0a, 0x0b, 0x53, 0x69, 0x67, 0x6e, 0x57, + 0x69, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x70, 0x61, 0x73, + 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x4d, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 
0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, + 0x66, 0x22, 0x66, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, + 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xa9, 0x01, 0x0a, 0x0b, 0x53, 0x69, + 0x67, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x53, 0x69, 0x67, + 0x6e, 0x12, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, + 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x06, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x21, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, + 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xb8, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, + 0x42, 0x10, 0x53, 0x69, 0x67, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x12, + 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, + 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, 0x67, 0x6e, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_sign_v1_sign_service_proto_rawDescOnce sync.Once + file_agntcy_dir_sign_v1_sign_service_proto_rawDescData []byte +) + +func file_agntcy_dir_sign_v1_sign_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_sign_v1_sign_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_sign_v1_sign_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc), len(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc))) + }) + return file_agntcy_dir_sign_v1_sign_service_proto_rawDescData +} + +var 
file_agntcy_dir_sign_v1_sign_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_agntcy_dir_sign_v1_sign_service_proto_goTypes = []any{ + (*SignRequest)(nil), // 0: agntcy.dir.sign.v1.SignRequest + (*SignRequestProvider)(nil), // 1: agntcy.dir.sign.v1.SignRequestProvider + (*SignWithOIDC)(nil), // 2: agntcy.dir.sign.v1.SignWithOIDC + (*SignWithKey)(nil), // 3: agntcy.dir.sign.v1.SignWithKey + (*SignResponse)(nil), // 4: agntcy.dir.sign.v1.SignResponse + (*VerifyRequest)(nil), // 5: agntcy.dir.sign.v1.VerifyRequest + (*VerifyResponse)(nil), // 6: agntcy.dir.sign.v1.VerifyResponse + (*SignWithOIDC_SignOpts)(nil), // 7: agntcy.dir.sign.v1.SignWithOIDC.SignOpts + (*v1.RecordRef)(nil), // 8: agntcy.dir.core.v1.RecordRef + (*Signature)(nil), // 9: agntcy.dir.sign.v1.Signature +} +var file_agntcy_dir_sign_v1_sign_service_proto_depIdxs = []int32{ + 8, // 0: agntcy.dir.sign.v1.SignRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 1, // 1: agntcy.dir.sign.v1.SignRequest.provider:type_name -> agntcy.dir.sign.v1.SignRequestProvider + 2, // 2: agntcy.dir.sign.v1.SignRequestProvider.oidc:type_name -> agntcy.dir.sign.v1.SignWithOIDC + 3, // 3: agntcy.dir.sign.v1.SignRequestProvider.key:type_name -> agntcy.dir.sign.v1.SignWithKey + 7, // 4: agntcy.dir.sign.v1.SignWithOIDC.options:type_name -> agntcy.dir.sign.v1.SignWithOIDC.SignOpts + 9, // 5: agntcy.dir.sign.v1.SignResponse.signature:type_name -> agntcy.dir.sign.v1.Signature + 8, // 6: agntcy.dir.sign.v1.VerifyRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 0, // 7: agntcy.dir.sign.v1.SignService.Sign:input_type -> agntcy.dir.sign.v1.SignRequest + 5, // 8: agntcy.dir.sign.v1.SignService.Verify:input_type -> agntcy.dir.sign.v1.VerifyRequest + 4, // 9: agntcy.dir.sign.v1.SignService.Sign:output_type -> agntcy.dir.sign.v1.SignResponse + 6, // 10: agntcy.dir.sign.v1.SignService.Verify:output_type -> agntcy.dir.sign.v1.VerifyResponse + 9, // [9:11] is the sub-list for method output_type + 7, // [7:9] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_sign_v1_sign_service_proto_init() } +func file_agntcy_dir_sign_v1_sign_service_proto_init() { + if File_agntcy_dir_sign_v1_sign_service_proto != nil { + return + } + file_agntcy_dir_sign_v1_signature_proto_init() + file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[1].OneofWrappers = []any{ + (*SignRequestProvider_Oidc)(nil), + (*SignRequestProvider_Key)(nil), + } + file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[3].OneofWrappers = []any{} + file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[6].OneofWrappers = []any{} + file_agntcy_dir_sign_v1_sign_service_proto_msgTypes[7].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc), len(file_agntcy_dir_sign_v1_sign_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_sign_v1_sign_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_sign_v1_sign_service_proto_depIdxs, + MessageInfos: file_agntcy_dir_sign_v1_sign_service_proto_msgTypes, + }.Build() + File_agntcy_dir_sign_v1_sign_service_proto = out.File + file_agntcy_dir_sign_v1_sign_service_proto_goTypes = nil + 
file_agntcy_dir_sign_v1_sign_service_proto_depIdxs = nil +} diff --git a/api/sign/v1/sign_service_grpc.pb.go b/api/sign/v1/sign_service_grpc.pb.go index 196a03d1a..24dcb08b8 100644 --- a/api/sign/v1/sign_service_grpc.pb.go +++ b/api/sign/v1/sign_service_grpc.pb.go @@ -1,168 +1,168 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/sign/v1/sign_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - SignService_Sign_FullMethodName = "/agntcy.dir.sign.v1.SignService/Sign" - SignService_Verify_FullMethodName = "/agntcy.dir.sign.v1.SignService/Verify" -) - -// SignServiceClient is the client API for SignService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// SignService provides methods to sign and verify records. -type SignServiceClient interface { - // Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase - Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) - // Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted - Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*VerifyResponse, error) -} - -type signServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewSignServiceClient(cc grpc.ClientConnInterface) SignServiceClient { - return &signServiceClient{cc} -} - -func (c *signServiceClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(SignResponse) - err := c.cc.Invoke(ctx, SignService_Sign_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *signServiceClient) Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*VerifyResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(VerifyResponse) - err := c.cc.Invoke(ctx, SignService_Verify_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SignServiceServer is the server API for SignService service. -// All implementations should embed UnimplementedSignServiceServer -// for forward compatibility. -// -// SignService provides methods to sign and verify records. -type SignServiceServer interface { - // Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase - Sign(context.Context, *SignRequest) (*SignResponse, error) - // Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted - Verify(context.Context, *VerifyRequest) (*VerifyResponse, error) -} - -// UnimplementedSignServiceServer should be embedded to have -// forward compatible implementations. 
-// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSignServiceServer struct{} - -func (UnimplementedSignServiceServer) Sign(context.Context, *SignRequest) (*SignResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented") -} -func (UnimplementedSignServiceServer) Verify(context.Context, *VerifyRequest) (*VerifyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Verify not implemented") -} -func (UnimplementedSignServiceServer) testEmbeddedByValue() {} - -// UnsafeSignServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SignServiceServer will -// result in compilation errors. -type UnsafeSignServiceServer interface { - mustEmbedUnimplementedSignServiceServer() -} - -func RegisterSignServiceServer(s grpc.ServiceRegistrar, srv SignServiceServer) { - // If the following call pancis, it indicates UnimplementedSignServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SignService_ServiceDesc, srv) -} - -func _SignService_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignServiceServer).Sign(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SignService_Sign_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignServiceServer).Sign(ctx, req.(*SignRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SignService_Verify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SignServiceServer).Verify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SignService_Verify_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SignServiceServer).Verify(ctx, req.(*VerifyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// SignService_ServiceDesc is the grpc.ServiceDesc for SignService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SignService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.sign.v1.SignService", - HandlerType: (*SignServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Sign", - Handler: _SignService_Sign_Handler, - }, - { - MethodName: "Verify", - Handler: _SignService_Verify_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "agntcy/dir/sign/v1/sign_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc (unknown)
+// source: agntcy/dir/sign/v1/sign_service.proto
+
+package v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.62.0 or later.
+const _ = grpc.SupportPackageIsVersion8
+
+const (
+	SignService_Sign_FullMethodName   = "/agntcy.dir.sign.v1.SignService/Sign"
+	SignService_Verify_FullMethodName = "/agntcy.dir.sign.v1.SignService/Verify"
+)
+
+// SignServiceClient is the client API for SignService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// SignService provides methods to sign and verify records.
+type SignServiceClient interface {
+	// Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase
+	Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error)
+	// Verify signed record using keyless OIDC based provider or using a PEM-encoded public key
+	Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*VerifyResponse, error)
+}
+
+type signServiceClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewSignServiceClient(cc grpc.ClientConnInterface) SignServiceClient {
+	return &signServiceClient{cc}
+}
+
+func (c *signServiceClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(SignResponse)
+	err := c.cc.Invoke(ctx, SignService_Sign_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *signServiceClient) Verify(ctx context.Context, in *VerifyRequest, opts ...grpc.CallOption) (*VerifyResponse, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(VerifyResponse)
+	err := c.cc.Invoke(ctx, SignService_Verify_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// SignServiceServer is the server API for SignService service.
+// All implementations should embed UnimplementedSignServiceServer
+// for forward compatibility.
+//
+// SignService provides methods to sign and verify records.
+type SignServiceServer interface {
+	// Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase
+	Sign(context.Context, *SignRequest) (*SignResponse, error)
+	// Verify signed record using keyless OIDC based provider or using a PEM-encoded public key
+	Verify(context.Context, *VerifyRequest) (*VerifyResponse, error)
+}
+
+// UnimplementedSignServiceServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
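The NOTE above is the contract a real implementation should follow. A minimal sketch, assuming a hypothetical recordSigner backend and a corev1 alias for github.com/agntcy/dir/api/core/v1; only the embedding pattern and the generated types are taken from this file:

// recordSigner is a hypothetical backend interface for illustration.
type recordSigner interface {
	SignRecord(ctx context.Context, ref *corev1.RecordRef) (*Signature, error)
}

// signServer embeds UnimplementedSignServiceServer by value, so methods
// added to SignService later return codes.Unimplemented instead of
// panicking or breaking the build.
type signServer struct {
	UnimplementedSignServiceServer
	signer recordSigner
}

func (s *signServer) Sign(ctx context.Context, req *SignRequest) (*SignResponse, error) {
	sig, err := s.signer.SignRecord(ctx, req.GetRecordRef())
	if err != nil {
		return nil, err
	}
	return &SignResponse{Signature: sig}, nil
}

Registering it is a one-liner, RegisterSignServiceServer(grpcServer, &signServer{signer: mySigner}); until Verify is overridden, callers of Verify get the Unimplemented stub defined just below.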
+type UnimplementedSignServiceServer struct{}
+
+func (UnimplementedSignServiceServer) Sign(context.Context, *SignRequest) (*SignResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented")
+}
+func (UnimplementedSignServiceServer) Verify(context.Context, *VerifyRequest) (*VerifyResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Verify not implemented")
+}
+func (UnimplementedSignServiceServer) testEmbeddedByValue() {}
+
+// UnsafeSignServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to SignServiceServer will
+// result in compilation errors.
+type UnsafeSignServiceServer interface {
+	mustEmbedUnimplementedSignServiceServer()
+}
+
+func RegisterSignServiceServer(s grpc.ServiceRegistrar, srv SignServiceServer) {
+	// If the following call panics, it indicates UnimplementedSignServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&SignService_ServiceDesc, srv)
+}
+
+func _SignService_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SignRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SignServiceServer).Sign(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: SignService_Sign_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SignServiceServer).Sign(ctx, req.(*SignRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _SignService_Verify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(VerifyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SignServiceServer).Verify(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: SignService_Verify_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SignServiceServer).Verify(ctx, req.(*VerifyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// SignService_ServiceDesc is the grpc.ServiceDesc for SignService service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SignService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.sign.v1.SignService", + HandlerType: (*SignServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Sign", + Handler: _SignService_Sign_Handler, + }, + { + MethodName: "Verify", + Handler: _SignService_Verify_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "agntcy/dir/sign/v1/sign_service.proto", +} diff --git a/api/sign/v1/signature.go b/api/sign/v1/signature.go index 75a3aefe8..9480233e4 100644 --- a/api/sign/v1/signature.go +++ b/api/sign/v1/signature.go @@ -1,59 +1,59 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package v1 - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -// ReferrerType returns the type for Signature. -func (s *Signature) ReferrerType() string { - return string((&Signature{}).ProtoReflect().Descriptor().FullName()) -} - -// MarshalReferrer exports the Signature into a RecordReferrer. -func (s *Signature) MarshalReferrer() (*corev1.RecordReferrer, error) { - if s == nil { - return nil, errors.New("signature is nil") - } - - // Use decoder to convert proto message to structpb - data, err := decoder.StructToProto(s) - if err != nil { - return nil, fmt.Errorf("failed to convert signature to struct: %w", err) - } - - return &corev1.RecordReferrer{ - Type: s.ReferrerType(), - Data: data, - }, nil -} - -// UnmarshalReferrer loads the Signature from a RecordReferrer. -func (s *Signature) UnmarshalReferrer(ref *corev1.RecordReferrer) error { - if ref == nil || ref.GetData() == nil { - return errors.New("referrer or data is nil") - } - - // Use decoder to convert structpb to proto message - decoded, err := decoder.ProtoToStruct[Signature](ref.GetData()) - if err != nil { - return fmt.Errorf("failed to decode signature from referrer: %w", err) - } - - // Copy fields individually to avoid copying the lock - s.Annotations = decoded.GetAnnotations() - s.SignedAt = decoded.GetSignedAt() - s.Algorithm = decoded.GetAlgorithm() - s.Signature = decoded.GetSignature() - s.Certificate = decoded.GetCertificate() - s.ContentType = decoded.GetContentType() - s.ContentBundle = decoded.GetContentBundle() - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +// ReferrerType returns the type for Signature. +func (s *Signature) ReferrerType() string { + return string((&Signature{}).ProtoReflect().Descriptor().FullName()) +} + +// MarshalReferrer exports the Signature into a RecordReferrer. +func (s *Signature) MarshalReferrer() (*corev1.RecordReferrer, error) { + if s == nil { + return nil, errors.New("signature is nil") + } + + // Use decoder to convert proto message to structpb + data, err := decoder.StructToProto(s) + if err != nil { + return nil, fmt.Errorf("failed to convert signature to struct: %w", err) + } + + return &corev1.RecordReferrer{ + Type: s.ReferrerType(), + Data: data, + }, nil +} + +// UnmarshalReferrer loads the Signature from a RecordReferrer. 
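MarshalReferrer above and UnmarshalReferrer just below are designed to round-trip. A minimal sketch using only these two helpers; the timestamp value is arbitrary:

// signatureRoundTrip exports a Signature into a RecordReferrer and loads
// it back, exercising both helpers.
func signatureRoundTrip() error {
	sig := &Signature{
		SignedAt:  "2025-01-01T00:00:00Z", // RFC3339, per the field docs
		Algorithm: "ECDSA_P256_SHA256",
	}

	// Export: wraps the message in a RecordReferrer whose Type is
	// "agntcy.dir.sign.v1.Signature" and whose Data is a structpb value.
	ref, err := sig.MarshalReferrer()
	if err != nil {
		return err
	}

	// Import: copies the decoded fields back into a fresh Signature.
	var decoded Signature
	return decoded.UnmarshalReferrer(ref)
}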
+func (s *Signature) UnmarshalReferrer(ref *corev1.RecordReferrer) error { + if ref == nil || ref.GetData() == nil { + return errors.New("referrer or data is nil") + } + + // Use decoder to convert structpb to proto message + decoded, err := decoder.ProtoToStruct[Signature](ref.GetData()) + if err != nil { + return fmt.Errorf("failed to decode signature from referrer: %w", err) + } + + // Copy fields individually to avoid copying the lock + s.Annotations = decoded.GetAnnotations() + s.SignedAt = decoded.GetSignedAt() + s.Algorithm = decoded.GetAlgorithm() + s.Signature = decoded.GetSignature() + s.Certificate = decoded.GetCertificate() + s.ContentType = decoded.GetContentType() + s.ContentBundle = decoded.GetContentBundle() + + return nil +} diff --git a/api/sign/v1/signature.pb.go b/api/sign/v1/signature.pb.go index 4cc51e99e..b9cf64881 100644 --- a/api/sign/v1/signature.pb.go +++ b/api/sign/v1/signature.pb.go @@ -1,230 +1,230 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/sign/v1/signature.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Signature is the signing data associated with a Record. -// Multiple signatures can be associated with a single Record, -// ie 1 record : N record signatures. -// -// Storage and management of signatures is provided via -// StoreService as a RecordReferrer object. -// -// Signature can be encoded into RecordReferrer object as follows: -// -// type = "agntcy.dir.sign.v1.Signature" -// data = Signature message encoded as JSON -type Signature struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Metadata associated with the signature. - Annotations map[string]string `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Signing timestamp of the record in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - SignedAt string `protobuf:"bytes,2,opt,name=signed_at,json=signedAt,proto3" json:"signed_at,omitempty"` - // The signature algorithm used (e.g., "ECDSA_P256_SHA256"). - Algorithm string `protobuf:"bytes,3,opt,name=algorithm,proto3" json:"algorithm,omitempty"` - // Base64-encoded signature. - Signature string `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` - // Base64-encoded signing certificate. - Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"` - // Type of the signature content bundle. - ContentType string `protobuf:"bytes,6,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` - // Base64-encoded signature bundle produced by the signer. - // It is up to the client to interpret the content of the bundle. 
- ContentBundle string `protobuf:"bytes,7,opt,name=content_bundle,json=contentBundle,proto3" json:"content_bundle,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *Signature) Reset() { - *x = Signature{} - mi := &file_agntcy_dir_sign_v1_signature_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Signature) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Signature) ProtoMessage() {} - -func (x *Signature) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_sign_v1_signature_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Signature.ProtoReflect.Descriptor instead. -func (*Signature) Descriptor() ([]byte, []int) { - return file_agntcy_dir_sign_v1_signature_proto_rawDescGZIP(), []int{0} -} - -func (x *Signature) GetAnnotations() map[string]string { - if x != nil { - return x.Annotations - } - return nil -} - -func (x *Signature) GetSignedAt() string { - if x != nil { - return x.SignedAt - } - return "" -} - -func (x *Signature) GetAlgorithm() string { - if x != nil { - return x.Algorithm - } - return "" -} - -func (x *Signature) GetSignature() string { - if x != nil { - return x.Signature - } - return "" -} - -func (x *Signature) GetCertificate() string { - if x != nil { - return x.Certificate - } - return "" -} - -func (x *Signature) GetContentType() string { - if x != nil { - return x.ContentType - } - return "" -} - -func (x *Signature) GetContentBundle() string { - if x != nil { - return x.ContentBundle - } - return "" -} - -var File_agntcy_dir_sign_v1_signature_proto protoreflect.FileDescriptor - -var file_agntcy_dir_sign_v1_signature_proto_rawDesc = string([]byte{ - 0x0a, 0x22, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe2, 0x02, 0x0a, 0x09, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x67, - 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, - 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x65, 0x72, 
0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x1a, 0x3e, 0x0a, - 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xb6, 0x01, - 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, - 0x41, 0x44, 0x53, 0xaa, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, - 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, - 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, - 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, - 0x67, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_sign_v1_signature_proto_rawDescOnce sync.Once - file_agntcy_dir_sign_v1_signature_proto_rawDescData []byte -) - -func file_agntcy_dir_sign_v1_signature_proto_rawDescGZIP() []byte { - file_agntcy_dir_sign_v1_signature_proto_rawDescOnce.Do(func() { - file_agntcy_dir_sign_v1_signature_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_signature_proto_rawDesc), len(file_agntcy_dir_sign_v1_signature_proto_rawDesc))) - }) - return file_agntcy_dir_sign_v1_signature_proto_rawDescData -} - -var file_agntcy_dir_sign_v1_signature_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_agntcy_dir_sign_v1_signature_proto_goTypes = []any{ - (*Signature)(nil), // 0: agntcy.dir.sign.v1.Signature - nil, // 1: agntcy.dir.sign.v1.Signature.AnnotationsEntry -} -var file_agntcy_dir_sign_v1_signature_proto_depIdxs = []int32{ - 1, // 0: agntcy.dir.sign.v1.Signature.annotations:type_name -> agntcy.dir.sign.v1.Signature.AnnotationsEntry - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_sign_v1_signature_proto_init() } -func file_agntcy_dir_sign_v1_signature_proto_init() { - if File_agntcy_dir_sign_v1_signature_proto != nil { - return - } - type x 
struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_signature_proto_rawDesc), len(file_agntcy_dir_sign_v1_signature_proto_rawDesc)), - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_agntcy_dir_sign_v1_signature_proto_goTypes, - DependencyIndexes: file_agntcy_dir_sign_v1_signature_proto_depIdxs, - MessageInfos: file_agntcy_dir_sign_v1_signature_proto_msgTypes, - }.Build() - File_agntcy_dir_sign_v1_signature_proto = out.File - file_agntcy_dir_sign_v1_signature_proto_goTypes = nil - file_agntcy_dir_sign_v1_signature_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/sign/v1/signature.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Signature is the signing data associated with a Record. +// Multiple signatures can be associated with a single Record, +// ie 1 record : N record signatures. +// +// Storage and management of signatures is provided via +// StoreService as a RecordReferrer object. +// +// Signature can be encoded into RecordReferrer object as follows: +// +// type = "agntcy.dir.sign.v1.Signature" +// data = Signature message encoded as JSON +type Signature struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Metadata associated with the signature. + Annotations map[string]string `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Signing timestamp of the record in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + SignedAt string `protobuf:"bytes,2,opt,name=signed_at,json=signedAt,proto3" json:"signed_at,omitempty"` + // The signature algorithm used (e.g., "ECDSA_P256_SHA256"). + Algorithm string `protobuf:"bytes,3,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + // Base64-encoded signature. + Signature string `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + // Base64-encoded signing certificate. + Certificate string `protobuf:"bytes,5,opt,name=certificate,proto3" json:"certificate,omitempty"` + // Type of the signature content bundle. + ContentType string `protobuf:"bytes,6,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Base64-encoded signature bundle produced by the signer. + // It is up to the client to interpret the content of the bundle. 
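Several fields of this struct (signature, certificate, and the content_bundle declared just below) are base64-encoded strings, so recovering the raw bytes needs only the standard library. A sketch; StdEncoding is an assumption, since the comments say base64 without naming the alphabet:

// decodeSignatureBytes recovers the raw signature bytes from the
// base64-encoded field via the generated getter defined later in this file.
func decodeSignatureBytes(sig *Signature) ([]byte, error) {
	return base64.StdEncoding.DecodeString(sig.GetSignature())
}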
+ ContentBundle string `protobuf:"bytes,7,opt,name=content_bundle,json=contentBundle,proto3" json:"content_bundle,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Signature) Reset() { + *x = Signature{} + mi := &file_agntcy_dir_sign_v1_signature_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Signature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Signature) ProtoMessage() {} + +func (x *Signature) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_sign_v1_signature_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Signature.ProtoReflect.Descriptor instead. +func (*Signature) Descriptor() ([]byte, []int) { + return file_agntcy_dir_sign_v1_signature_proto_rawDescGZIP(), []int{0} +} + +func (x *Signature) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +func (x *Signature) GetSignedAt() string { + if x != nil { + return x.SignedAt + } + return "" +} + +func (x *Signature) GetAlgorithm() string { + if x != nil { + return x.Algorithm + } + return "" +} + +func (x *Signature) GetSignature() string { + if x != nil { + return x.Signature + } + return "" +} + +func (x *Signature) GetCertificate() string { + if x != nil { + return x.Certificate + } + return "" +} + +func (x *Signature) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *Signature) GetContentBundle() string { + if x != nil { + return x.ContentBundle + } + return "" +} + +var File_agntcy_dir_sign_v1_signature_proto protoreflect.FileDescriptor + +var file_agntcy_dir_sign_v1_signature_proto_rawDesc = string([]byte{ + 0x0a, 0x22, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x69, 0x67, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe2, 0x02, 0x0a, 0x09, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x50, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x67, + 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x65, 0x72, 
0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x1a, 0x3e, 0x0a, + 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xb6, 0x01, + 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x73, 0x69, 0x67, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, + 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x69, 0x67, 0x6e, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, + 0x41, 0x44, 0x53, 0xaa, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x41, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1e, + 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x69, 0x67, 0x6e, 0x5c, + 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x15, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x69, + 0x67, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_sign_v1_signature_proto_rawDescOnce sync.Once + file_agntcy_dir_sign_v1_signature_proto_rawDescData []byte +) + +func file_agntcy_dir_sign_v1_signature_proto_rawDescGZIP() []byte { + file_agntcy_dir_sign_v1_signature_proto_rawDescOnce.Do(func() { + file_agntcy_dir_sign_v1_signature_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_signature_proto_rawDesc), len(file_agntcy_dir_sign_v1_signature_proto_rawDesc))) + }) + return file_agntcy_dir_sign_v1_signature_proto_rawDescData +} + +var file_agntcy_dir_sign_v1_signature_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_agntcy_dir_sign_v1_signature_proto_goTypes = []any{ + (*Signature)(nil), // 0: agntcy.dir.sign.v1.Signature + nil, // 1: agntcy.dir.sign.v1.Signature.AnnotationsEntry +} +var file_agntcy_dir_sign_v1_signature_proto_depIdxs = []int32{ + 1, // 0: agntcy.dir.sign.v1.Signature.annotations:type_name -> agntcy.dir.sign.v1.Signature.AnnotationsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_sign_v1_signature_proto_init() } +func file_agntcy_dir_sign_v1_signature_proto_init() { + if File_agntcy_dir_sign_v1_signature_proto != nil { + return + } + type x 
struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_sign_v1_signature_proto_rawDesc), len(file_agntcy_dir_sign_v1_signature_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_agntcy_dir_sign_v1_signature_proto_goTypes, + DependencyIndexes: file_agntcy_dir_sign_v1_signature_proto_depIdxs, + MessageInfos: file_agntcy_dir_sign_v1_signature_proto_msgTypes, + }.Build() + File_agntcy_dir_sign_v1_signature_proto = out.File + file_agntcy_dir_sign_v1_signature_proto_goTypes = nil + file_agntcy_dir_sign_v1_signature_proto_depIdxs = nil +} diff --git a/api/store/v1/store_service.pb.go b/api/store/v1/store_service.pb.go index 1e9580eb7..7ba09fd0c 100644 --- a/api/store/v1/store_service.pb.go +++ b/api/store/v1/store_service.pb.go @@ -1,402 +1,402 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/store/v1/store_service.proto - -package v1 - -import ( - v1 "github.com/agntcy/dir/api/core/v1" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// PushReferrerRequest represents a record with optional OCI artifacts for push operations. -type PushReferrerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Record reference - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // RecordReferrer object to be stored for the record - Referrer *v1.RecordReferrer `protobuf:"bytes,2,opt,name=referrer,proto3" json:"referrer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PushReferrerRequest) Reset() { - *x = PushReferrerRequest{} - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PushReferrerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushReferrerRequest) ProtoMessage() {} - -func (x *PushReferrerRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushReferrerRequest.ProtoReflect.Descriptor instead. 
-func (*PushReferrerRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{0} -} - -func (x *PushReferrerRequest) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *PushReferrerRequest) GetReferrer() *v1.RecordReferrer { - if x != nil { - return x.Referrer - } - return nil -} - -// PushReferrerResponse -type PushReferrerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The push process result - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - // Optional error message if push failed - ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PushReferrerResponse) Reset() { - *x = PushReferrerResponse{} - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PushReferrerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushReferrerResponse) ProtoMessage() {} - -func (x *PushReferrerResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushReferrerResponse.ProtoReflect.Descriptor instead. -func (*PushReferrerResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{1} -} - -func (x *PushReferrerResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *PushReferrerResponse) GetErrorMessage() string { - if x != nil && x.ErrorMessage != nil { - return *x.ErrorMessage - } - return "" -} - -// PullReferrerRequest represents a record with optional OCI artifacts for pull operations. -type PullReferrerRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Record reference - RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` - // Record referrer type to be pulled - // If not provided, all referrers will be pulled - ReferrerType *string `protobuf:"bytes,2,opt,name=referrer_type,json=referrerType,proto3,oneof" json:"referrer_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PullReferrerRequest) Reset() { - *x = PullReferrerRequest{} - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PullReferrerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PullReferrerRequest) ProtoMessage() {} - -func (x *PullReferrerRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PullReferrerRequest.ProtoReflect.Descriptor instead. 
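The optional referrer_type above is what narrows a pull to a single referrer kind. A minimal sketch, assuming signv1 and corev1 aliases for github.com/agntcy/dir/api/sign/v1 and github.com/agntcy/dir/api/core/v1, and proto for google.golang.org/protobuf/proto; ReferrerType() is the helper from api/sign/v1/signature.go earlier in this patch:

// newSignaturePullRequest pulls only Signature referrers for a record;
// leaving ReferrerType nil would pull referrers of every type, per the
// field comment above.
func newSignaturePullRequest(recordRef *corev1.RecordRef) *PullReferrerRequest {
	return &PullReferrerRequest{
		RecordRef:    recordRef,
		ReferrerType: proto.String((&signv1.Signature{}).ReferrerType()),
	}
}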
-func (*PullReferrerRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{2} -} - -func (x *PullReferrerRequest) GetRecordRef() *v1.RecordRef { - if x != nil { - return x.RecordRef - } - return nil -} - -func (x *PullReferrerRequest) GetReferrerType() string { - if x != nil && x.ReferrerType != nil { - return *x.ReferrerType - } - return "" -} - -// PullReferrerResponse is returned after successfully fetching a record referrer. -type PullReferrerResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // RecordReferrer object associated with the record - Referrer *v1.RecordReferrer `protobuf:"bytes,1,opt,name=referrer,proto3" json:"referrer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PullReferrerResponse) Reset() { - *x = PullReferrerResponse{} - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PullReferrerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PullReferrerResponse) ProtoMessage() {} - -func (x *PullReferrerResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PullReferrerResponse.ProtoReflect.Descriptor instead. -func (*PullReferrerResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{3} -} - -func (x *PullReferrerResponse) GetReferrer() *v1.RecordReferrer { - if x != nil { - return x.Referrer - } - return nil -} - -var File_agntcy_dir_store_v1_store_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_store_v1_store_service_proto_rawDesc = string([]byte{ - 0x0a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x01, 0x0a, - 0x13, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, - 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, - 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x66, 0x12, 0x3e, 0x0a, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, - 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 
0x6f, 0x72, 0x64, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, - 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x14, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, - 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x8f, 0x01, 0x0a, 0x13, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, - 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, - 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x22, 0x56, 0x0a, 0x14, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x72, 0x65, - 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, - 0x52, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x32, 0xfe, 0x03, 0x0a, 0x0c, 0x53, - 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x04, 0x50, - 0x75, 0x73, 0x68, 0x12, 0x1a, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x1a, - 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x28, 0x01, - 0x30, 0x01, 0x12, 0x45, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, - 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x1a, 0x1a, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x06, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, - 0x65, 0x66, 0x1a, 0x1e, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x63, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, - 0x74, 0x61, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x12, 0x67, 0x0a, 0x0c, 0x50, 0x75, 0x73, - 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x12, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, - 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x12, 0x67, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, - 0x65, 0x72, 0x12, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0xbf, 0x01, 0x0a, 0x17, - 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, - 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, - 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x44, 0x69, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x41, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, - 0x56, 0x31, 0xe2, 0x02, 0x1f, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, - 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x16, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, - 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_store_v1_store_service_proto_rawDescOnce sync.Once - file_agntcy_dir_store_v1_store_service_proto_rawDescData []byte -) - -func file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_store_v1_store_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_store_v1_store_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_store_service_proto_rawDesc), 
len(file_agntcy_dir_store_v1_store_service_proto_rawDesc))) - }) - return file_agntcy_dir_store_v1_store_service_proto_rawDescData -} - -var file_agntcy_dir_store_v1_store_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_agntcy_dir_store_v1_store_service_proto_goTypes = []any{ - (*PushReferrerRequest)(nil), // 0: agntcy.dir.store.v1.PushReferrerRequest - (*PushReferrerResponse)(nil), // 1: agntcy.dir.store.v1.PushReferrerResponse - (*PullReferrerRequest)(nil), // 2: agntcy.dir.store.v1.PullReferrerRequest - (*PullReferrerResponse)(nil), // 3: agntcy.dir.store.v1.PullReferrerResponse - (*v1.RecordRef)(nil), // 4: agntcy.dir.core.v1.RecordRef - (*v1.RecordReferrer)(nil), // 5: agntcy.dir.core.v1.RecordReferrer - (*v1.Record)(nil), // 6: agntcy.dir.core.v1.Record - (*v1.RecordMeta)(nil), // 7: agntcy.dir.core.v1.RecordMeta - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty -} -var file_agntcy_dir_store_v1_store_service_proto_depIdxs = []int32{ - 4, // 0: agntcy.dir.store.v1.PushReferrerRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 5, // 1: agntcy.dir.store.v1.PushReferrerRequest.referrer:type_name -> agntcy.dir.core.v1.RecordReferrer - 4, // 2: agntcy.dir.store.v1.PullReferrerRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef - 5, // 3: agntcy.dir.store.v1.PullReferrerResponse.referrer:type_name -> agntcy.dir.core.v1.RecordReferrer - 6, // 4: agntcy.dir.store.v1.StoreService.Push:input_type -> agntcy.dir.core.v1.Record - 4, // 5: agntcy.dir.store.v1.StoreService.Pull:input_type -> agntcy.dir.core.v1.RecordRef - 4, // 6: agntcy.dir.store.v1.StoreService.Lookup:input_type -> agntcy.dir.core.v1.RecordRef - 4, // 7: agntcy.dir.store.v1.StoreService.Delete:input_type -> agntcy.dir.core.v1.RecordRef - 0, // 8: agntcy.dir.store.v1.StoreService.PushReferrer:input_type -> agntcy.dir.store.v1.PushReferrerRequest - 2, // 9: agntcy.dir.store.v1.StoreService.PullReferrer:input_type -> agntcy.dir.store.v1.PullReferrerRequest - 4, // 10: agntcy.dir.store.v1.StoreService.Push:output_type -> agntcy.dir.core.v1.RecordRef - 6, // 11: agntcy.dir.store.v1.StoreService.Pull:output_type -> agntcy.dir.core.v1.Record - 7, // 12: agntcy.dir.store.v1.StoreService.Lookup:output_type -> agntcy.dir.core.v1.RecordMeta - 8, // 13: agntcy.dir.store.v1.StoreService.Delete:output_type -> google.protobuf.Empty - 1, // 14: agntcy.dir.store.v1.StoreService.PushReferrer:output_type -> agntcy.dir.store.v1.PushReferrerResponse - 3, // 15: agntcy.dir.store.v1.StoreService.PullReferrer:output_type -> agntcy.dir.store.v1.PullReferrerResponse - 10, // [10:16] is the sub-list for method output_type - 4, // [4:10] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_store_v1_store_service_proto_init() } -func file_agntcy_dir_store_v1_store_service_proto_init() { - if File_agntcy_dir_store_v1_store_service_proto != nil { - return - } - file_agntcy_dir_store_v1_store_service_proto_msgTypes[1].OneofWrappers = []any{} - file_agntcy_dir_store_v1_store_service_proto_msgTypes[2].OneofWrappers = []any{} - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_store_service_proto_rawDesc), len(file_agntcy_dir_store_v1_store_service_proto_rawDesc)), - NumEnums: 0, - NumMessages: 4, - 
NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_store_v1_store_service_proto_goTypes, - DependencyIndexes: file_agntcy_dir_store_v1_store_service_proto_depIdxs, - MessageInfos: file_agntcy_dir_store_v1_store_service_proto_msgTypes, - }.Build() - File_agntcy_dir_store_v1_store_service_proto = out.File - file_agntcy_dir_store_v1_store_service_proto_goTypes = nil - file_agntcy_dir_store_v1_store_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/store/v1/store_service.proto + +package v1 + +import ( + v1 "github.com/agntcy/dir/api/core/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// PushReferrerRequest represents a record with optional OCI artifacts for push operations. +type PushReferrerRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Record reference + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // RecordReferrer object to be stored for the record + Referrer *v1.RecordReferrer `protobuf:"bytes,2,opt,name=referrer,proto3" json:"referrer,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PushReferrerRequest) Reset() { + *x = PushReferrerRequest{} + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PushReferrerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushReferrerRequest) ProtoMessage() {} + +func (x *PushReferrerRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushReferrerRequest.ProtoReflect.Descriptor instead. 
+func (*PushReferrerRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{0} +} + +func (x *PushReferrerRequest) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *PushReferrerRequest) GetReferrer() *v1.RecordReferrer { + if x != nil { + return x.Referrer + } + return nil +} + +// PushReferrerResponse +type PushReferrerResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The push process result + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` + // Optional error message if push failed + ErrorMessage *string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PushReferrerResponse) Reset() { + *x = PushReferrerResponse{} + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PushReferrerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushReferrerResponse) ProtoMessage() {} + +func (x *PushReferrerResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushReferrerResponse.ProtoReflect.Descriptor instead. +func (*PushReferrerResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{1} +} + +func (x *PushReferrerResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *PushReferrerResponse) GetErrorMessage() string { + if x != nil && x.ErrorMessage != nil { + return *x.ErrorMessage + } + return "" +} + +// PullReferrerRequest represents a record with optional OCI artifacts for pull operations. +type PullReferrerRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Record reference + RecordRef *v1.RecordRef `protobuf:"bytes,1,opt,name=record_ref,json=recordRef,proto3" json:"record_ref,omitempty"` + // Record referrer type to be pulled + // If not provided, all referrers will be pulled + ReferrerType *string `protobuf:"bytes,2,opt,name=referrer_type,json=referrerType,proto3,oneof" json:"referrer_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PullReferrerRequest) Reset() { + *x = PullReferrerRequest{} + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullReferrerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullReferrerRequest) ProtoMessage() {} + +func (x *PullReferrerRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullReferrerRequest.ProtoReflect.Descriptor instead. 
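A hedged illustration of the generated API above, as an editorial aside rather than part of the diff: proto3 optional fields such as error_message and referrer_type become pointer-typed struct fields, and every generated getter is nil-safe, so callers can read them without guard clauses. The import aliases and the empty RecordRef below are assumptions for the sketch.

package main

import (
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	storev1 "github.com/agntcy/dir/api/store/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Optional fields are set through pointers; proto.String builds a *string.
	resp := &storev1.PushReferrerResponse{
		Success:      false,
		ErrorMessage: proto.String("referrer already exists"), // hypothetical text
	}
	fmt.Println(resp.GetSuccess(), resp.GetErrorMessage())

	// Getters tolerate nil receivers and unset optionals, returning zero values.
	var missing *storev1.PushReferrerResponse
	fmt.Println(missing.GetErrorMessage()) // "" — no panic

	// Leaving referrer_type unset requests all referrers, per the field comment.
	req := &storev1.PullReferrerRequest{RecordRef: &corev1.RecordRef{}}
	fmt.Println(req.GetReferrerType()) // ""
}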
+func (*PullReferrerRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{2} +} + +func (x *PullReferrerRequest) GetRecordRef() *v1.RecordRef { + if x != nil { + return x.RecordRef + } + return nil +} + +func (x *PullReferrerRequest) GetReferrerType() string { + if x != nil && x.ReferrerType != nil { + return *x.ReferrerType + } + return "" +} + +// PullReferrerResponse is returned after successfully fetching a record referrer. +type PullReferrerResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // RecordReferrer object associated with the record + Referrer *v1.RecordReferrer `protobuf:"bytes,1,opt,name=referrer,proto3" json:"referrer,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PullReferrerResponse) Reset() { + *x = PullReferrerResponse{} + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullReferrerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullReferrerResponse) ProtoMessage() {} + +func (x *PullReferrerResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_store_service_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullReferrerResponse.ProtoReflect.Descriptor instead. +func (*PullReferrerResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP(), []int{3} +} + +func (x *PullReferrerResponse) GetReferrer() *v1.RecordReferrer { + if x != nil { + return x.Referrer + } + return nil +} + +var File_agntcy_dir_store_v1_store_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_store_v1_store_service_proto_rawDesc = string([]byte{ + 0x0a, 0x27, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x93, 0x01, 0x0a, + 0x13, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x72, + 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, + 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, + 0x65, 0x66, 0x12, 0x3e, 0x0a, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 
0x6f, 0x72, 0x64, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x72, 0x22, 0x6c, 0x0a, 0x14, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, + 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x8f, 0x01, 0x0a, 0x13, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x52, 0x09, 0x72, 0x65, 0x63, + 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, + 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x22, 0x56, 0x0a, 0x14, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, + 0x52, 0x08, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x32, 0xfe, 0x03, 0x0a, 0x0c, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x04, 0x50, + 0x75, 0x73, 0x68, 0x12, 0x1a, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x1a, + 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x28, 0x01, + 0x30, 0x01, 0x12, 0x45, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, + 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x1a, 0x1a, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x06, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, + 0x65, 0x66, 0x1a, 0x1e, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x63, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4d, 0x65, + 0x74, 0x61, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x1d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x66, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x12, 0x67, 0x0a, 0x0c, 0x50, 0x75, 0x73, + 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x12, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, + 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, + 0x30, 0x01, 0x12, 0x67, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, + 0x65, 0x72, 0x12, 0x28, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0xbf, 0x01, 0x0a, 0x17, + 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, + 0x64, 0x69, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, + 0xa2, 0x02, 0x03, 0x41, 0x44, 0x53, 0xaa, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x44, 0x69, 0x72, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x41, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, + 0x56, 0x31, 0xe2, 0x02, 0x1f, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x16, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, + 0x69, 0x72, 0x3a, 0x3a, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_store_v1_store_service_proto_rawDescOnce sync.Once + file_agntcy_dir_store_v1_store_service_proto_rawDescData []byte +) + +func file_agntcy_dir_store_v1_store_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_store_v1_store_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_store_v1_store_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_store_service_proto_rawDesc), 
len(file_agntcy_dir_store_v1_store_service_proto_rawDesc))) + }) + return file_agntcy_dir_store_v1_store_service_proto_rawDescData +} + +var file_agntcy_dir_store_v1_store_service_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_agntcy_dir_store_v1_store_service_proto_goTypes = []any{ + (*PushReferrerRequest)(nil), // 0: agntcy.dir.store.v1.PushReferrerRequest + (*PushReferrerResponse)(nil), // 1: agntcy.dir.store.v1.PushReferrerResponse + (*PullReferrerRequest)(nil), // 2: agntcy.dir.store.v1.PullReferrerRequest + (*PullReferrerResponse)(nil), // 3: agntcy.dir.store.v1.PullReferrerResponse + (*v1.RecordRef)(nil), // 4: agntcy.dir.core.v1.RecordRef + (*v1.RecordReferrer)(nil), // 5: agntcy.dir.core.v1.RecordReferrer + (*v1.Record)(nil), // 6: agntcy.dir.core.v1.Record + (*v1.RecordMeta)(nil), // 7: agntcy.dir.core.v1.RecordMeta + (*emptypb.Empty)(nil), // 8: google.protobuf.Empty +} +var file_agntcy_dir_store_v1_store_service_proto_depIdxs = []int32{ + 4, // 0: agntcy.dir.store.v1.PushReferrerRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 5, // 1: agntcy.dir.store.v1.PushReferrerRequest.referrer:type_name -> agntcy.dir.core.v1.RecordReferrer + 4, // 2: agntcy.dir.store.v1.PullReferrerRequest.record_ref:type_name -> agntcy.dir.core.v1.RecordRef + 5, // 3: agntcy.dir.store.v1.PullReferrerResponse.referrer:type_name -> agntcy.dir.core.v1.RecordReferrer + 6, // 4: agntcy.dir.store.v1.StoreService.Push:input_type -> agntcy.dir.core.v1.Record + 4, // 5: agntcy.dir.store.v1.StoreService.Pull:input_type -> agntcy.dir.core.v1.RecordRef + 4, // 6: agntcy.dir.store.v1.StoreService.Lookup:input_type -> agntcy.dir.core.v1.RecordRef + 4, // 7: agntcy.dir.store.v1.StoreService.Delete:input_type -> agntcy.dir.core.v1.RecordRef + 0, // 8: agntcy.dir.store.v1.StoreService.PushReferrer:input_type -> agntcy.dir.store.v1.PushReferrerRequest + 2, // 9: agntcy.dir.store.v1.StoreService.PullReferrer:input_type -> agntcy.dir.store.v1.PullReferrerRequest + 4, // 10: agntcy.dir.store.v1.StoreService.Push:output_type -> agntcy.dir.core.v1.RecordRef + 6, // 11: agntcy.dir.store.v1.StoreService.Pull:output_type -> agntcy.dir.core.v1.Record + 7, // 12: agntcy.dir.store.v1.StoreService.Lookup:output_type -> agntcy.dir.core.v1.RecordMeta + 8, // 13: agntcy.dir.store.v1.StoreService.Delete:output_type -> google.protobuf.Empty + 1, // 14: agntcy.dir.store.v1.StoreService.PushReferrer:output_type -> agntcy.dir.store.v1.PushReferrerResponse + 3, // 15: agntcy.dir.store.v1.StoreService.PullReferrer:output_type -> agntcy.dir.store.v1.PullReferrerResponse + 10, // [10:16] is the sub-list for method output_type + 4, // [4:10] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_store_v1_store_service_proto_init() } +func file_agntcy_dir_store_v1_store_service_proto_init() { + if File_agntcy_dir_store_v1_store_service_proto != nil { + return + } + file_agntcy_dir_store_v1_store_service_proto_msgTypes[1].OneofWrappers = []any{} + file_agntcy_dir_store_v1_store_service_proto_msgTypes[2].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_store_service_proto_rawDesc), len(file_agntcy_dir_store_v1_store_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + 
NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_store_v1_store_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_store_v1_store_service_proto_depIdxs, + MessageInfos: file_agntcy_dir_store_v1_store_service_proto_msgTypes, + }.Build() + File_agntcy_dir_store_v1_store_service_proto = out.File + file_agntcy_dir_store_v1_store_service_proto_goTypes = nil + file_agntcy_dir_store_v1_store_service_proto_depIdxs = nil +} diff --git a/api/store/v1/store_service_grpc.pb.go b/api/store/v1/store_service_grpc.pb.go index 7650a31a0..4bfce2143 100644 --- a/api/store/v1/store_service_grpc.pb.go +++ b/api/store/v1/store_service_grpc.pb.go @@ -1,552 +1,552 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/store/v1/store_service.proto - -package v1 - -import ( - context "context" - v1 "github.com/agntcy/dir/api/core/v1" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - StoreService_Push_FullMethodName = "/agntcy.dir.store.v1.StoreService/Push" - StoreService_Pull_FullMethodName = "/agntcy.dir.store.v1.StoreService/Pull" - StoreService_Lookup_FullMethodName = "/agntcy.dir.store.v1.StoreService/Lookup" - StoreService_Delete_FullMethodName = "/agntcy.dir.store.v1.StoreService/Delete" - StoreService_PushReferrer_FullMethodName = "/agntcy.dir.store.v1.StoreService/PushReferrer" - StoreService_PullReferrer_FullMethodName = "/agntcy.dir.store.v1.StoreService/PullReferrer" -) - -// StoreServiceClient is the client API for StoreService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// Defines an interface for content-addressable storage -// service for objects. -// -// Max object size: 4MB (to fully fit in a single request) -// Max metadata size: 100KB -// -// Store service can be implemented by various storage backends, -// such as local file system, OCI registry, etc. -// -// Middleware should be used to control who can perform these RPCs. -// Policies for the middleware can be handled via separate service. -// -// Each operation is performed sequentially, meaning that -// for the N-th request, N-th response will be returned. -// If an error occurs, the stream will be cancelled. -type StoreServiceClient interface { - // Push performs write operation for given records. - Push(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushClient, error) - // Pull performs read operation for given records. - Pull(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullClient, error) - // Lookup resolves basic metadata for the records. - Lookup(ctx context.Context, opts ...grpc.CallOption) (StoreService_LookupClient, error) - // Remove performs delete operation for the records. - Delete(ctx context.Context, opts ...grpc.CallOption) (StoreService_DeleteClient, error) - // PushReferrer performs write operation for record referrers. 
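All six methods in the client interface being listed here are stream-based, and the service comment guarantees that the N-th request is answered by the N-th response, so a lockstep Send/Recv loop is a natural consumption pattern. The helper below is a sketch under that contract; pushRecords and the storev1/corev1 aliases are illustrative names, not part of the diff, and the interface listing continues after this aside.

import (
	"context"

	corev1 "github.com/agntcy/dir/api/core/v1"
	storev1 "github.com/agntcy/dir/api/store/v1"
)

// pushRecords streams records and collects the content-addressed refs,
// relying on the documented in-order response guarantee.
func pushRecords(ctx context.Context, client storev1.StoreServiceClient, recs []*corev1.Record) ([]*corev1.RecordRef, error) {
	stream, err := client.Push(ctx)
	if err != nil {
		return nil, err
	}
	refs := make([]*corev1.RecordRef, 0, len(recs))
	for _, rec := range recs {
		if err := stream.Send(rec); err != nil {
			return nil, err
		}
		ref, err := stream.Recv() // N-th response answers the N-th record
		if err != nil {
			return nil, err
		}
		refs = append(refs, ref)
	}
	return refs, stream.CloseSend()
}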
- PushReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushReferrerClient, error) - // PullReferrer performs read operation for record referrers. - PullReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullReferrerClient, error) -} - -type storeServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewStoreServiceClient(cc grpc.ClientConnInterface) StoreServiceClient { - return &storeServiceClient{cc} -} - -func (c *storeServiceClient) Push(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[0], StoreService_Push_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &storeServicePushClient{ClientStream: stream} - return x, nil -} - -type StoreService_PushClient interface { - Send(*v1.Record) error - Recv() (*v1.RecordRef, error) - grpc.ClientStream -} - -type storeServicePushClient struct { - grpc.ClientStream -} - -func (x *storeServicePushClient) Send(m *v1.Record) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServicePushClient) Recv() (*v1.RecordRef, error) { - m := new(v1.RecordRef) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeServiceClient) Pull(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[1], StoreService_Pull_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &storeServicePullClient{ClientStream: stream} - return x, nil -} - -type StoreService_PullClient interface { - Send(*v1.RecordRef) error - Recv() (*v1.Record, error) - grpc.ClientStream -} - -type storeServicePullClient struct { - grpc.ClientStream -} - -func (x *storeServicePullClient) Send(m *v1.RecordRef) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServicePullClient) Recv() (*v1.Record, error) { - m := new(v1.Record) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeServiceClient) Lookup(ctx context.Context, opts ...grpc.CallOption) (StoreService_LookupClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[2], StoreService_Lookup_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &storeServiceLookupClient{ClientStream: stream} - return x, nil -} - -type StoreService_LookupClient interface { - Send(*v1.RecordRef) error - Recv() (*v1.RecordMeta, error) - grpc.ClientStream -} - -type storeServiceLookupClient struct { - grpc.ClientStream -} - -func (x *storeServiceLookupClient) Send(m *v1.RecordRef) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServiceLookupClient) Recv() (*v1.RecordMeta, error) { - m := new(v1.RecordMeta) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeServiceClient) Delete(ctx context.Context, opts ...grpc.CallOption) (StoreService_DeleteClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[3], StoreService_Delete_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &storeServiceDeleteClient{ClientStream: stream} - return x, nil -} - -type StoreService_DeleteClient interface { - Send(*v1.RecordRef) error - CloseAndRecv() (*emptypb.Empty, error) - grpc.ClientStream -} - -type storeServiceDeleteClient struct { - grpc.ClientStream -} - -func (x *storeServiceDeleteClient) Send(m *v1.RecordRef) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServiceDeleteClient) CloseAndRecv() (*emptypb.Empty, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(emptypb.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeServiceClient) PushReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushReferrerClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[4], StoreService_PushReferrer_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &storeServicePushReferrerClient{ClientStream: stream} - return x, nil -} - -type StoreService_PushReferrerClient interface { - Send(*PushReferrerRequest) error - Recv() (*PushReferrerResponse, error) - grpc.ClientStream -} - -type storeServicePushReferrerClient struct { - grpc.ClientStream -} - -func (x *storeServicePushReferrerClient) Send(m *PushReferrerRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServicePushReferrerClient) Recv() (*PushReferrerResponse, error) { - m := new(PushReferrerResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *storeServiceClient) PullReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullReferrerClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[5], StoreService_PullReferrer_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &storeServicePullReferrerClient{ClientStream: stream} - return x, nil -} - -type StoreService_PullReferrerClient interface { - Send(*PullReferrerRequest) error - Recv() (*PullReferrerResponse, error) - grpc.ClientStream -} - -type storeServicePullReferrerClient struct { - grpc.ClientStream -} - -func (x *storeServicePullReferrerClient) Send(m *PullReferrerRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *storeServicePullReferrerClient) Recv() (*PullReferrerResponse, error) { - m := new(PullReferrerResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// StoreServiceServer is the server API for StoreService service. -// All implementations should embed UnimplementedStoreServiceServer -// for forward compatibility. -// -// Defines an interface for content-addressable storage -// service for objects. -// -// Max object size: 4MB (to fully fit in a single request) -// Max metadata size: 100KB -// -// Store service can be implemented by various storage backends, -// such as local file system, OCI registry, etc. -// -// Middleware should be used to control who can perform these RPCs. -// Policies for the middleware can be handled via separate service. -// -// Each operation is performed sequentially, meaning that -// for the N-th request, N-th response will be returned. -// If an error occurs, the stream will be cancelled. 
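Before the server interface that follows, a hedged sketch of a partial implementation under the stated conventions: embed UnimplementedStoreServiceServer by value so that methods you do not override return codes.Unimplemented instead of panicking, and answer each request in order. lookupOnlyServer is a hypothetical name, and the empty RecordMeta stands in for real metadata defined elsewhere in the API.

import (
	"io"

	corev1 "github.com/agntcy/dir/api/core/v1"
	storev1 "github.com/agntcy/dir/api/store/v1"
)

type lookupOnlyServer struct {
	storev1.UnimplementedStoreServiceServer // embedded by value for forward compatibility
}

func (s *lookupOnlyServer) Lookup(stream storev1.StoreService_LookupServer) error {
	for {
		ref, err := stream.Recv()
		if err == io.EOF {
			return nil // client closed its send side; finish cleanly
		}
		if err != nil {
			return err
		}
		_ = ref // a real server would resolve metadata for this RecordRef
		// One in-order response per request, per the service contract.
		if err := stream.Send(&corev1.RecordMeta{}); err != nil {
			return err
		}
	}
}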
-type StoreServiceServer interface { - // Push performs write operation for given records. - Push(StoreService_PushServer) error - // Pull performs read operation for given records. - Pull(StoreService_PullServer) error - // Lookup resolves basic metadata for the records. - Lookup(StoreService_LookupServer) error - // Remove performs delete operation for the records. - Delete(StoreService_DeleteServer) error - // PushReferrer performs write operation for record referrers. - PushReferrer(StoreService_PushReferrerServer) error - // PullReferrer performs read operation for record referrers. - PullReferrer(StoreService_PullReferrerServer) error -} - -// UnimplementedStoreServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedStoreServiceServer struct{} - -func (UnimplementedStoreServiceServer) Push(StoreService_PushServer) error { - return status.Errorf(codes.Unimplemented, "method Push not implemented") -} -func (UnimplementedStoreServiceServer) Pull(StoreService_PullServer) error { - return status.Errorf(codes.Unimplemented, "method Pull not implemented") -} -func (UnimplementedStoreServiceServer) Lookup(StoreService_LookupServer) error { - return status.Errorf(codes.Unimplemented, "method Lookup not implemented") -} -func (UnimplementedStoreServiceServer) Delete(StoreService_DeleteServer) error { - return status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (UnimplementedStoreServiceServer) PushReferrer(StoreService_PushReferrerServer) error { - return status.Errorf(codes.Unimplemented, "method PushReferrer not implemented") -} -func (UnimplementedStoreServiceServer) PullReferrer(StoreService_PullReferrerServer) error { - return status.Errorf(codes.Unimplemented, "method PullReferrer not implemented") -} -func (UnimplementedStoreServiceServer) testEmbeddedByValue() {} - -// UnsafeStoreServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to StoreServiceServer will -// result in compilation errors. -type UnsafeStoreServiceServer interface { - mustEmbedUnimplementedStoreServiceServer() -} - -func RegisterStoreServiceServer(s grpc.ServiceRegistrar, srv StoreServiceServer) { - // If the following call panics, it indicates UnimplementedStoreServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O.
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&StoreService_ServiceDesc, srv) -} - -func _StoreService_Push_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).Push(&storeServicePushServer{ServerStream: stream}) -} - -type StoreService_PushServer interface { - Send(*v1.RecordRef) error - Recv() (*v1.Record, error) - grpc.ServerStream -} - -type storeServicePushServer struct { - grpc.ServerStream -} - -func (x *storeServicePushServer) Send(m *v1.RecordRef) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServicePushServer) Recv() (*v1.Record, error) { - m := new(v1.Record) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _StoreService_Pull_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).Pull(&storeServicePullServer{ServerStream: stream}) -} - -type StoreService_PullServer interface { - Send(*v1.Record) error - Recv() (*v1.RecordRef, error) - grpc.ServerStream -} - -type storeServicePullServer struct { - grpc.ServerStream -} - -func (x *storeServicePullServer) Send(m *v1.Record) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServicePullServer) Recv() (*v1.RecordRef, error) { - m := new(v1.RecordRef) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _StoreService_Lookup_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).Lookup(&storeServiceLookupServer{ServerStream: stream}) -} - -type StoreService_LookupServer interface { - Send(*v1.RecordMeta) error - Recv() (*v1.RecordRef, error) - grpc.ServerStream -} - -type storeServiceLookupServer struct { - grpc.ServerStream -} - -func (x *storeServiceLookupServer) Send(m *v1.RecordMeta) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServiceLookupServer) Recv() (*v1.RecordRef, error) { - m := new(v1.RecordRef) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _StoreService_Delete_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).Delete(&storeServiceDeleteServer{ServerStream: stream}) -} - -type StoreService_DeleteServer interface { - SendAndClose(*emptypb.Empty) error - Recv() (*v1.RecordRef, error) - grpc.ServerStream -} - -type storeServiceDeleteServer struct { - grpc.ServerStream -} - -func (x *storeServiceDeleteServer) SendAndClose(m *emptypb.Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServiceDeleteServer) Recv() (*v1.RecordRef, error) { - m := new(v1.RecordRef) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _StoreService_PushReferrer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).PushReferrer(&storeServicePushReferrerServer{ServerStream: stream}) -} - -type StoreService_PushReferrerServer interface { - Send(*PushReferrerResponse) error - Recv() (*PushReferrerRequest, error) - grpc.ServerStream -} - -type storeServicePushReferrerServer struct { - grpc.ServerStream -} - -func (x *storeServicePushReferrerServer) Send(m *PushReferrerResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServicePushReferrerServer) Recv() (*PushReferrerRequest, error) { - m := new(PushReferrerRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func 
_StoreService_PullReferrer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(StoreServiceServer).PullReferrer(&storeServicePullReferrerServer{ServerStream: stream}) -} - -type StoreService_PullReferrerServer interface { - Send(*PullReferrerResponse) error - Recv() (*PullReferrerRequest, error) - grpc.ServerStream -} - -type storeServicePullReferrerServer struct { - grpc.ServerStream -} - -func (x *storeServicePullReferrerServer) Send(m *PullReferrerResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *storeServicePullReferrerServer) Recv() (*PullReferrerRequest, error) { - m := new(PullReferrerRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// StoreService_ServiceDesc is the grpc.ServiceDesc for StoreService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var StoreService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.store.v1.StoreService", - HandlerType: (*StoreServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Push", - Handler: _StoreService_Push_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Pull", - Handler: _StoreService_Pull_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Lookup", - Handler: _StoreService_Lookup_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Delete", - Handler: _StoreService_Delete_Handler, - ClientStreams: true, - }, - { - StreamName: "PushReferrer", - Handler: _StoreService_PushReferrer_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "PullReferrer", - Handler: _StoreService_PullReferrer_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "agntcy/dir/store/v1/store_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: agntcy/dir/store/v1/store_service.proto + +package v1 + +import ( + context "context" + v1 "github.com/agntcy/dir/api/core/v1" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + StoreService_Push_FullMethodName = "/agntcy.dir.store.v1.StoreService/Push" + StoreService_Pull_FullMethodName = "/agntcy.dir.store.v1.StoreService/Pull" + StoreService_Lookup_FullMethodName = "/agntcy.dir.store.v1.StoreService/Lookup" + StoreService_Delete_FullMethodName = "/agntcy.dir.store.v1.StoreService/Delete" + StoreService_PushReferrer_FullMethodName = "/agntcy.dir.store.v1.StoreService/PushReferrer" + StoreService_PullReferrer_FullMethodName = "/agntcy.dir.store.v1.StoreService/PullReferrer" +) + +// StoreServiceClient is the client API for StoreService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Defines an interface for content-addressable storage +// service for objects. 
+// +// Max object size: 4MB (to fully fit in a single request) +// Max metadata size: 100KB +// +// Store service can be implemented by various storage backends, +// such as local file system, OCI registry, etc. +// +// Middleware should be used to control who can perform these RPCs. +// Policies for the middleware can be handled via separate service. +// +// Each operation is performed sequentially, meaning that +// for the N-th request, N-th response will be returned. +// If an error occurs, the stream will be cancelled. +type StoreServiceClient interface { + // Push performs write operation for given records. + Push(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushClient, error) + // Pull performs read operation for given records. + Pull(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullClient, error) + // Lookup resolves basic metadata for the records. + Lookup(ctx context.Context, opts ...grpc.CallOption) (StoreService_LookupClient, error) + // Remove performs delete operation for the records. + Delete(ctx context.Context, opts ...grpc.CallOption) (StoreService_DeleteClient, error) + // PushReferrer performs write operation for record referrers. + PushReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushReferrerClient, error) + // PullReferrer performs read operation for record referrers. + PullReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullReferrerClient, error) +} + +type storeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewStoreServiceClient(cc grpc.ClientConnInterface) StoreServiceClient { + return &storeServiceClient{cc} +} + +func (c *storeServiceClient) Push(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[0], StoreService_Push_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &storeServicePushClient{ClientStream: stream} + return x, nil +} + +type StoreService_PushClient interface { + Send(*v1.Record) error + Recv() (*v1.RecordRef, error) + grpc.ClientStream +} + +type storeServicePushClient struct { + grpc.ClientStream +} + +func (x *storeServicePushClient) Send(m *v1.Record) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServicePushClient) Recv() (*v1.RecordRef, error) { + m := new(v1.RecordRef) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeServiceClient) Pull(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[1], StoreService_Pull_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &storeServicePullClient{ClientStream: stream} + return x, nil +} + +type StoreService_PullClient interface { + Send(*v1.RecordRef) error + Recv() (*v1.Record, error) + grpc.ClientStream +} + +type storeServicePullClient struct { + grpc.ClientStream +} + +func (x *storeServicePullClient) Send(m *v1.RecordRef) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServicePullClient) Recv() (*v1.Record, error) { + m := new(v1.Record) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeServiceClient) Lookup(ctx context.Context, opts ...grpc.CallOption) (StoreService_LookupClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[2], StoreService_Lookup_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &storeServiceLookupClient{ClientStream: stream} + return x, nil +} + +type StoreService_LookupClient interface { + Send(*v1.RecordRef) error + Recv() (*v1.RecordMeta, error) + grpc.ClientStream +} + +type storeServiceLookupClient struct { + grpc.ClientStream +} + +func (x *storeServiceLookupClient) Send(m *v1.RecordRef) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServiceLookupClient) Recv() (*v1.RecordMeta, error) { + m := new(v1.RecordMeta) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeServiceClient) Delete(ctx context.Context, opts ...grpc.CallOption) (StoreService_DeleteClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[3], StoreService_Delete_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &storeServiceDeleteClient{ClientStream: stream} + return x, nil +} + +type StoreService_DeleteClient interface { + Send(*v1.RecordRef) error + CloseAndRecv() (*emptypb.Empty, error) + grpc.ClientStream +} + +type storeServiceDeleteClient struct { + grpc.ClientStream +} + +func (x *storeServiceDeleteClient) Send(m *v1.RecordRef) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServiceDeleteClient) CloseAndRecv() (*emptypb.Empty, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(emptypb.Empty) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeServiceClient) PushReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PushReferrerClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[4], StoreService_PushReferrer_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &storeServicePushReferrerClient{ClientStream: stream} + return x, nil +} + +type StoreService_PushReferrerClient interface { + Send(*PushReferrerRequest) error + Recv() (*PushReferrerResponse, error) + grpc.ClientStream +} + +type storeServicePushReferrerClient struct { + grpc.ClientStream +} + +func (x *storeServicePushReferrerClient) Send(m *PushReferrerRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServicePushReferrerClient) Recv() (*PushReferrerResponse, error) { + m := new(PushReferrerResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storeServiceClient) PullReferrer(ctx context.Context, opts ...grpc.CallOption) (StoreService_PullReferrerClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &StoreService_ServiceDesc.Streams[5], StoreService_PullReferrer_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &storeServicePullReferrerClient{ClientStream: stream} + return x, nil +} + +type StoreService_PullReferrerClient interface { + Send(*PullReferrerRequest) error + Recv() (*PullReferrerResponse, error) + grpc.ClientStream +} + +type storeServicePullReferrerClient struct { + grpc.ClientStream +} + +func (x *storeServicePullReferrerClient) Send(m *PullReferrerRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storeServicePullReferrerClient) Recv() (*PullReferrerResponse, error) { + m := new(PullReferrerResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StoreServiceServer is the server API for StoreService service. +// All implementations should embed UnimplementedStoreServiceServer +// for forward compatibility. +// +// Defines an interface for content-addressable storage +// service for objects. +// +// Max object size: 4MB (to fully fit in a single request) +// Max metadata size: 100KB +// +// Store service can be implemented by various storage backends, +// such as local file system, OCI registry, etc. +// +// Middleware should be used to control who can perform these RPCs. +// Policies for the middleware can be handled via separate service. +// +// Each operation is performed sequentially, meaning that +// for the N-th request, N-th response will be returned. +// If an error occurs, the stream will be cancelled. +type StoreServiceServer interface { + // Push performs write operation for given records. + Push(StoreService_PushServer) error + // Pull performs read operation for given records. + Pull(StoreService_PullServer) error + // Lookup resolves basic metadata for the records. + Lookup(StoreService_LookupServer) error + // Remove performs delete operation for the records. + Delete(StoreService_DeleteServer) error + // PushReferrer performs write operation for record referrers. + PushReferrer(StoreService_PushReferrerServer) error + // PullReferrer performs read operation for record referrers. + PullReferrer(StoreService_PullReferrerServer) error +} + +// UnimplementedStoreServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
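The PushReferrer stream shown above reports failures in-band: per the message comments, the response carries success plus an optional error_message rather than failing the stream. A sketch of checking both channels follows, before the generated code resumes; attachReferrer and the aliases are illustrative, not part of the diff.

import (
	"context"
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	storev1 "github.com/agntcy/dir/api/store/v1"
)

// attachReferrer pushes one referrer and distinguishes transport errors
// from application-level failures reported in the response message.
func attachReferrer(ctx context.Context, client storev1.StoreServiceClient, ref *corev1.RecordRef, r *corev1.RecordReferrer) error {
	stream, err := client.PushReferrer(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(&storev1.PushReferrerRequest{RecordRef: ref, Referrer: r}); err != nil {
		return err
	}
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	if !resp.GetSuccess() {
		return fmt.Errorf("push referrer rejected: %s", resp.GetErrorMessage())
	}
	return stream.CloseSend()
}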
+type UnimplementedStoreServiceServer struct{}
+
+func (UnimplementedStoreServiceServer) Push(StoreService_PushServer) error {
+	return status.Errorf(codes.Unimplemented, "method Push not implemented")
+}
+func (UnimplementedStoreServiceServer) Pull(StoreService_PullServer) error {
+	return status.Errorf(codes.Unimplemented, "method Pull not implemented")
+}
+func (UnimplementedStoreServiceServer) Lookup(StoreService_LookupServer) error {
+	return status.Errorf(codes.Unimplemented, "method Lookup not implemented")
+}
+func (UnimplementedStoreServiceServer) Delete(StoreService_DeleteServer) error {
+	return status.Errorf(codes.Unimplemented, "method Delete not implemented")
+}
+func (UnimplementedStoreServiceServer) PushReferrer(StoreService_PushReferrerServer) error {
+	return status.Errorf(codes.Unimplemented, "method PushReferrer not implemented")
+}
+func (UnimplementedStoreServiceServer) PullReferrer(StoreService_PullReferrerServer) error {
+	return status.Errorf(codes.Unimplemented, "method PullReferrer not implemented")
+}
+func (UnimplementedStoreServiceServer) testEmbeddedByValue() {}
+
+// UnsafeStoreServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to StoreServiceServer will
+// result in compilation errors.
+type UnsafeStoreServiceServer interface {
+	mustEmbedUnimplementedStoreServiceServer()
+}
+
+func RegisterStoreServiceServer(s grpc.ServiceRegistrar, srv StoreServiceServer) {
+	// If the following call panics, it indicates UnimplementedStoreServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&StoreService_ServiceDesc, srv) +} + +func _StoreService_Push_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).Push(&storeServicePushServer{ServerStream: stream}) +} + +type StoreService_PushServer interface { + Send(*v1.RecordRef) error + Recv() (*v1.Record, error) + grpc.ServerStream +} + +type storeServicePushServer struct { + grpc.ServerStream +} + +func (x *storeServicePushServer) Send(m *v1.RecordRef) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServicePushServer) Recv() (*v1.Record, error) { + m := new(v1.Record) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _StoreService_Pull_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).Pull(&storeServicePullServer{ServerStream: stream}) +} + +type StoreService_PullServer interface { + Send(*v1.Record) error + Recv() (*v1.RecordRef, error) + grpc.ServerStream +} + +type storeServicePullServer struct { + grpc.ServerStream +} + +func (x *storeServicePullServer) Send(m *v1.Record) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServicePullServer) Recv() (*v1.RecordRef, error) { + m := new(v1.RecordRef) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _StoreService_Lookup_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).Lookup(&storeServiceLookupServer{ServerStream: stream}) +} + +type StoreService_LookupServer interface { + Send(*v1.RecordMeta) error + Recv() (*v1.RecordRef, error) + grpc.ServerStream +} + +type storeServiceLookupServer struct { + grpc.ServerStream +} + +func (x *storeServiceLookupServer) Send(m *v1.RecordMeta) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServiceLookupServer) Recv() (*v1.RecordRef, error) { + m := new(v1.RecordRef) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _StoreService_Delete_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).Delete(&storeServiceDeleteServer{ServerStream: stream}) +} + +type StoreService_DeleteServer interface { + SendAndClose(*emptypb.Empty) error + Recv() (*v1.RecordRef, error) + grpc.ServerStream +} + +type storeServiceDeleteServer struct { + grpc.ServerStream +} + +func (x *storeServiceDeleteServer) SendAndClose(m *emptypb.Empty) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServiceDeleteServer) Recv() (*v1.RecordRef, error) { + m := new(v1.RecordRef) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _StoreService_PushReferrer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).PushReferrer(&storeServicePushReferrerServer{ServerStream: stream}) +} + +type StoreService_PushReferrerServer interface { + Send(*PushReferrerResponse) error + Recv() (*PushReferrerRequest, error) + grpc.ServerStream +} + +type storeServicePushReferrerServer struct { + grpc.ServerStream +} + +func (x *storeServicePushReferrerServer) Send(m *PushReferrerResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServicePushReferrerServer) Recv() (*PushReferrerRequest, error) { + m := new(PushReferrerRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func 
_StoreService_PullReferrer_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StoreServiceServer).PullReferrer(&storeServicePullReferrerServer{ServerStream: stream}) +} + +type StoreService_PullReferrerServer interface { + Send(*PullReferrerResponse) error + Recv() (*PullReferrerRequest, error) + grpc.ServerStream +} + +type storeServicePullReferrerServer struct { + grpc.ServerStream +} + +func (x *storeServicePullReferrerServer) Send(m *PullReferrerResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storeServicePullReferrerServer) Recv() (*PullReferrerRequest, error) { + m := new(PullReferrerRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StoreService_ServiceDesc is the grpc.ServiceDesc for StoreService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var StoreService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.store.v1.StoreService", + HandlerType: (*StoreServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Push", + Handler: _StoreService_Push_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Pull", + Handler: _StoreService_Pull_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Lookup", + Handler: _StoreService_Lookup_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Delete", + Handler: _StoreService_Delete_Handler, + ClientStreams: true, + }, + { + StreamName: "PushReferrer", + Handler: _StoreService_PushReferrer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "PullReferrer", + Handler: _StoreService_PullReferrer_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "agntcy/dir/store/v1/store_service.proto", +} diff --git a/api/store/v1/sync_service.pb.go b/api/store/v1/sync_service.pb.go index cbcb31c15..821c3497e 100644 --- a/api/store/v1/sync_service.pb.go +++ b/api/store/v1/sync_service.pb.go @@ -1,942 +1,942 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.5 -// protoc (unknown) -// source: agntcy/dir/store/v1/sync_service.proto - -package v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// SyncStatus enumeration defines the possible states of a synchronization operation. 
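The StoreService surface above is the standard protoc-gen-go-grpc streaming shape: servers embed UnimplementedStoreServiceServer by value (per the NOTE in the generated comments) and override only the streams they actually support. A rough sketch of such a server follows; it is illustrative only, not part of this diff. It assumes the generated package is importable at the go_package path github.com/agntcy/dir/api/store/v1, and that RecordRef/RecordMeta live in the core v1 package imported above under the alias v1 (its exact path is an assumption here).

package main

import (
	"io"
	"log"
	"net"

	"google.golang.org/grpc"

	corev1 "github.com/agntcy/dir/api/core/v1"   // assumed home of RecordRef/RecordMeta
	storev1 "github.com/agntcy/dir/api/store/v1" // generated package from this diff
)

// lookupOnlyStore embeds UnimplementedStoreServiceServer by value, as the
// NOTE above requires, so every stream it does not override fails with
// codes.Unimplemented instead of panicking.
type lookupOnlyStore struct {
	storev1.UnimplementedStoreServiceServer
}

// Lookup answers each incoming RecordRef with an empty RecordMeta, keeping
// the one-response-per-request ordering the service comment promises.
func (lookupOnlyStore) Lookup(stream storev1.StoreService_LookupServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return nil // client closed its send side; end the stream cleanly
			}
			return err
		}
		if err := stream.Send(&corev1.RecordMeta{}); err != nil {
			return err
		}
	}
}

func main() {
	lis, err := net.Listen("tcp", "localhost:8888") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	storev1.RegisterStoreServiceServer(srv, lookupOnlyStore{})
	log.Fatal(srv.Serve(lis))
}

Because lookupOnlyStore embeds the Unimplemented type by value, the testEmbeddedByValue probe in RegisterStoreServiceServer passes, and future methods added to StoreServiceServer will not break this implementation.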
-type SyncStatus int32 - -const ( - // Default/unset status - should not be used in practice - SyncStatus_SYNC_STATUS_UNSPECIFIED SyncStatus = 0 - // Sync operation has been created but not yet started - SyncStatus_SYNC_STATUS_PENDING SyncStatus = 1 - // Sync operation is actively discovering and transferring objects - SyncStatus_SYNC_STATUS_IN_PROGRESS SyncStatus = 2 - // Sync operation encountered an error and stopped - SyncStatus_SYNC_STATUS_FAILED SyncStatus = 3 - // Sync operation has been marked for deletion but cleanup not yet started - SyncStatus_SYNC_STATUS_DELETE_PENDING SyncStatus = 4 - // Sync operation has been successfully deleted and cleaned up - SyncStatus_SYNC_STATUS_DELETED SyncStatus = 5 -) - -// Enum value maps for SyncStatus. -var ( - SyncStatus_name = map[int32]string{ - 0: "SYNC_STATUS_UNSPECIFIED", - 1: "SYNC_STATUS_PENDING", - 2: "SYNC_STATUS_IN_PROGRESS", - 3: "SYNC_STATUS_FAILED", - 4: "SYNC_STATUS_DELETE_PENDING", - 5: "SYNC_STATUS_DELETED", - } - SyncStatus_value = map[string]int32{ - "SYNC_STATUS_UNSPECIFIED": 0, - "SYNC_STATUS_PENDING": 1, - "SYNC_STATUS_IN_PROGRESS": 2, - "SYNC_STATUS_FAILED": 3, - "SYNC_STATUS_DELETE_PENDING": 4, - "SYNC_STATUS_DELETED": 5, - } -) - -func (x SyncStatus) Enum() *SyncStatus { - p := new(SyncStatus) - *p = x - return p -} - -func (x SyncStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SyncStatus) Descriptor() protoreflect.EnumDescriptor { - return file_agntcy_dir_store_v1_sync_service_proto_enumTypes[0].Descriptor() -} - -func (SyncStatus) Type() protoreflect.EnumType { - return &file_agntcy_dir_store_v1_sync_service_proto_enumTypes[0] -} - -func (x SyncStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SyncStatus.Descriptor instead. -func (SyncStatus) EnumDescriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{0} -} - -// CreateSyncRequest defines the parameters for creating a new synchronization operation. -// -// Currently supports basic synchronization of all objects from a remote Directory. -// Future versions may include additional options for filtering and scheduling capabilities. -type CreateSyncRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // URL of the remote Registry to synchronize from. - // - // This should be a complete URL including protocol and port if non-standard. - // Examples: - // - "https://directory.example.com" - // - "http://localhost:8080" - // - "https://directory.example.com:9443" - RemoteDirectoryUrl string `protobuf:"bytes,1,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"` - // List of CIDs to synchronize from the remote Directory. - // If empty, all objects will be synchronized. 
- Cids []string `protobuf:"bytes,2,rep,name=cids,proto3" json:"cids,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateSyncRequest) Reset() { - *x = CreateSyncRequest{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateSyncRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSyncRequest) ProtoMessage() {} - -func (x *CreateSyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSyncRequest.ProtoReflect.Descriptor instead. -func (*CreateSyncRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{0} -} - -func (x *CreateSyncRequest) GetRemoteDirectoryUrl() string { - if x != nil { - return x.RemoteDirectoryUrl - } - return "" -} - -func (x *CreateSyncRequest) GetCids() []string { - if x != nil { - return x.Cids - } - return nil -} - -// CreateSyncResponse contains the result of creating a new synchronization operation. -type CreateSyncResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier for the created synchronization operation. - // This ID can be used with other SyncService RPCs to monitor and manage the sync. - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateSyncResponse) Reset() { - *x = CreateSyncResponse{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateSyncResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSyncResponse) ProtoMessage() {} - -func (x *CreateSyncResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSyncResponse.ProtoReflect.Descriptor instead. -func (*CreateSyncResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{1} -} - -func (x *CreateSyncResponse) GetSyncId() string { - if x != nil { - return x.SyncId - } - return "" -} - -// ListSyncsRequest specifies parameters for listing synchronization operations. -type ListSyncsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Optional limit on the number of results to return. - Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"` - // Optional offset for pagination of results. 
- Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListSyncsRequest) Reset() { - *x = ListSyncsRequest{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListSyncsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSyncsRequest) ProtoMessage() {} - -func (x *ListSyncsRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSyncsRequest.ProtoReflect.Descriptor instead. -func (*ListSyncsRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{2} -} - -func (x *ListSyncsRequest) GetLimit() uint32 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -func (x *ListSyncsRequest) GetOffset() uint32 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -// ListSyncItem represents a single synchronization in the list of all syncs. -type ListSyncsItem struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the synchronization operation. - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - // Current status of the synchronization operation. - Status SyncStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.store.v1.SyncStatus" json:"status,omitempty"` - // URL of the remote Directory being synchronized from. - RemoteDirectoryUrl string `protobuf:"bytes,3,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListSyncsItem) Reset() { - *x = ListSyncsItem{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListSyncsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSyncsItem) ProtoMessage() {} - -func (x *ListSyncsItem) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSyncsItem.ProtoReflect.Descriptor instead. -func (*ListSyncsItem) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{3} -} - -func (x *ListSyncsItem) GetSyncId() string { - if x != nil { - return x.SyncId - } - return "" -} - -func (x *ListSyncsItem) GetStatus() SyncStatus { - if x != nil { - return x.Status - } - return SyncStatus_SYNC_STATUS_UNSPECIFIED -} - -func (x *ListSyncsItem) GetRemoteDirectoryUrl() string { - if x != nil { - return x.RemoteDirectoryUrl - } - return "" -} - -// GetSyncRequest specifies which synchronization status to retrieve. -type GetSyncRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the synchronization operation to query. 
- SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetSyncRequest) Reset() { - *x = GetSyncRequest{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetSyncRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSyncRequest) ProtoMessage() {} - -func (x *GetSyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSyncRequest.ProtoReflect.Descriptor instead. -func (*GetSyncRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{4} -} - -func (x *GetSyncRequest) GetSyncId() string { - if x != nil { - return x.SyncId - } - return "" -} - -// GetSyncResponse provides detailed information about a specific synchronization operation. -type GetSyncResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the synchronization operation. - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - // Current status of the synchronization operation. - Status SyncStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.store.v1.SyncStatus" json:"status,omitempty"` - // URL of the remote Directory node being synchronized from. - RemoteDirectoryUrl string `protobuf:"bytes,3,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"` - // Timestamp when the synchronization operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - CreatedTime string `protobuf:"bytes,4,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` - // Timestamp of the most recent status update for this synchronization in the RFC3339 format. - LastUpdateTime string `protobuf:"bytes,5,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetSyncResponse) Reset() { - *x = GetSyncResponse{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetSyncResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSyncResponse) ProtoMessage() {} - -func (x *GetSyncResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSyncResponse.ProtoReflect.Descriptor instead. 
-func (*GetSyncResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{5} -} - -func (x *GetSyncResponse) GetSyncId() string { - if x != nil { - return x.SyncId - } - return "" -} - -func (x *GetSyncResponse) GetStatus() SyncStatus { - if x != nil { - return x.Status - } - return SyncStatus_SYNC_STATUS_UNSPECIFIED -} - -func (x *GetSyncResponse) GetRemoteDirectoryUrl() string { - if x != nil { - return x.RemoteDirectoryUrl - } - return "" -} - -func (x *GetSyncResponse) GetCreatedTime() string { - if x != nil { - return x.CreatedTime - } - return "" -} - -func (x *GetSyncResponse) GetLastUpdateTime() string { - if x != nil { - return x.LastUpdateTime - } - return "" -} - -// DeleteSyncRequest specifies which synchronization to delete. -type DeleteSyncRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Unique identifier of the synchronization operation to delete. - SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteSyncRequest) Reset() { - *x = DeleteSyncRequest{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteSyncRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSyncRequest) ProtoMessage() {} - -func (x *DeleteSyncRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSyncRequest.ProtoReflect.Descriptor instead. -func (*DeleteSyncRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{6} -} - -func (x *DeleteSyncRequest) GetSyncId() string { - if x != nil { - return x.SyncId - } - return "" -} - -// DeleteSyncResponse -type DeleteSyncResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteSyncResponse) Reset() { - *x = DeleteSyncResponse{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteSyncResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSyncResponse) ProtoMessage() {} - -func (x *DeleteSyncResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSyncResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteSyncResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{7} -} - -type RequestRegistryCredentialsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Identity of the requesting node - // For example: spiffe://example.org/service/foo - RequestingNodeId string `protobuf:"bytes,1,opt,name=requesting_node_id,json=requestingNodeId,proto3" json:"requesting_node_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RequestRegistryCredentialsRequest) Reset() { - *x = RequestRegistryCredentialsRequest{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RequestRegistryCredentialsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestRegistryCredentialsRequest) ProtoMessage() {} - -func (x *RequestRegistryCredentialsRequest) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestRegistryCredentialsRequest.ProtoReflect.Descriptor instead. -func (*RequestRegistryCredentialsRequest) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{8} -} - -func (x *RequestRegistryCredentialsRequest) GetRequestingNodeId() string { - if x != nil { - return x.RequestingNodeId - } - return "" -} - -type RequestRegistryCredentialsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Success status of the credential negotiation - Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - // Error message if negotiation failed - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - // URL of the remote Registry being synchronized from. - RemoteRegistryUrl string `protobuf:"bytes,3,opt,name=remote_registry_url,json=remoteRegistryUrl,proto3" json:"remote_registry_url,omitempty"` - // Registry credentials (oneof based on credential type) - // - // Types that are valid to be assigned to Credentials: - // - // *RequestRegistryCredentialsResponse_BasicAuth - Credentials isRequestRegistryCredentialsResponse_Credentials `protobuf_oneof:"credentials"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RequestRegistryCredentialsResponse) Reset() { - *x = RequestRegistryCredentialsResponse{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *RequestRegistryCredentialsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RequestRegistryCredentialsResponse) ProtoMessage() {} - -func (x *RequestRegistryCredentialsResponse) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RequestRegistryCredentialsResponse.ProtoReflect.Descriptor instead. 
-func (*RequestRegistryCredentialsResponse) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{9} -} - -func (x *RequestRegistryCredentialsResponse) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *RequestRegistryCredentialsResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -func (x *RequestRegistryCredentialsResponse) GetRemoteRegistryUrl() string { - if x != nil { - return x.RemoteRegistryUrl - } - return "" -} - -func (x *RequestRegistryCredentialsResponse) GetCredentials() isRequestRegistryCredentialsResponse_Credentials { - if x != nil { - return x.Credentials - } - return nil -} - -func (x *RequestRegistryCredentialsResponse) GetBasicAuth() *BasicAuthCredentials { - if x != nil { - if x, ok := x.Credentials.(*RequestRegistryCredentialsResponse_BasicAuth); ok { - return x.BasicAuth - } - } - return nil -} - -type isRequestRegistryCredentialsResponse_Credentials interface { - isRequestRegistryCredentialsResponse_Credentials() -} - -type RequestRegistryCredentialsResponse_BasicAuth struct { - BasicAuth *BasicAuthCredentials `protobuf:"bytes,4,opt,name=basic_auth,json=basicAuth,proto3,oneof"` // CertificateCredentials certificate = 5; -} - -func (*RequestRegistryCredentialsResponse_BasicAuth) isRequestRegistryCredentialsResponse_Credentials() { -} - -// Supporting credential type definitions -type BasicAuthCredentials struct { - state protoimpl.MessageState `protogen:"open.v1"` - Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BasicAuthCredentials) Reset() { - *x = BasicAuthCredentials{} - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BasicAuthCredentials) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicAuthCredentials) ProtoMessage() {} - -func (x *BasicAuthCredentials) ProtoReflect() protoreflect.Message { - mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicAuthCredentials.ProtoReflect.Descriptor instead. 
-func (*BasicAuthCredentials) Descriptor() ([]byte, []int) { - return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{10} -} - -func (x *BasicAuthCredentials) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *BasicAuthCredentials) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -var File_agntcy_dir_store_v1_sync_service_proto protoreflect.FileDescriptor - -var file_agntcy_dir_store_v1_sync_service_proto_rawDesc = string([]byte{ - 0x0a, 0x26, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x59, 0x0a, - 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x73, 0x22, 0x2d, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, - 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, - 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, - 0x63, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x30, 0x0a, 0x14, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, - 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x55, 0x72, 0x6c, 0x22, 0x29, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0xe2, 0x01, 0x0a, 0x0f, 0x47, 
0x65, - 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, - 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, - 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x55, 0x72, - 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x2c, - 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x14, 0x0a, 0x12, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x51, 0x0a, 0x21, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4e, - 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xee, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x55, 0x72, 0x6c, 0x12, 0x4a, 0x0a, 0x0a, 0x62, - 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x43, - 0x72, 0x65, 0x64, 
0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x62, 0x61, - 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x22, 0x4e, 0x0a, 0x14, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, - 0x75, 0x74, 0x68, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x2a, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x53, - 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, - 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x59, 0x4e, 0x43, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, - 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x04, - 0x12, 0x17, 0x0a, 0x13, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, - 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0x8b, 0x04, 0x0a, 0x0b, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x26, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x25, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, - 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x61, - 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x49, 0x74, 0x65, 0x6d, - 0x30, 0x01, 0x12, 0x54, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x23, 0x2e, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x26, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x36, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, - 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, - 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xbe, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, - 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, - 0x53, 0xaa, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, - 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1f, - 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, - 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x16, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, - 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_agntcy_dir_store_v1_sync_service_proto_rawDescOnce sync.Once - file_agntcy_dir_store_v1_sync_service_proto_rawDescData []byte -) - -func file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP() []byte { - file_agntcy_dir_store_v1_sync_service_proto_rawDescOnce.Do(func() { - file_agntcy_dir_store_v1_sync_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_sync_service_proto_rawDesc), len(file_agntcy_dir_store_v1_sync_service_proto_rawDesc))) - }) - return file_agntcy_dir_store_v1_sync_service_proto_rawDescData -} - -var file_agntcy_dir_store_v1_sync_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_agntcy_dir_store_v1_sync_service_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_agntcy_dir_store_v1_sync_service_proto_goTypes = []any{ - 
(SyncStatus)(0), // 0: agntcy.dir.store.v1.SyncStatus - (*CreateSyncRequest)(nil), // 1: agntcy.dir.store.v1.CreateSyncRequest - (*CreateSyncResponse)(nil), // 2: agntcy.dir.store.v1.CreateSyncResponse - (*ListSyncsRequest)(nil), // 3: agntcy.dir.store.v1.ListSyncsRequest - (*ListSyncsItem)(nil), // 4: agntcy.dir.store.v1.ListSyncsItem - (*GetSyncRequest)(nil), // 5: agntcy.dir.store.v1.GetSyncRequest - (*GetSyncResponse)(nil), // 6: agntcy.dir.store.v1.GetSyncResponse - (*DeleteSyncRequest)(nil), // 7: agntcy.dir.store.v1.DeleteSyncRequest - (*DeleteSyncResponse)(nil), // 8: agntcy.dir.store.v1.DeleteSyncResponse - (*RequestRegistryCredentialsRequest)(nil), // 9: agntcy.dir.store.v1.RequestRegistryCredentialsRequest - (*RequestRegistryCredentialsResponse)(nil), // 10: agntcy.dir.store.v1.RequestRegistryCredentialsResponse - (*BasicAuthCredentials)(nil), // 11: agntcy.dir.store.v1.BasicAuthCredentials -} -var file_agntcy_dir_store_v1_sync_service_proto_depIdxs = []int32{ - 0, // 0: agntcy.dir.store.v1.ListSyncsItem.status:type_name -> agntcy.dir.store.v1.SyncStatus - 0, // 1: agntcy.dir.store.v1.GetSyncResponse.status:type_name -> agntcy.dir.store.v1.SyncStatus - 11, // 2: agntcy.dir.store.v1.RequestRegistryCredentialsResponse.basic_auth:type_name -> agntcy.dir.store.v1.BasicAuthCredentials - 1, // 3: agntcy.dir.store.v1.SyncService.CreateSync:input_type -> agntcy.dir.store.v1.CreateSyncRequest - 3, // 4: agntcy.dir.store.v1.SyncService.ListSyncs:input_type -> agntcy.dir.store.v1.ListSyncsRequest - 5, // 5: agntcy.dir.store.v1.SyncService.GetSync:input_type -> agntcy.dir.store.v1.GetSyncRequest - 7, // 6: agntcy.dir.store.v1.SyncService.DeleteSync:input_type -> agntcy.dir.store.v1.DeleteSyncRequest - 9, // 7: agntcy.dir.store.v1.SyncService.RequestRegistryCredentials:input_type -> agntcy.dir.store.v1.RequestRegistryCredentialsRequest - 2, // 8: agntcy.dir.store.v1.SyncService.CreateSync:output_type -> agntcy.dir.store.v1.CreateSyncResponse - 4, // 9: agntcy.dir.store.v1.SyncService.ListSyncs:output_type -> agntcy.dir.store.v1.ListSyncsItem - 6, // 10: agntcy.dir.store.v1.SyncService.GetSync:output_type -> agntcy.dir.store.v1.GetSyncResponse - 8, // 11: agntcy.dir.store.v1.SyncService.DeleteSync:output_type -> agntcy.dir.store.v1.DeleteSyncResponse - 10, // 12: agntcy.dir.store.v1.SyncService.RequestRegistryCredentials:output_type -> agntcy.dir.store.v1.RequestRegistryCredentialsResponse - 8, // [8:13] is the sub-list for method output_type - 3, // [3:8] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_agntcy_dir_store_v1_sync_service_proto_init() } -func file_agntcy_dir_store_v1_sync_service_proto_init() { - if File_agntcy_dir_store_v1_sync_service_proto != nil { - return - } - file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2].OneofWrappers = []any{} - file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9].OneofWrappers = []any{ - (*RequestRegistryCredentialsResponse_BasicAuth)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_sync_service_proto_rawDesc), len(file_agntcy_dir_store_v1_sync_service_proto_rawDesc)), - NumEnums: 1, - NumMessages: 11, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_agntcy_dir_store_v1_sync_service_proto_goTypes, - 
DependencyIndexes: file_agntcy_dir_store_v1_sync_service_proto_depIdxs, - EnumInfos: file_agntcy_dir_store_v1_sync_service_proto_enumTypes, - MessageInfos: file_agntcy_dir_store_v1_sync_service_proto_msgTypes, - }.Build() - File_agntcy_dir_store_v1_sync_service_proto = out.File - file_agntcy_dir_store_v1_sync_service_proto_goTypes = nil - file_agntcy_dir_store_v1_sync_service_proto_depIdxs = nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: agntcy/dir/store/v1/sync_service.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SyncStatus enumeration defines the possible states of a synchronization operation. +type SyncStatus int32 + +const ( + // Default/unset status - should not be used in practice + SyncStatus_SYNC_STATUS_UNSPECIFIED SyncStatus = 0 + // Sync operation has been created but not yet started + SyncStatus_SYNC_STATUS_PENDING SyncStatus = 1 + // Sync operation is actively discovering and transferring objects + SyncStatus_SYNC_STATUS_IN_PROGRESS SyncStatus = 2 + // Sync operation encountered an error and stopped + SyncStatus_SYNC_STATUS_FAILED SyncStatus = 3 + // Sync operation has been marked for deletion but cleanup not yet started + SyncStatus_SYNC_STATUS_DELETE_PENDING SyncStatus = 4 + // Sync operation has been successfully deleted and cleaned up + SyncStatus_SYNC_STATUS_DELETED SyncStatus = 5 +) + +// Enum value maps for SyncStatus. +var ( + SyncStatus_name = map[int32]string{ + 0: "SYNC_STATUS_UNSPECIFIED", + 1: "SYNC_STATUS_PENDING", + 2: "SYNC_STATUS_IN_PROGRESS", + 3: "SYNC_STATUS_FAILED", + 4: "SYNC_STATUS_DELETE_PENDING", + 5: "SYNC_STATUS_DELETED", + } + SyncStatus_value = map[string]int32{ + "SYNC_STATUS_UNSPECIFIED": 0, + "SYNC_STATUS_PENDING": 1, + "SYNC_STATUS_IN_PROGRESS": 2, + "SYNC_STATUS_FAILED": 3, + "SYNC_STATUS_DELETE_PENDING": 4, + "SYNC_STATUS_DELETED": 5, + } +) + +func (x SyncStatus) Enum() *SyncStatus { + p := new(SyncStatus) + *p = x + return p +} + +func (x SyncStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SyncStatus) Descriptor() protoreflect.EnumDescriptor { + return file_agntcy_dir_store_v1_sync_service_proto_enumTypes[0].Descriptor() +} + +func (SyncStatus) Type() protoreflect.EnumType { + return &file_agntcy_dir_store_v1_sync_service_proto_enumTypes[0] +} + +func (x SyncStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SyncStatus.Descriptor instead. +func (SyncStatus) EnumDescriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{0} +} + +// CreateSyncRequest defines the parameters for creating a new synchronization operation. +// +// Currently supports basic synchronization of all objects from a remote Directory. +// Future versions may include additional options for filtering and scheduling capabilities. 
+type CreateSyncRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // URL of the remote Registry to synchronize from. + // + // This should be a complete URL including protocol and port if non-standard. + // Examples: + // - "https://directory.example.com" + // - "http://localhost:8080" + // - "https://directory.example.com:9443" + RemoteDirectoryUrl string `protobuf:"bytes,1,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"` + // List of CIDs to synchronize from the remote Directory. + // If empty, all objects will be synchronized. + Cids []string `protobuf:"bytes,2,rep,name=cids,proto3" json:"cids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSyncRequest) Reset() { + *x = CreateSyncRequest{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSyncRequest) ProtoMessage() {} + +func (x *CreateSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSyncRequest.ProtoReflect.Descriptor instead. +func (*CreateSyncRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSyncRequest) GetRemoteDirectoryUrl() string { + if x != nil { + return x.RemoteDirectoryUrl + } + return "" +} + +func (x *CreateSyncRequest) GetCids() []string { + if x != nil { + return x.Cids + } + return nil +} + +// CreateSyncResponse contains the result of creating a new synchronization operation. +type CreateSyncResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier for the created synchronization operation. + // This ID can be used with other SyncService RPCs to monitor and manage the sync. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSyncResponse) Reset() { + *x = CreateSyncResponse{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSyncResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSyncResponse) ProtoMessage() {} + +func (x *CreateSyncResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSyncResponse.ProtoReflect.Descriptor instead. +func (*CreateSyncResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateSyncResponse) GetSyncId() string { + if x != nil { + return x.SyncId + } + return "" +} + +// ListSyncsRequest specifies parameters for listing synchronization operations. 
+type ListSyncsRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Optional limit on the number of results to return.
+	Limit *uint32 `protobuf:"varint,2,opt,name=limit,proto3,oneof" json:"limit,omitempty"`
+	// Optional offset for pagination of results.
+	Offset *uint32 `protobuf:"varint,3,opt,name=offset,proto3,oneof" json:"offset,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *ListSyncsRequest) Reset() {
+	*x = ListSyncsRequest{}
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSyncsRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSyncsRequest) ProtoMessage() {}
+
+func (x *ListSyncsRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSyncsRequest.ProtoReflect.Descriptor instead.
+func (*ListSyncsRequest) Descriptor() ([]byte, []int) {
+	return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListSyncsRequest) GetLimit() uint32 {
+	if x != nil && x.Limit != nil {
+		return *x.Limit
+	}
+	return 0
+}
+
+func (x *ListSyncsRequest) GetOffset() uint32 {
+	if x != nil && x.Offset != nil {
+		return *x.Offset
+	}
+	return 0
+}
+
+// ListSyncsItem represents a single synchronization in the list of all syncs.
+type ListSyncsItem struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Unique identifier of the synchronization operation.
+	SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"`
+	// Current status of the synchronization operation.
+	Status SyncStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.store.v1.SyncStatus" json:"status,omitempty"`
+	// URL of the remote Directory being synchronized from.
+	RemoteDirectoryUrl string `protobuf:"bytes,3,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
+}
+
+func (x *ListSyncsItem) Reset() {
+	*x = ListSyncsItem{}
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSyncsItem) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSyncsItem) ProtoMessage() {}
+
+func (x *ListSyncsItem) ProtoReflect() protoreflect.Message {
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSyncsItem.ProtoReflect.Descriptor instead.
+func (*ListSyncsItem) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{3} +} + +func (x *ListSyncsItem) GetSyncId() string { + if x != nil { + return x.SyncId + } + return "" +} + +func (x *ListSyncsItem) GetStatus() SyncStatus { + if x != nil { + return x.Status + } + return SyncStatus_SYNC_STATUS_UNSPECIFIED +} + +func (x *ListSyncsItem) GetRemoteDirectoryUrl() string { + if x != nil { + return x.RemoteDirectoryUrl + } + return "" +} + +// GetSyncRequest specifies which synchronization status to retrieve. +type GetSyncRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the synchronization operation to query. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetSyncRequest) Reset() { + *x = GetSyncRequest{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSyncRequest) ProtoMessage() {} + +func (x *GetSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSyncRequest.ProtoReflect.Descriptor instead. +func (*GetSyncRequest) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{4} +} + +func (x *GetSyncRequest) GetSyncId() string { + if x != nil { + return x.SyncId + } + return "" +} + +// GetSyncResponse provides detailed information about a specific synchronization operation. +type GetSyncResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the synchronization operation. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + // Current status of the synchronization operation. + Status SyncStatus `protobuf:"varint,2,opt,name=status,proto3,enum=agntcy.dir.store.v1.SyncStatus" json:"status,omitempty"` + // URL of the remote Directory node being synchronized from. + RemoteDirectoryUrl string `protobuf:"bytes,3,opt,name=remote_directory_url,json=remoteDirectoryUrl,proto3" json:"remote_directory_url,omitempty"` + // Timestamp when the synchronization operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + CreatedTime string `protobuf:"bytes,4,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + // Timestamp of the most recent status update for this synchronization in the RFC3339 format. 
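+	// For example: "2025-01-02T15:04:05Z" (an illustrative RFC3339 timestamp).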
+ LastUpdateTime string `protobuf:"bytes,5,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetSyncResponse) Reset() { + *x = GetSyncResponse{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSyncResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSyncResponse) ProtoMessage() {} + +func (x *GetSyncResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSyncResponse.ProtoReflect.Descriptor instead. +func (*GetSyncResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{5} +} + +func (x *GetSyncResponse) GetSyncId() string { + if x != nil { + return x.SyncId + } + return "" +} + +func (x *GetSyncResponse) GetStatus() SyncStatus { + if x != nil { + return x.Status + } + return SyncStatus_SYNC_STATUS_UNSPECIFIED +} + +func (x *GetSyncResponse) GetRemoteDirectoryUrl() string { + if x != nil { + return x.RemoteDirectoryUrl + } + return "" +} + +func (x *GetSyncResponse) GetCreatedTime() string { + if x != nil { + return x.CreatedTime + } + return "" +} + +func (x *GetSyncResponse) GetLastUpdateTime() string { + if x != nil { + return x.LastUpdateTime + } + return "" +} + +// DeleteSyncRequest specifies which synchronization to delete. +type DeleteSyncRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unique identifier of the synchronization operation to delete. + SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteSyncRequest) Reset() { + *x = DeleteSyncRequest{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSyncRequest) ProtoMessage() {} + +func (x *DeleteSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSyncRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteSyncRequest) Descriptor() ([]byte, []int) {
+	return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *DeleteSyncRequest) GetSyncId() string {
+	if x != nil {
+		return x.SyncId
+	}
+	return ""
+}
+
+// DeleteSyncResponse is the empty response returned after a synchronization is deleted.
+type DeleteSyncResponse struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *DeleteSyncResponse) Reset() {
+	*x = DeleteSyncResponse{}
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteSyncResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteSyncResponse) ProtoMessage() {}
+
+func (x *DeleteSyncResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteSyncResponse.ProtoReflect.Descriptor instead.
+func (*DeleteSyncResponse) Descriptor() ([]byte, []int) {
+	return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{7}
+}
+
+type RequestRegistryCredentialsRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Identity of the requesting node.
+	// For example: spiffe://example.org/service/foo
+	RequestingNodeId string `protobuf:"bytes,1,opt,name=requesting_node_id,json=requestingNodeId,proto3" json:"requesting_node_id,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *RequestRegistryCredentialsRequest) Reset() {
+	*x = RequestRegistryCredentialsRequest{}
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *RequestRegistryCredentialsRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestRegistryCredentialsRequest) ProtoMessage() {}
+
+func (x *RequestRegistryCredentialsRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestRegistryCredentialsRequest.ProtoReflect.Descriptor instead.
+func (*RequestRegistryCredentialsRequest) Descriptor() ([]byte, []int) {
+	return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *RequestRegistryCredentialsRequest) GetRequestingNodeId() string {
+	if x != nil {
+		return x.RequestingNodeId
+	}
+	return ""
+}
+
+type RequestRegistryCredentialsResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Success status of the credential negotiation.
+	Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
+	// Error message if negotiation failed.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+	// URL of the remote Registry being synchronized from.
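+	// For example: "https://registry.example.com:5000" (an illustrative registry endpoint).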
+ RemoteRegistryUrl string `protobuf:"bytes,3,opt,name=remote_registry_url,json=remoteRegistryUrl,proto3" json:"remote_registry_url,omitempty"` + // Registry credentials (oneof based on credential type) + // + // Types that are valid to be assigned to Credentials: + // + // *RequestRegistryCredentialsResponse_BasicAuth + Credentials isRequestRegistryCredentialsResponse_Credentials `protobuf_oneof:"credentials"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestRegistryCredentialsResponse) Reset() { + *x = RequestRegistryCredentialsResponse{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestRegistryCredentialsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestRegistryCredentialsResponse) ProtoMessage() {} + +func (x *RequestRegistryCredentialsResponse) ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestRegistryCredentialsResponse.ProtoReflect.Descriptor instead. +func (*RequestRegistryCredentialsResponse) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{9} +} + +func (x *RequestRegistryCredentialsResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *RequestRegistryCredentialsResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *RequestRegistryCredentialsResponse) GetRemoteRegistryUrl() string { + if x != nil { + return x.RemoteRegistryUrl + } + return "" +} + +func (x *RequestRegistryCredentialsResponse) GetCredentials() isRequestRegistryCredentialsResponse_Credentials { + if x != nil { + return x.Credentials + } + return nil +} + +func (x *RequestRegistryCredentialsResponse) GetBasicAuth() *BasicAuthCredentials { + if x != nil { + if x, ok := x.Credentials.(*RequestRegistryCredentialsResponse_BasicAuth); ok { + return x.BasicAuth + } + } + return nil +} + +type isRequestRegistryCredentialsResponse_Credentials interface { + isRequestRegistryCredentialsResponse_Credentials() +} + +type RequestRegistryCredentialsResponse_BasicAuth struct { + BasicAuth *BasicAuthCredentials `protobuf:"bytes,4,opt,name=basic_auth,json=basicAuth,proto3,oneof"` // CertificateCredentials certificate = 5; +} + +func (*RequestRegistryCredentialsResponse_BasicAuth) isRequestRegistryCredentialsResponse_Credentials() { +} + +// Supporting credential type definitions +type BasicAuthCredentials struct { + state protoimpl.MessageState `protogen:"open.v1"` + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BasicAuthCredentials) Reset() { + *x = BasicAuthCredentials{} + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BasicAuthCredentials) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicAuthCredentials) ProtoMessage() {} + +func (x *BasicAuthCredentials) 
ProtoReflect() protoreflect.Message { + mi := &file_agntcy_dir_store_v1_sync_service_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BasicAuthCredentials.ProtoReflect.Descriptor instead. +func (*BasicAuthCredentials) Descriptor() ([]byte, []int) { + return file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP(), []int{10} +} + +func (x *BasicAuthCredentials) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *BasicAuthCredentials) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +var File_agntcy_dir_store_v1_sync_service_proto protoreflect.FileDescriptor + +var file_agntcy_dir_store_v1_sync_service_proto_rawDesc = string([]byte{ + 0x0a, 0x26, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x59, 0x0a, + 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x79, 0x55, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x73, 0x22, 0x2d, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, + 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, + 0x07, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x93, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, + 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, + 0x63, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x30, 0x0a, 0x14, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, + 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x44, 0x69, 
0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x55, 0x72, 0x6c, 0x22, 0x29, + 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0xe2, 0x01, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, + 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, + 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x55, 0x72, + 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x2c, + 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x14, 0x0a, 0x12, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x51, 0x0a, 0x21, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4e, + 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xee, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 
0x55, 0x72, 0x6c, 0x12, 0x4a, 0x0a, 0x0a, 0x62, + 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x29, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x62, 0x61, + 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x22, 0x4e, 0x0a, 0x14, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, + 0x75, 0x74, 0x68, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x2a, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x79, 0x6e, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x53, + 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x59, 0x4e, 0x43, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, + 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x04, + 0x12, 0x17, 0x0a, 0x13, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0x8b, 0x04, 0x0a, 0x0b, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x26, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x25, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, + 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x61, + 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x30, 0x01, 0x12, 0x54, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x23, 0x2e, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 
0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x26, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x36, 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, + 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, + 0x2e, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xbe, 0x01, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x2e, + 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x64, 0x69, 0x72, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x42, 0x10, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2f, 0x64, 0x69, 0x72, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x41, 0x44, + 0x53, 0xaa, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x2e, 0x44, 0x69, 0x72, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, + 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x1f, + 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x5c, 0x44, 0x69, 0x72, 0x5c, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x16, 0x41, 0x67, 0x6e, 0x74, 0x63, 0x79, 0x3a, 0x3a, 0x44, 0x69, 0x72, 0x3a, 0x3a, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_agntcy_dir_store_v1_sync_service_proto_rawDescOnce sync.Once + file_agntcy_dir_store_v1_sync_service_proto_rawDescData []byte +) + +func file_agntcy_dir_store_v1_sync_service_proto_rawDescGZIP() []byte { + file_agntcy_dir_store_v1_sync_service_proto_rawDescOnce.Do(func() { + file_agntcy_dir_store_v1_sync_service_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_sync_service_proto_rawDesc), 
len(file_agntcy_dir_store_v1_sync_service_proto_rawDesc))) + }) + return file_agntcy_dir_store_v1_sync_service_proto_rawDescData +} + +var file_agntcy_dir_store_v1_sync_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_agntcy_dir_store_v1_sync_service_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_agntcy_dir_store_v1_sync_service_proto_goTypes = []any{ + (SyncStatus)(0), // 0: agntcy.dir.store.v1.SyncStatus + (*CreateSyncRequest)(nil), // 1: agntcy.dir.store.v1.CreateSyncRequest + (*CreateSyncResponse)(nil), // 2: agntcy.dir.store.v1.CreateSyncResponse + (*ListSyncsRequest)(nil), // 3: agntcy.dir.store.v1.ListSyncsRequest + (*ListSyncsItem)(nil), // 4: agntcy.dir.store.v1.ListSyncsItem + (*GetSyncRequest)(nil), // 5: agntcy.dir.store.v1.GetSyncRequest + (*GetSyncResponse)(nil), // 6: agntcy.dir.store.v1.GetSyncResponse + (*DeleteSyncRequest)(nil), // 7: agntcy.dir.store.v1.DeleteSyncRequest + (*DeleteSyncResponse)(nil), // 8: agntcy.dir.store.v1.DeleteSyncResponse + (*RequestRegistryCredentialsRequest)(nil), // 9: agntcy.dir.store.v1.RequestRegistryCredentialsRequest + (*RequestRegistryCredentialsResponse)(nil), // 10: agntcy.dir.store.v1.RequestRegistryCredentialsResponse + (*BasicAuthCredentials)(nil), // 11: agntcy.dir.store.v1.BasicAuthCredentials +} +var file_agntcy_dir_store_v1_sync_service_proto_depIdxs = []int32{ + 0, // 0: agntcy.dir.store.v1.ListSyncsItem.status:type_name -> agntcy.dir.store.v1.SyncStatus + 0, // 1: agntcy.dir.store.v1.GetSyncResponse.status:type_name -> agntcy.dir.store.v1.SyncStatus + 11, // 2: agntcy.dir.store.v1.RequestRegistryCredentialsResponse.basic_auth:type_name -> agntcy.dir.store.v1.BasicAuthCredentials + 1, // 3: agntcy.dir.store.v1.SyncService.CreateSync:input_type -> agntcy.dir.store.v1.CreateSyncRequest + 3, // 4: agntcy.dir.store.v1.SyncService.ListSyncs:input_type -> agntcy.dir.store.v1.ListSyncsRequest + 5, // 5: agntcy.dir.store.v1.SyncService.GetSync:input_type -> agntcy.dir.store.v1.GetSyncRequest + 7, // 6: agntcy.dir.store.v1.SyncService.DeleteSync:input_type -> agntcy.dir.store.v1.DeleteSyncRequest + 9, // 7: agntcy.dir.store.v1.SyncService.RequestRegistryCredentials:input_type -> agntcy.dir.store.v1.RequestRegistryCredentialsRequest + 2, // 8: agntcy.dir.store.v1.SyncService.CreateSync:output_type -> agntcy.dir.store.v1.CreateSyncResponse + 4, // 9: agntcy.dir.store.v1.SyncService.ListSyncs:output_type -> agntcy.dir.store.v1.ListSyncsItem + 6, // 10: agntcy.dir.store.v1.SyncService.GetSync:output_type -> agntcy.dir.store.v1.GetSyncResponse + 8, // 11: agntcy.dir.store.v1.SyncService.DeleteSync:output_type -> agntcy.dir.store.v1.DeleteSyncResponse + 10, // 12: agntcy.dir.store.v1.SyncService.RequestRegistryCredentials:output_type -> agntcy.dir.store.v1.RequestRegistryCredentialsResponse + 8, // [8:13] is the sub-list for method output_type + 3, // [3:8] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_agntcy_dir_store_v1_sync_service_proto_init() } +func file_agntcy_dir_store_v1_sync_service_proto_init() { + if File_agntcy_dir_store_v1_sync_service_proto != nil { + return + } + file_agntcy_dir_store_v1_sync_service_proto_msgTypes[2].OneofWrappers = []any{} + file_agntcy_dir_store_v1_sync_service_proto_msgTypes[9].OneofWrappers = []any{ + (*RequestRegistryCredentialsResponse_BasicAuth)(nil), + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_agntcy_dir_store_v1_sync_service_proto_rawDesc), len(file_agntcy_dir_store_v1_sync_service_proto_rawDesc)), + NumEnums: 1, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_agntcy_dir_store_v1_sync_service_proto_goTypes, + DependencyIndexes: file_agntcy_dir_store_v1_sync_service_proto_depIdxs, + EnumInfos: file_agntcy_dir_store_v1_sync_service_proto_enumTypes, + MessageInfos: file_agntcy_dir_store_v1_sync_service_proto_msgTypes, + }.Build() + File_agntcy_dir_store_v1_sync_service_proto = out.File + file_agntcy_dir_store_v1_sync_service_proto_goTypes = nil + file_agntcy_dir_store_v1_sync_service_proto_depIdxs = nil +} diff --git a/api/store/v1/sync_service_grpc.pb.go b/api/store/v1/sync_service_grpc.pb.go index c12f83a68..027bd5d33 100644 --- a/api/store/v1/sync_service_grpc.pb.go +++ b/api/store/v1/sync_service_grpc.pb.go @@ -1,340 +1,340 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc (unknown) -// source: agntcy/dir/store/v1/sync_service.proto - -package v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - SyncService_CreateSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/CreateSync" - SyncService_ListSyncs_FullMethodName = "/agntcy.dir.store.v1.SyncService/ListSyncs" - SyncService_GetSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/GetSync" - SyncService_DeleteSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/DeleteSync" - SyncService_RequestRegistryCredentials_FullMethodName = "/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials" -) - -// SyncServiceClient is the client API for SyncService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// SyncService provides functionality for synchronizing objects between Directory nodes. -// -// This service enables one-way synchronization from a remote Directory node to the local node, -// allowing distributed Directory instances to share and replicate objects. The service supports -// both on-demand synchronization and tracking of sync operations through their lifecycle. -type SyncServiceClient interface { - // CreateSync initiates a new synchronization operation from a remote Directory node. - // - // The operation is non-blocking and returns immediately with a sync ID that can be used - // to track progress and manage the sync operation. - CreateSync(ctx context.Context, in *CreateSyncRequest, opts ...grpc.CallOption) (*CreateSyncResponse, error) - // ListSyncs returns a stream of all sync operations known to the system. - // - // This includes active, completed, and failed synchronizations. - ListSyncs(ctx context.Context, in *ListSyncsRequest, opts ...grpc.CallOption) (SyncService_ListSyncsClient, error) - // GetSync retrieves detailed status information for a specific synchronization. 
- GetSync(ctx context.Context, in *GetSyncRequest, opts ...grpc.CallOption) (*GetSyncResponse, error) - // DeleteSync removes a synchronization operation from the system. - DeleteSync(ctx context.Context, in *DeleteSyncRequest, opts ...grpc.CallOption) (*DeleteSyncResponse, error) - // RequestRegistryCredentials requests registry credentials between two Directory nodes. - // - // This RPC allows a requesting node to authenticate with this node and obtain - // temporary registry credentials for secure Zot-based synchronization. - RequestRegistryCredentials(ctx context.Context, in *RequestRegistryCredentialsRequest, opts ...grpc.CallOption) (*RequestRegistryCredentialsResponse, error) -} - -type syncServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewSyncServiceClient(cc grpc.ClientConnInterface) SyncServiceClient { - return &syncServiceClient{cc} -} - -func (c *syncServiceClient) CreateSync(ctx context.Context, in *CreateSyncRequest, opts ...grpc.CallOption) (*CreateSyncResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateSyncResponse) - err := c.cc.Invoke(ctx, SyncService_CreateSync_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *syncServiceClient) ListSyncs(ctx context.Context, in *ListSyncsRequest, opts ...grpc.CallOption) (SyncService_ListSyncsClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &SyncService_ServiceDesc.Streams[0], SyncService_ListSyncs_FullMethodName, cOpts...) - if err != nil { - return nil, err - } - x := &syncServiceListSyncsClient{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type SyncService_ListSyncsClient interface { - Recv() (*ListSyncsItem, error) - grpc.ClientStream -} - -type syncServiceListSyncsClient struct { - grpc.ClientStream -} - -func (x *syncServiceListSyncsClient) Recv() (*ListSyncsItem, error) { - m := new(ListSyncsItem) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *syncServiceClient) GetSync(ctx context.Context, in *GetSyncRequest, opts ...grpc.CallOption) (*GetSyncResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetSyncResponse) - err := c.cc.Invoke(ctx, SyncService_GetSync_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *syncServiceClient) DeleteSync(ctx context.Context, in *DeleteSyncRequest, opts ...grpc.CallOption) (*DeleteSyncResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(DeleteSyncResponse) - err := c.cc.Invoke(ctx, SyncService_DeleteSync_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *syncServiceClient) RequestRegistryCredentials(ctx context.Context, in *RequestRegistryCredentialsRequest, opts ...grpc.CallOption) (*RequestRegistryCredentialsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(RequestRegistryCredentialsResponse) - err := c.cc.Invoke(ctx, SyncService_RequestRegistryCredentials_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SyncServiceServer is the server API for SyncService service. 
-// All implementations should embed UnimplementedSyncServiceServer -// for forward compatibility. -// -// SyncService provides functionality for synchronizing objects between Directory nodes. -// -// This service enables one-way synchronization from a remote Directory node to the local node, -// allowing distributed Directory instances to share and replicate objects. The service supports -// both on-demand synchronization and tracking of sync operations through their lifecycle. -type SyncServiceServer interface { - // CreateSync initiates a new synchronization operation from a remote Directory node. - // - // The operation is non-blocking and returns immediately with a sync ID that can be used - // to track progress and manage the sync operation. - CreateSync(context.Context, *CreateSyncRequest) (*CreateSyncResponse, error) - // ListSyncs returns a stream of all sync operations known to the system. - // - // This includes active, completed, and failed synchronizations. - ListSyncs(*ListSyncsRequest, SyncService_ListSyncsServer) error - // GetSync retrieves detailed status information for a specific synchronization. - GetSync(context.Context, *GetSyncRequest) (*GetSyncResponse, error) - // DeleteSync removes a synchronization operation from the system. - DeleteSync(context.Context, *DeleteSyncRequest) (*DeleteSyncResponse, error) - // RequestRegistryCredentials requests registry credentials between two Directory nodes. - // - // This RPC allows a requesting node to authenticate with this node and obtain - // temporary registry credentials for secure Zot-based synchronization. - RequestRegistryCredentials(context.Context, *RequestRegistryCredentialsRequest) (*RequestRegistryCredentialsResponse, error) -} - -// UnimplementedSyncServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedSyncServiceServer struct{} - -func (UnimplementedSyncServiceServer) CreateSync(context.Context, *CreateSyncRequest) (*CreateSyncResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateSync not implemented") -} -func (UnimplementedSyncServiceServer) ListSyncs(*ListSyncsRequest, SyncService_ListSyncsServer) error { - return status.Errorf(codes.Unimplemented, "method ListSyncs not implemented") -} -func (UnimplementedSyncServiceServer) GetSync(context.Context, *GetSyncRequest) (*GetSyncResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSync not implemented") -} -func (UnimplementedSyncServiceServer) DeleteSync(context.Context, *DeleteSyncRequest) (*DeleteSyncResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteSync not implemented") -} -func (UnimplementedSyncServiceServer) RequestRegistryCredentials(context.Context, *RequestRegistryCredentialsRequest) (*RequestRegistryCredentialsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RequestRegistryCredentials not implemented") -} -func (UnimplementedSyncServiceServer) testEmbeddedByValue() {} - -// UnsafeSyncServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SyncServiceServer will -// result in compilation errors. 
-type UnsafeSyncServiceServer interface { - mustEmbedUnimplementedSyncServiceServer() -} - -func RegisterSyncServiceServer(s grpc.ServiceRegistrar, srv SyncServiceServer) { - // If the following call pancis, it indicates UnimplementedSyncServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SyncService_ServiceDesc, srv) -} - -func _SyncService_CreateSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateSyncRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SyncServiceServer).CreateSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SyncService_CreateSync_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SyncServiceServer).CreateSync(ctx, req.(*CreateSyncRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SyncService_ListSyncs_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListSyncsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(SyncServiceServer).ListSyncs(m, &syncServiceListSyncsServer{ServerStream: stream}) -} - -type SyncService_ListSyncsServer interface { - Send(*ListSyncsItem) error - grpc.ServerStream -} - -type syncServiceListSyncsServer struct { - grpc.ServerStream -} - -func (x *syncServiceListSyncsServer) Send(m *ListSyncsItem) error { - return x.ServerStream.SendMsg(m) -} - -func _SyncService_GetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSyncRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SyncServiceServer).GetSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SyncService_GetSync_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SyncServiceServer).GetSync(ctx, req.(*GetSyncRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SyncService_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSyncRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SyncServiceServer).DeleteSync(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SyncService_DeleteSync_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SyncServiceServer).DeleteSync(ctx, req.(*DeleteSyncRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SyncService_RequestRegistryCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestRegistryCredentialsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SyncServiceServer).RequestRegistryCredentials(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - 
FullMethod: SyncService_RequestRegistryCredentials_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SyncServiceServer).RequestRegistryCredentials(ctx, req.(*RequestRegistryCredentialsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// SyncService_ServiceDesc is the grpc.ServiceDesc for SyncService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SyncService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "agntcy.dir.store.v1.SyncService", - HandlerType: (*SyncServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateSync", - Handler: _SyncService_CreateSync_Handler, - }, - { - MethodName: "GetSync", - Handler: _SyncService_GetSync_Handler, - }, - { - MethodName: "DeleteSync", - Handler: _SyncService_DeleteSync_Handler, - }, - { - MethodName: "RequestRegistryCredentials", - Handler: _SyncService_RequestRegistryCredentials_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListSyncs", - Handler: _SyncService_ListSyncs_Handler, - ServerStreams: true, - }, - }, - Metadata: "agntcy/dir/store/v1/sync_service.proto", -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: agntcy/dir/store/v1/sync_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 + +const ( + SyncService_CreateSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/CreateSync" + SyncService_ListSyncs_FullMethodName = "/agntcy.dir.store.v1.SyncService/ListSyncs" + SyncService_GetSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/GetSync" + SyncService_DeleteSync_FullMethodName = "/agntcy.dir.store.v1.SyncService/DeleteSync" + SyncService_RequestRegistryCredentials_FullMethodName = "/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials" +) + +// SyncServiceClient is the client API for SyncService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// SyncService provides functionality for synchronizing objects between Directory nodes. +// +// This service enables one-way synchronization from a remote Directory node to the local node, +// allowing distributed Directory instances to share and replicate objects. The service supports +// both on-demand synchronization and tracking of sync operations through their lifecycle. +type SyncServiceClient interface { + // CreateSync initiates a new synchronization operation from a remote Directory node. + // + // The operation is non-blocking and returns immediately with a sync ID that can be used + // to track progress and manage the sync operation. + CreateSync(ctx context.Context, in *CreateSyncRequest, opts ...grpc.CallOption) (*CreateSyncResponse, error) + // ListSyncs returns a stream of all sync operations known to the system. + // + // This includes active, completed, and failed synchronizations. 
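+	//
+	// A minimal consumption sketch (illustrative only, not part of the
+	// generated API): it assumes a connected SyncServiceClient named "client",
+	// a context named "ctx", and the standard "io" and "log" packages:
+	//
+	//	stream, err := client.ListSyncs(ctx, &ListSyncsRequest{})
+	//	if err != nil {
+	//		return err
+	//	}
+	//	for {
+	//		item, err := stream.Recv()
+	//		if err == io.EOF {
+	//			break // the server has finished streaming
+	//		}
+	//		if err != nil {
+	//			return err
+	//		}
+	//		log.Printf("sync %s: %s", item.GetSyncId(), item.GetStatus())
+	//	}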
+ ListSyncs(ctx context.Context, in *ListSyncsRequest, opts ...grpc.CallOption) (SyncService_ListSyncsClient, error) + // GetSync retrieves detailed status information for a specific synchronization. + GetSync(ctx context.Context, in *GetSyncRequest, opts ...grpc.CallOption) (*GetSyncResponse, error) + // DeleteSync removes a synchronization operation from the system. + DeleteSync(ctx context.Context, in *DeleteSyncRequest, opts ...grpc.CallOption) (*DeleteSyncResponse, error) + // RequestRegistryCredentials requests registry credentials between two Directory nodes. + // + // This RPC allows a requesting node to authenticate with this node and obtain + // temporary registry credentials for secure Zot-based synchronization. + RequestRegistryCredentials(ctx context.Context, in *RequestRegistryCredentialsRequest, opts ...grpc.CallOption) (*RequestRegistryCredentialsResponse, error) +} + +type syncServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSyncServiceClient(cc grpc.ClientConnInterface) SyncServiceClient { + return &syncServiceClient{cc} +} + +func (c *syncServiceClient) CreateSync(ctx context.Context, in *CreateSyncRequest, opts ...grpc.CallOption) (*CreateSyncResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateSyncResponse) + err := c.cc.Invoke(ctx, SyncService_CreateSync_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) ListSyncs(ctx context.Context, in *ListSyncsRequest, opts ...grpc.CallOption) (SyncService_ListSyncsClient, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &SyncService_ServiceDesc.Streams[0], SyncService_ListSyncs_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &syncServiceListSyncsClient{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SyncService_ListSyncsClient interface { + Recv() (*ListSyncsItem, error) + grpc.ClientStream +} + +type syncServiceListSyncsClient struct { + grpc.ClientStream +} + +func (x *syncServiceListSyncsClient) Recv() (*ListSyncsItem, error) { + m := new(ListSyncsItem) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *syncServiceClient) GetSync(ctx context.Context, in *GetSyncRequest, opts ...grpc.CallOption) (*GetSyncResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetSyncResponse) + err := c.cc.Invoke(ctx, SyncService_GetSync_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) DeleteSync(ctx context.Context, in *DeleteSyncRequest, opts ...grpc.CallOption) (*DeleteSyncResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteSyncResponse) + err := c.cc.Invoke(ctx, SyncService_DeleteSync_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *syncServiceClient) RequestRegistryCredentials(ctx context.Context, in *RequestRegistryCredentialsRequest, opts ...grpc.CallOption) (*RequestRegistryCredentialsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
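+	// (grpc.StaticMethod above tells interceptors and stats handlers that the
+	// method name is a compile-time constant rather than dynamically built.)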
+ out := new(RequestRegistryCredentialsResponse) + err := c.cc.Invoke(ctx, SyncService_RequestRegistryCredentials_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SyncServiceServer is the server API for SyncService service. +// All implementations should embed UnimplementedSyncServiceServer +// for forward compatibility. +// +// SyncService provides functionality for synchronizing objects between Directory nodes. +// +// This service enables one-way synchronization from a remote Directory node to the local node, +// allowing distributed Directory instances to share and replicate objects. The service supports +// both on-demand synchronization and tracking of sync operations through their lifecycle. +type SyncServiceServer interface { + // CreateSync initiates a new synchronization operation from a remote Directory node. + // + // The operation is non-blocking and returns immediately with a sync ID that can be used + // to track progress and manage the sync operation. + CreateSync(context.Context, *CreateSyncRequest) (*CreateSyncResponse, error) + // ListSyncs returns a stream of all sync operations known to the system. + // + // This includes active, completed, and failed synchronizations. + ListSyncs(*ListSyncsRequest, SyncService_ListSyncsServer) error + // GetSync retrieves detailed status information for a specific synchronization. + GetSync(context.Context, *GetSyncRequest) (*GetSyncResponse, error) + // DeleteSync removes a synchronization operation from the system. + DeleteSync(context.Context, *DeleteSyncRequest) (*DeleteSyncResponse, error) + // RequestRegistryCredentials requests registry credentials between two Directory nodes. + // + // This RPC allows a requesting node to authenticate with this node and obtain + // temporary registry credentials for secure Zot-based synchronization. + RequestRegistryCredentials(context.Context, *RequestRegistryCredentialsRequest) (*RequestRegistryCredentialsResponse, error) +} + +// UnimplementedSyncServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedSyncServiceServer struct{} + +func (UnimplementedSyncServiceServer) CreateSync(context.Context, *CreateSyncRequest) (*CreateSyncResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSync not implemented") +} +func (UnimplementedSyncServiceServer) ListSyncs(*ListSyncsRequest, SyncService_ListSyncsServer) error { + return status.Errorf(codes.Unimplemented, "method ListSyncs not implemented") +} +func (UnimplementedSyncServiceServer) GetSync(context.Context, *GetSyncRequest) (*GetSyncResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSync not implemented") +} +func (UnimplementedSyncServiceServer) DeleteSync(context.Context, *DeleteSyncRequest) (*DeleteSyncResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSync not implemented") +} +func (UnimplementedSyncServiceServer) RequestRegistryCredentials(context.Context, *RequestRegistryCredentialsRequest) (*RequestRegistryCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestRegistryCredentials not implemented") +} +func (UnimplementedSyncServiceServer) testEmbeddedByValue() {} + +// UnsafeSyncServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to SyncServiceServer will
+// result in compilation errors.
+type UnsafeSyncServiceServer interface {
+	mustEmbedUnimplementedSyncServiceServer()
+}
+
+func RegisterSyncServiceServer(s grpc.ServiceRegistrar, srv SyncServiceServer) {
+	// If the following call panics, it indicates UnimplementedSyncServiceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&SyncService_ServiceDesc, srv)
+}
+
+func _SyncService_CreateSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSyncRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SyncServiceServer).CreateSync(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: SyncService_CreateSync_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SyncServiceServer).CreateSync(ctx, req.(*CreateSyncRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _SyncService_ListSyncs_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ListSyncsRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(SyncServiceServer).ListSyncs(m, &syncServiceListSyncsServer{ServerStream: stream})
+}
+
+type SyncService_ListSyncsServer interface {
+	Send(*ListSyncsItem) error
+	grpc.ServerStream
+}
+
+type syncServiceListSyncsServer struct {
+	grpc.ServerStream
+}
+
+func (x *syncServiceListSyncsServer) Send(m *ListSyncsItem) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _SyncService_GetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetSyncRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SyncServiceServer).GetSync(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: SyncService_GetSync_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SyncServiceServer).GetSync(ctx, req.(*GetSyncRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _SyncService_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSyncRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SyncServiceServer).DeleteSync(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: SyncService_DeleteSync_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SyncServiceServer).DeleteSync(ctx, req.(*DeleteSyncRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _SyncService_RequestRegistryCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RequestRegistryCredentialsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return
srv.(SyncServiceServer).RequestRegistryCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SyncService_RequestRegistryCredentials_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SyncServiceServer).RequestRegistryCredentials(ctx, req.(*RequestRegistryCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SyncService_ServiceDesc is the grpc.ServiceDesc for SyncService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SyncService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "agntcy.dir.store.v1.SyncService", + HandlerType: (*SyncServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSync", + Handler: _SyncService_CreateSync_Handler, + }, + { + MethodName: "GetSync", + Handler: _SyncService_GetSync_Handler, + }, + { + MethodName: "DeleteSync", + Handler: _SyncService_DeleteSync_Handler, + }, + { + MethodName: "RequestRegistryCredentials", + Handler: _SyncService_RequestRegistryCredentials_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListSyncs", + Handler: _SyncService_ListSyncs_Handler, + ServerStreams: true, + }, + }, + Metadata: "agntcy/dir/store/v1/sync_service.proto", +} diff --git a/api/version/version.go b/api/version/version.go index f4e309152..cb38adef3 100644 --- a/api/version/version.go +++ b/api/version/version.go @@ -1,16 +1,16 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package version - -import "fmt" - -// overridden using ldflags. -var ( - Version string - CommitHash string -) - -func String() string { - return fmt.Sprintf("%s (%s)", Version, CommitHash) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package version + +import "fmt" + +// overridden using ldflags. +var ( + Version string + CommitHash string +) + +func String() string { + return fmt.Sprintf("%s (%s)", Version, CommitHash) +} diff --git a/cli/Dockerfile b/cli/Dockerfile index c3f612d9e..b1cf38d8f 100644 --- a/cli/Dockerfile +++ b/cli/Dockerfile @@ -1,45 +1,45 @@ -# syntax=docker/dockerfile:1@sha256:fe40cf4e92cd0c467be2cfc30657a680ae2398318afd50b0c80585784c604f28 - -# xx is a helper for cross-compilation -FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.4.0@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx - -FROM --platform=$BUILDPLATFORM golang:1.25.2-bookworm@sha256:42d8e9dea06f23d0bfc908826455213ee7f3ed48c43e287a422064220c501be9 AS builder - -COPY --link --from=xx / / - -ARG TARGETPLATFORM - -RUN --mount=type=cache,id=${TARGETPLATFORM}-apt,target=/var/cache/apt,sharing=locked \ - apt-get update \ - && xx-apt-get install -y --no-install-recommends \ - gcc \ - libc6-dev - -WORKDIR /build/cli - -RUN --mount=type=cache,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=bind,source=.,target=/build,ro \ - xx-go mod download -x - -ARG BUILD_OPTS -ARG EXTRA_LDFLAGS - -# TODO(adamtagscherer): Currently we don't need C libraries but in the future we may need to turn this on once we add -# security libraries, etc. 
-ENV CGO_ENABLED=0 - -RUN --mount=type=cache,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=bind,source=.,target=/build,ro \ - xx-go build ${BUILD_OPTS} -ldflags="-s -w -extldflags -static ${EXTRA_LDFLAGS}" \ - -o /bin/dirctl ./cli.go - -RUN xx-verify /bin/dirctl - -FROM gcr.io/distroless/static:nonroot@sha256:c0f429e16b13e583da7e5a6ec20dd656d325d88e6819cafe0adb0828976529dc -WORKDIR / -COPY --from=builder /bin/dirctl ./dirctl - -USER 65532:65532 -ENTRYPOINT ["./dirctl"] +# syntax=docker/dockerfile:1@sha256:fe40cf4e92cd0c467be2cfc30657a680ae2398318afd50b0c80585784c604f28 + +# xx is a helper for cross-compilation +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.4.0@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx + +FROM --platform=$BUILDPLATFORM golang:1.25.2-bookworm@sha256:42d8e9dea06f23d0bfc908826455213ee7f3ed48c43e287a422064220c501be9 AS builder + +COPY --link --from=xx / / + +ARG TARGETPLATFORM + +RUN --mount=type=cache,id=${TARGETPLATFORM}-apt,target=/var/cache/apt,sharing=locked \ + apt-get update \ + && xx-apt-get install -y --no-install-recommends \ + gcc \ + libc6-dev + +WORKDIR /build/cli + +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=bind,source=.,target=/build,ro \ + xx-go mod download -x + +ARG BUILD_OPTS +ARG EXTRA_LDFLAGS + +# TODO(adamtagscherer): Currently we don't need C libraries but in the future we may need to turn this on once we add +# security libraries, etc. +ENV CGO_ENABLED=0 + +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=bind,source=.,target=/build,ro \ + xx-go build ${BUILD_OPTS} -ldflags="-s -w -extldflags -static ${EXTRA_LDFLAGS}" \ + -o /bin/dirctl ./cli.go + +RUN xx-verify /bin/dirctl + +FROM gcr.io/distroless/static:nonroot@sha256:c0f429e16b13e583da7e5a6ec20dd656d325d88e6819cafe0adb0828976529dc +WORKDIR / +COPY --from=builder /bin/dirctl ./dirctl + +USER 65532:65532 +ENTRYPOINT ["./dirctl"] diff --git a/cli/README.md b/cli/README.md index ab26fb5e7..332151a3a 100644 --- a/cli/README.md +++ b/cli/README.md @@ -1,720 +1,720 @@ -# Directory CLI (dirctl) - -The Directory CLI provides comprehensive command-line tools for interacting with the Directory system, including storage, routing, search, and security operations. - -## Installation - -### From Brew Tap -```bash -brew tap agntcy/dir https://github.com/agntcy/dir/ -brew install dirctl -``` - -### From Release Binaries -```bash -# Download from GitHub Releases -curl -L https://github.com/agntcy/dir/releases/latest/download/dirctl-linux-amd64 -o dirctl -chmod +x dirctl -sudo mv dirctl /usr/local/bin/ -``` - -### From Source -```bash -git clone https://github.com/agntcy/dir -cd dir -task build-dirctl -``` - -### From Container -```bash -docker pull ghcr.io/agntcy/dir-ctl:latest -docker run --rm ghcr.io/agntcy/dir-ctl:latest --help -``` - -## Quick Start - -```bash -# 1. Store a record -dirctl push my-agent.json -# Returns: baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi - -# 2. Publish for network discovery -dirctl routing publish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi - -# 3. Search for records -dirctl routing search --skill "AI" --limit 10 - -# 4. 
Retrieve a record -dirctl pull baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi -``` - -## Output Formats - -All `dirctl` commands support the `--output` (or `-o`) flag to control output formatting: - -| Format | Description | Use Case | -|--------|-------------|----------| -| `human` | Human-readable, colored output (default) | Interactive terminal use | -| `json` | Pretty-printed JSON | Single-shot commands with `jq` | -| `jsonl` | Newline-delimited JSON | Streaming events with `jq --seq` | -| `raw` | Raw values only (CIDs, IDs) | Shell scripting and piping | - -### Examples - -```bash -# Human-readable (default) -dirctl search --skill "AI" - -# JSON output (pretty-printed) -dirctl search --skill "AI" --output json -dirctl search --skill "AI" -o json # short form - -# JSONL output (streaming-friendly) -dirctl events listen --output jsonl | jq -c . - -# Raw output (CIDs only) -dirctl push record.json --output raw -``` - -### Piping to jq - -For JSON and JSONL formats, metadata messages are automatically sent to stderr, allowing clean piping to tools like `jq`: - -```bash -# Works perfectly - metadata goes to stderr, JSON to stdout -dirctl events listen --output jsonl | jq '.resource_id' - -# Chain with other commands -dirctl routing search --skill "AI" --output json | jq '.[].peer.addrs[]' - -# Process streaming events in real-time -dirctl events listen --output jsonl | jq -c 'select(.type == "EVENT_TYPE_RECORD_PUSHED")' - -# Extract CIDs for processing -dirctl search --skill "AI" --output json | jq -r '.[]' | while read cid; do - dirctl pull "$cid" -done -``` - -## Command Reference - -### 📦 **Storage Operations** - -#### `dirctl push ` -Store records in the content-addressable store. - -**Examples:** -```bash -# Push from file -dirctl push agent-model.json - -# Push from stdin -cat agent-model.json | dirctl push --stdin - -# Push with signature -dirctl push agent-model.json --sign --key private.key -``` - -**Features:** -- Supports OASF v1, v2, v3 record formats -- Content-addressable storage with CID generation -- Optional cryptographic signing -- Data integrity validation - -#### `dirctl pull ` -Retrieve records by their Content Identifier (CID). - -**Examples:** -```bash -# Pull record content -dirctl pull baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi - -# Pull with signature verification -dirctl pull --signature --public-key public.key -``` - -#### `dirctl delete ` -Remove records from storage. - -**Examples:** -```bash -# Delete a record -dirctl delete baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi -``` - -#### `dirctl info ` -Display metadata about stored records. - -**Examples:** -```bash -# Show record metadata -dirctl info baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi -``` - -### 📡 **Routing Operations** - -The routing commands manage record announcement and discovery across the peer-to-peer network. - -#### `dirctl routing publish ` -Announce records to the network for discovery by other peers. - -**Examples:** -```bash -# Publish a record to the network -dirctl routing publish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi -``` - -**What it does:** -- Announces record to DHT network -- Makes record discoverable by other peers -- Stores routing metadata locally -- Enables network-wide discovery - -#### `dirctl routing unpublish ` -Remove records from network discovery while keeping them in local storage. 
- -**Examples:** -```bash -# Remove from network discovery -dirctl routing unpublish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi -``` - -**What it does:** -- Removes DHT announcements -- Stops network discovery -- Keeps record in local storage -- Cleans up routing metadata - -#### `dirctl routing list [flags]` -Query local published records with optional filtering. - -**Examples:** -```bash -# List all local published records -dirctl routing list - -# List by skill -dirctl routing list --skill "AI" -dirctl routing list --skill "Natural Language Processing" - -# List by locator type -dirctl routing list --locator "docker-image" - -# Multiple criteria (AND logic) -dirctl routing list --skill "AI" --locator "docker-image" - -# Specific record by CID -dirctl routing list --cid baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi - -# Limit results -dirctl routing list --skill "AI" --limit 5 -``` - -**Flags:** -- `--skill ` - Filter by skill (repeatable) -- `--locator ` - Filter by locator type (repeatable) -- `--cid ` - List specific record by CID -- `--limit ` - Limit number of results - -#### `dirctl routing search [flags]` -Discover records from other peers across the network. - -**Examples:** -```bash -# Search for AI records across the network -dirctl routing search --skill "AI" - -# Search with multiple criteria -dirctl routing search --skill "AI" --skill "ML" --min-score 2 - -# Search by locator type -dirctl routing search --locator "docker-image" - -# Advanced search with scoring -dirctl routing search --skill "web-development" --limit 10 --min-score 1 -``` - -**Flags:** -- `--skill ` - Search by skill (repeatable) -- `--locator ` - Search by locator type (repeatable) -- `--limit ` - Maximum results to return -- `--min-score ` - Minimum match score threshold - -**Output includes:** -- Record CID and provider peer information -- Match score showing query relevance -- Specific queries that matched -- Peer connection details - -#### `dirctl routing info` -Show routing statistics and summary information. - -**Examples:** -```bash -# Show local routing statistics -dirctl routing info -``` - -**Output includes:** -- Total published records count -- Skills distribution with counts -- Locators distribution with counts -- Helpful usage tips - -### 🔍 **Search & Discovery** - -#### `dirctl search [flags]` -Search for records in the directory. Use `--format` to control output type. 
- -**Format options:** -- `--format cid` (default) - Return only record CIDs (efficient for piping) -- `--format record` - Return full record data - -**Examples:** -```bash -# Search by record name (returns CIDs by default) -dirctl search --name "my-agent" - -# Search by version -dirctl search --version "v1.0.0" - -# Search by skill name -dirctl search --skill "natural_language_processing" - -# Search by skill ID -dirctl search --skill-id "10201" - -# Complex search with multiple criteria -dirctl search --limit 10 --offset 0 \ - --name "my-agent" \ - --skill "natural_language_processing/natural_language_generation/text_completion" \ - --locator "docker-image:https://example.com/image" - -# Wildcard search examples -dirctl search --name "web*" --version "v1.*" -dirctl search --skill "python*" --skill "*script" - -# Pipe CIDs to other commands -dirctl search --name "web*" --output raw | xargs -I {} dirctl pull {} - -# Get full records as JSON -dirctl search --name "my-agent" --format record --output json - -# Search with comparison operators -dirctl search --version ">=1.0.0" --version "<2.0.0" --format record -dirctl search --created-at ">=2024-01-01" -``` - -**Flags:** -- `--name ` - Search by record name (repeatable, supports wildcards) -- `--version ` - Search by version (repeatable, supports wildcards and comparison operators) -- `--skill ` - Search by skill name (repeatable, supports wildcards) -- `--skill-id ` - Search by skill ID (repeatable) -- `--locator ` - Search by locator type (repeatable, supports wildcards) -- `--module ` - Search by module name (repeatable, supports wildcards) -- `--domain ` - Search by domain name (repeatable, supports wildcards) -- `--domain-id ` - Search by domain ID (repeatable) -- `--author ` - Search by author (repeatable, supports wildcards) -- `--schema-version ` - Search by schema version (repeatable, supports wildcards) -- `--created-at ` - Search by created_at (repeatable, supports comparison operators) -- `--limit ` - Maximum results (default: 100) -- `--offset ` - Result offset for pagination - -### 🔐 **Security & Verification** - -#### `dirctl sign [flags]` -Sign records for integrity and authenticity. - -**Examples:** -```bash -# Sign with private key -dirctl sign --key private.key - -# Sign with OIDC (keyless signing) -dirctl sign --oidc --fulcio-url https://fulcio.example.com -``` - -#### `dirctl verify [flags]` -Verify record signatures. - -**Examples:** -```bash -# Verify with public key -dirctl verify record.json signature.sig --key public.key -``` - -### 📥 **Import Operations** - -Import records from external registries into DIR. Supports automated batch imports from various registry types. - -#### `dirctl import [flags]` -Fetch and import records from external registries. 
- -**Supported Registries:** -- `mcp` - Model Context Protocol registry v0.1 - -**Examples:** -```bash -# Import from MCP registry -dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io/v0.1 - -# Import with debug output (shows detailed diagnostics for failures) -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --debug - -# Force reimport of existing records (skips deduplication) -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --force - -# Import with time-based filter -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --filter=updated_since=2025-08-07T13:15:04.280Z - -# Combine multiple filters -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --filter=search=github \ - --filter=version=latest \ - --filter=updated_since=2025-08-07T13:15:04.280Z - -# Limit number of records -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --limit=50 - -# Preview without importing (dry run) -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --dry-run -``` - -**Configuration Options:** - -| Flag | Environment Variable | Description | Required | Default | -|------|---------------------|-------------|----------|---------| -| `--type` | - | Registry type (mcp, a2a) | Yes | - | -| `--url` | - | Registry base URL | Yes | - | -| `--filter` | - | Registry-specific filters (key=value, repeatable) | No | - | -| `--limit` | - | Maximum records to import (0 = no limit) | No | 0 | -| `--dry-run` | - | Preview without importing | No | false | -| `--debug` | - | Enable debug output (shows MCP source and OASF record for failures) | No | false | -| `--force` | - | Force reimport of existing records (skip deduplication) | No | false | -| `--enrich` | - | Enable LLM-based enrichment for OASF skills/domains | No | false | -| `--enrich-config` | - | Path to MCPHost configuration file (mcphost.json) | No | importer/enricher/mcphost.json | -| `--enrich-skills-prompt` | - | Optional: path to custom skills prompt template or inline prompt | No | "" (uses default) | -| `--enrich-domains-prompt` | - | Optional: path to custom domains prompt template or inline prompt | No | "" (uses default) | -| `--server-addr` | `DIRECTORY_CLIENT_SERVER_ADDRESS` | DIR server address | No | localhost:8888 | - -**Import Behavior:** - -By default, the importer performs **deduplication** - it builds a cache of existing records (by name and version) and skips importing records that already exist. This prevents duplicate imports when running the import command multiple times. - -- Use `--force` to bypass deduplication and reimport existing records -- Use `--debug` to see detailed output including which records were skipped and why imports failed - -**MCP Registry Filters:** - -For the Model Context Protocol registry, available filters include: -- `search` - Filter by server name (substring match) -- `version` - Filter by version ('latest' for latest version, or an exact version like '1.2.3') -- `updated_since` - Filter by updated time (RFC3339 datetime format, e.g., '2025-08-07T13:15:04.280Z') - -See the [MCP Registry API docs](https://registry.modelcontextprotocol.io/docs#/operations/list-servers#Query-Parameters) for the complete list of supported filters. 
- -#### LLM-based Enrichment - -The import command supports automatic enrichment of MCP server records using LLM models to map them to appropriate OASF skills and domains. This is powered by [mcphost](https://github.com/mark3labs/mcphost), which provides a Model Context Protocol (MCP) host that can run AI models with tool-calling capabilities. - -**Requirements:** -- `dirctl` binary (includes the built-in MCP server with `agntcy_oasf_get_schema_skills` and `agntcy_oasf_get_schema_domains` tools) -- An LLM model with tool-calling support (GPT-4o, Claude, or compatible Ollama models) - -**How it works:** -1. The enricher starts an MCP server using `dirctl mcp serve` -2. The LLM uses the `agntcy_oasf_get_schema_skills` tool to browse available OASF skills -3. The LLM uses the `agntcy_oasf_get_schema_domains` tool to browse available OASF domains -4. Based on the MCP server description and capabilities, the LLM selects appropriate skills and domains -5. Selected skills and domains replace the defaults in the imported records - -**Setting up mcphost:** - -1. Edit a configuration file (default: `importer/enricher/mcphost.json`): - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "dirctl", - "args": ["mcp", "serve"] - } - }, - "model": "azure:gpt-4o", - "max-tokens": 4096, - "max-steps": 20 -} -``` - -**Recommended LLM providers:** -- `azure:gpt-4o` - Azure OpenAI GPT-4o (recommended for speed and accuracy) -- `ollama:qwen3:8b` - Local Qwen3 via Ollama - -**Environment variables for LLM providers:** -- Azure OpenAI: `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_DEPLOYMENT` - -**Customizing Enrichment Prompts:** - -The enricher uses separate default prompt templates for skills and domains. You can customize these prompts for specific use cases: - -**Skills Prompt:** -1. **Use default prompt** (recommended): Simply omit the `--enrich-skills-prompt` flag -2. **Custom prompt from file**: `--enrich-skills-prompt=/path/to/custom-skills-prompt.md` -3. **Inline prompt**: `--enrich-skills-prompt="Your custom prompt text..."` - -**Domains Prompt:** -1. **Use default prompt** (recommended): Simply omit the `--enrich-domains-prompt` flag -2. **Custom prompt from file**: `--enrich-domains-prompt=/path/to/custom-domains-prompt.md` -3. **Inline prompt**: `--enrich-domains-prompt="Your custom prompt text..."` - -The default prompt templates are available at: -- Skills: `importer/enricher/enricher.skills.prompt.md` -- Domains: `importer/enricher/enricher.domains.prompt.md` - -These can be used as starting points for customization. 
- -**Examples:** - -```bash -# Import with LLM enrichment using default config -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --enrich \ - --debug - -# Import with custom mcphost configuration -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --enrich \ - --enrich-config=/path/to/custom-mcphost.json - -# Import with custom prompt templates (from files) -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --enrich \ - --enrich-skills-prompt=/path/to/custom-skills-prompt.md \ - --enrich-domains-prompt=/path/to/custom-domains-prompt.md - -# Import with all custom enrichment settings and debug output -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --enrich \ - --enrich-config=/path/to/mcphost.json \ - --enrich-skills-prompt=/path/to/custom-skills-prompt.md \ - --enrich-domains-prompt=/path/to/custom-domains-prompt.md \ - --debug - -# Import latest 10 servers with enrichment and force reimport -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --filter=version=latest \ - --limit=10 \ - --enrich \ - --force -``` - -### 🔄 **Synchronization** - -#### `dirctl sync create ` -Create peer-to-peer synchronization. - -**Examples:** -```bash -# Create sync with remote peer -dirctl sync create https://peer.example.com -``` - -#### `dirctl sync list` -List active synchronizations. - -**Examples:** -```bash -# Show all active syncs -dirctl sync list -``` - -#### `dirctl sync status ` -Check synchronization status. - -**Examples:** -```bash -# Check specific sync status -dirctl sync status abc123-def456-ghi789 -``` - -#### `dirctl sync delete ` -Remove synchronization. - -**Examples:** -```bash -# Delete a sync -dirctl sync delete abc123-def456-ghi789 -``` - -## Configuration - -### Server Connection -```bash -# Connect to specific server -dirctl --server-addr localhost:8888 routing list - -# Use environment variable -export DIRECTORY_CLIENT_SERVER_ADDRESS=localhost:8888 -dirctl routing list -``` - -### SPIFFE Authentication -```bash -# Use SPIFFE Workload API -dirctl --spiffe-socket-path /run/spire/sockets/agent.sock routing list -``` - -## Common Workflows - -### 📤 **Publishing Workflow** -```bash -# 1. Store your record (get raw CID for scripting) -CID=$(dirctl push my-agent.json --output raw) - -# 2. Publish for discovery -dirctl routing publish $CID - -# 3. Verify it's published -dirctl routing list --cid $CID - -# 4. Check routing statistics -dirctl routing info - -# 5. Export statistics as JSON -dirctl routing info --output json > stats.json -``` - -### 🔍 **Discovery Workflow** -```bash -# 1. Search for records by skill -dirctl routing search --skill "AI" --limit 10 - -# 2. Search with multiple criteria and get JSON -dirctl routing search --skill "AI" --locator "docker-image" --min-score 2 --output json - -# 3. Pull interesting records -dirctl pull - -# 4. Process search results programmatically -dirctl routing search --skill "AI" --output json | jq -r '.[].record_ref.cid' | while read cid; do - echo "Processing $cid..." - dirctl pull "$cid" --output json > "records/${cid}.json" -done -``` - -### 📥 **Import Workflow** -```bash -# 1. Preview import with dry run -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --limit=10 \ - --dry-run - -# 2. 
Perform actual import with debug output -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --filter=updated_since=2025-08-07T13:15:04.280Z \ - --debug - -# 3. Force reimport to update existing records -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --limit=10 \ - --force - -# 4. Import with LLM enrichment for better skill mapping -dirctl import --type=mcp \ - --url=https://registry.modelcontextprotocol.io/v0.1 \ - --limit=5 \ - --enrich \ - --debug - -# 5. Search imported records -dirctl search --module "runtime/mcp" -``` - -### 🔄 **Synchronization Workflow** -```bash -# 1. Create sync with remote peer (get raw ID for scripting) -SYNC_ID=$(dirctl sync create https://peer.example.com --output raw) - -# 2. Monitor sync progress -dirctl sync status $SYNC_ID - -# 3. List all syncs -dirctl sync list - -# 4. Export sync list as JSON -dirctl sync list --output json > active-syncs.json - -# 5. Clean up when done -dirctl sync delete $SYNC_ID -``` - -### 📡 **Event Streaming Workflow** -```bash -# 1. Listen to all events (human-readable) -dirctl events listen - -# 2. Stream events as JSONL for processing -dirctl events listen --output jsonl | jq -c . - -# 3. Filter and process specific event types -dirctl events listen --types RECORD_PUSHED --output jsonl | \ - jq -c 'select(.type == "EVENT_TYPE_RECORD_PUSHED")' | \ - while read event; do - CID=$(echo "$event" | jq -r '.resource_id') - echo "New record pushed: $CID" - done - -# 4. Monitor events with label filters -dirctl events listen --labels /skills/AI --output jsonl | \ - jq -c '.resource_id' >> ai-records.log - -# 5. Extract just resource IDs from events -dirctl events listen --output raw | tee event-cids.txt -``` - -## Command Organization - -The CLI follows a clear service-based organization: - -- **Storage**: Direct record management (`push`, `pull`, `delete`, `info`) -- **Routing**: Network announcement and discovery (`routing publish`, `routing list`, `routing search`) -- **Search**: General content search (`search`) -- **Security**: Signing and verification (`sign`, `verify`) -- **Import**: External registry imports (`import`) -- **Sync**: Peer synchronization (`sync`) - -Each command group provides focused functionality with consistent flag patterns and clear separation of concerns. - -## Getting Help - -```bash -# General help -dirctl --help - -# Command group help -dirctl routing --help - -# Specific command help -dirctl routing search --help -``` - -For more advanced usage, troubleshooting, and development workflows, see the [complete documentation](https://docs.agntcy.org/dir/). +# Directory CLI (dirctl) + +The Directory CLI provides comprehensive command-line tools for interacting with the Directory system, including storage, routing, search, and security operations. + +## Installation + +### From Brew Tap +```bash +brew tap agntcy/dir https://github.com/agntcy/dir/ +brew install dirctl +``` + +### From Release Binaries +```bash +# Download from GitHub Releases +curl -L https://github.com/agntcy/dir/releases/latest/download/dirctl-linux-amd64 -o dirctl +chmod +x dirctl +sudo mv dirctl /usr/local/bin/ +``` + +### From Source +```bash +git clone https://github.com/agntcy/dir +cd dir +task build-dirctl +``` + +### From Container +```bash +docker pull ghcr.io/agntcy/dir-ctl:latest +docker run --rm ghcr.io/agntcy/dir-ctl:latest --help +``` + +## Quick Start + +```bash +# 1. 
Store a record
+dirctl push my-agent.json
+# Returns: baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+
+# 2. Publish for network discovery
+dirctl routing publish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+
+# 3. Search for records
+dirctl routing search --skill "AI" --limit 10
+
+# 4. Retrieve a record
+dirctl pull baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+```
+
+## Output Formats
+
+All `dirctl` commands support the `--output` (or `-o`) flag to control output formatting:
+
+| Format | Description | Use Case |
+|--------|-------------|----------|
+| `human` | Human-readable, colored output (default) | Interactive terminal use |
+| `json` | Pretty-printed JSON | Single-shot commands with `jq` |
+| `jsonl` | Newline-delimited JSON | Streaming events with `jq` |
+| `raw` | Raw values only (CIDs, IDs) | Shell scripting and piping |
+
+### Examples
+
+```bash
+# Human-readable (default)
+dirctl search --skill "AI"
+
+# JSON output (pretty-printed)
+dirctl search --skill "AI" --output json
+dirctl search --skill "AI" -o json  # short form
+
+# JSONL output (streaming-friendly)
+dirctl events listen --output jsonl | jq -c .
+
+# Raw output (CIDs only)
+dirctl push record.json --output raw
+```
+
+### Piping to jq
+
+For JSON and JSONL formats, metadata messages are automatically sent to stderr, allowing clean piping to tools like `jq`:
+
+```bash
+# Works cleanly - metadata goes to stderr, JSON to stdout
+dirctl events listen --output jsonl | jq '.resource_id'
+
+# Chain with other commands
+dirctl routing search --skill "AI" --output json | jq '.[].peer.addrs[]'
+
+# Process streaming events in real-time
+dirctl events listen --output jsonl | jq -c 'select(.type == "EVENT_TYPE_RECORD_PUSHED")'
+
+# Extract CIDs for processing
+dirctl search --skill "AI" --output json | jq -r '.[]' | while read cid; do
+  dirctl pull "$cid"
+done
+```
+
+## Command Reference
+
+### 📦 **Storage Operations**
+
+#### `dirctl push <file>`
+Store records in the content-addressable store.
+
+**Examples:**
+```bash
+# Push from file
+dirctl push agent-model.json
+
+# Push from stdin
+cat agent-model.json | dirctl push --stdin
+
+# Push with signature
+dirctl push agent-model.json --sign --key private.key
+```
+
+**Features:**
+- Supports OASF v1, v2, v3 record formats
+- Content-addressable storage with CID generation
+- Optional cryptographic signing
+- Data integrity validation
+
+#### `dirctl pull <cid>`
+Retrieve records by their Content Identifier (CID).
+
+**Examples:**
+```bash
+# Pull record content
+dirctl pull baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+
+# Pull with signature verification
+dirctl pull <cid> --signature --public-key public.key
+```
+
+#### `dirctl delete <cid>`
+Remove records from storage.
+
+**Examples:**
+```bash
+# Delete a record
+dirctl delete baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+```
+
+#### `dirctl info <cid>`
+Display metadata about stored records.
+
+**Examples:**
+```bash
+# Show record metadata
+dirctl info baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+```
+
+### 📡 **Routing Operations**
+
+The routing commands manage record announcement and discovery across the peer-to-peer network.
+
+#### `dirctl routing publish <cid>`
+Announce records to the network for discovery by other peers.
+
+**Examples:**
+```bash
+# Publish a record to the network
+dirctl routing publish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+```
+
+**What it does:**
+- Announces record to DHT network
+- Makes record discoverable by other peers
+- Stores routing metadata locally
+- Enables network-wide discovery
+
+#### `dirctl routing unpublish <cid>`
+Remove records from network discovery while keeping them in local storage.
+
+**Examples:**
+```bash
+# Remove from network discovery
+dirctl routing unpublish baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+```
+
+**What it does:**
+- Removes DHT announcements
+- Stops network discovery
+- Keeps record in local storage
+- Cleans up routing metadata
+
+#### `dirctl routing list [flags]`
+Query local published records with optional filtering.
+
+**Examples:**
+```bash
+# List all local published records
+dirctl routing list
+
+# List by skill
+dirctl routing list --skill "AI"
+dirctl routing list --skill "Natural Language Processing"
+
+# List by locator type
+dirctl routing list --locator "docker-image"
+
+# Multiple criteria (AND logic)
+dirctl routing list --skill "AI" --locator "docker-image"
+
+# Specific record by CID
+dirctl routing list --cid baeareihdr6t7s6sr2q4zo456sza66eewqc7huzatyfgvoupaqyjw23ilvi
+
+# Limit results
+dirctl routing list --skill "AI" --limit 5
+```
+
+**Flags:**
+- `--skill <skill>` - Filter by skill (repeatable)
+- `--locator <type>` - Filter by locator type (repeatable)
+- `--cid <cid>` - List specific record by CID
+- `--limit <n>` - Limit number of results
+
+#### `dirctl routing search [flags]`
+Discover records from other peers across the network.
+
+**Examples:**
+```bash
+# Search for AI records across the network
+dirctl routing search --skill "AI"
+
+# Search with multiple criteria
+dirctl routing search --skill "AI" --skill "ML" --min-score 2
+
+# Search by locator type
+dirctl routing search --locator "docker-image"
+
+# Advanced search with scoring
+dirctl routing search --skill "web-development" --limit 10 --min-score 1
+```
+
+**Flags:**
+- `--skill <skill>` - Search by skill (repeatable)
+- `--locator <type>` - Search by locator type (repeatable)
+- `--limit <n>` - Maximum results to return
+- `--min-score <score>` - Minimum match score threshold
+
+**Output includes:**
+- Record CID and provider peer information
+- Match score showing query relevance
+- Specific queries that matched
+- Peer connection details
+
+#### `dirctl routing info`
+Show routing statistics and summary information.
+
+**Examples:**
+```bash
+# Show local routing statistics
+dirctl routing info
+```
+
+**Output includes:**
+- Total published records count
+- Skills distribution with counts
+- Locators distribution with counts
+- Helpful usage tips
+
+### 🔍 **Search & Discovery**
+
+#### `dirctl search [flags]`
+Search for records in the directory. Use `--format` to control output type.
+
+**Format options:**
+- `--format cid` (default) - Return only record CIDs (efficient for piping)
+- `--format record` - Return full record data
+
+**Examples:**
+```bash
+# Search by record name (returns CIDs by default)
+dirctl search --name "my-agent"
+
+# Search by version
+dirctl search --version "v1.0.0"
+
+# Search by skill name
+dirctl search --skill "natural_language_processing"
+
+# Search by skill ID
+dirctl search --skill-id "10201"
+
+# Complex search with multiple criteria
+dirctl search --limit 10 --offset 0 \
+  --name "my-agent" \
+  --skill "natural_language_processing/natural_language_generation/text_completion" \
+  --locator "docker-image:https://example.com/image"
+
+# Wildcard search examples
+dirctl search --name "web*" --version "v1.*"
+dirctl search --skill "python*" --skill "*script"
+
+# Pipe CIDs to other commands
+dirctl search --name "web*" --output raw | xargs -I {} dirctl pull {}
+
+# Get full records as JSON
+dirctl search --name "my-agent" --format record --output json
+
+# Search with comparison operators
+dirctl search --version ">=1.0.0" --version "<2.0.0" --format record
+dirctl search --created-at ">=2024-01-01"
+```
+
+**Flags:**
+- `--name <name>` - Search by record name (repeatable, supports wildcards)
+- `--version <version>` - Search by version (repeatable, supports wildcards and comparison operators)
+- `--skill <skill>` - Search by skill name (repeatable, supports wildcards)
+- `--skill-id <id>` - Search by skill ID (repeatable)
+- `--locator <type>` - Search by locator type (repeatable, supports wildcards)
+- `--module <module>` - Search by module name (repeatable, supports wildcards)
+- `--domain <domain>` - Search by domain name (repeatable, supports wildcards)
+- `--domain-id <id>` - Search by domain ID (repeatable)
+- `--author <author>` - Search by author (repeatable, supports wildcards)
+- `--schema-version <version>` - Search by schema version (repeatable, supports wildcards)
+- `--created-at <date>` - Search by created_at (repeatable, supports comparison operators)
+- `--limit <n>` - Maximum results (default: 100)
+- `--offset <n>` - Result offset for pagination
+
+### 🔐 **Security & Verification**
+
+#### `dirctl sign [flags]`
+Sign records for integrity and authenticity.
+
+**Examples:**
+```bash
+# Sign with private key
+dirctl sign --key private.key
+
+# Sign with OIDC (keyless signing)
+dirctl sign --oidc --fulcio-url https://fulcio.example.com
+```
+
+#### `dirctl verify [flags]`
+Verify record signatures.
+
+**Examples:**
+```bash
+# Verify with public key
+dirctl verify record.json signature.sig --key public.key
+```
+
+### 📥 **Import Operations**
+
+Import records from external registries into DIR. Supports automated batch imports from various registry types.
+
+#### `dirctl import [flags]`
+Fetch and import records from external registries.
+ +**Supported Registries:** +- `mcp` - Model Context Protocol registry v0.1 + +**Examples:** +```bash +# Import from MCP registry +dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io/v0.1 + +# Import with debug output (shows detailed diagnostics for failures) +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --debug + +# Force reimport of existing records (skips deduplication) +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --force + +# Import with time-based filter +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --filter=updated_since=2025-08-07T13:15:04.280Z + +# Combine multiple filters +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --filter=search=github \ + --filter=version=latest \ + --filter=updated_since=2025-08-07T13:15:04.280Z + +# Limit number of records +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --limit=50 + +# Preview without importing (dry run) +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --dry-run +``` + +**Configuration Options:** + +| Flag | Environment Variable | Description | Required | Default | +|------|---------------------|-------------|----------|---------| +| `--type` | - | Registry type (mcp, a2a) | Yes | - | +| `--url` | - | Registry base URL | Yes | - | +| `--filter` | - | Registry-specific filters (key=value, repeatable) | No | - | +| `--limit` | - | Maximum records to import (0 = no limit) | No | 0 | +| `--dry-run` | - | Preview without importing | No | false | +| `--debug` | - | Enable debug output (shows MCP source and OASF record for failures) | No | false | +| `--force` | - | Force reimport of existing records (skip deduplication) | No | false | +| `--enrich` | - | Enable LLM-based enrichment for OASF skills/domains | No | false | +| `--enrich-config` | - | Path to MCPHost configuration file (mcphost.json) | No | importer/enricher/mcphost.json | +| `--enrich-skills-prompt` | - | Optional: path to custom skills prompt template or inline prompt | No | "" (uses default) | +| `--enrich-domains-prompt` | - | Optional: path to custom domains prompt template or inline prompt | No | "" (uses default) | +| `--server-addr` | `DIRECTORY_CLIENT_SERVER_ADDRESS` | DIR server address | No | localhost:8888 | + +**Import Behavior:** + +By default, the importer performs **deduplication** - it builds a cache of existing records (by name and version) and skips importing records that already exist. This prevents duplicate imports when running the import command multiple times. + +- Use `--force` to bypass deduplication and reimport existing records +- Use `--debug` to see detailed output including which records were skipped and why imports failed + +**MCP Registry Filters:** + +For the Model Context Protocol registry, available filters include: +- `search` - Filter by server name (substring match) +- `version` - Filter by version ('latest' for latest version, or an exact version like '1.2.3') +- `updated_since` - Filter by updated time (RFC3339 datetime format, e.g., '2025-08-07T13:15:04.280Z') + +See the [MCP Registry API docs](https://registry.modelcontextprotocol.io/docs#/operations/list-servers#Query-Parameters) for the complete list of supported filters. 
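+
+Since `--filter` is repeatable and takes `key=value` pairs, and deduplication keys records by name and version, the two mechanisms can be pictured with the short Go sketch below. This is an illustration only, not the importer's actual code; `parseFilters` and `dedupKey` are hypothetical helper names.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+)
+
+// parseFilters folds repeated --filter key=value pairs into query
+// parameters for the registry request (hypothetical helper).
+func parseFilters(filters []string) (url.Values, error) {
+	q := url.Values{}
+	for _, f := range filters {
+		key, value, ok := strings.Cut(f, "=")
+		if !ok || key == "" {
+			return nil, fmt.Errorf("invalid filter %q: expected key=value", f)
+		}
+		q.Add(key, value) // repeated keys are preserved
+	}
+	return q, nil
+}
+
+// dedupKey mirrors the name+version identity used to skip records
+// that already exist (hypothetical helper).
+func dedupKey(name, version string) string {
+	return name + "@" + version
+}
+
+func main() {
+	q, _ := parseFilters([]string{"search=github", "version=latest"})
+	fmt.Println(q.Encode())                     // search=github&version=latest
+	fmt.Println(dedupKey("my-server", "1.2.3")) // my-server@1.2.3
+}
+```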
+ +#### LLM-based Enrichment + +The import command supports automatic enrichment of MCP server records using LLM models to map them to appropriate OASF skills and domains. This is powered by [mcphost](https://github.com/mark3labs/mcphost), which provides a Model Context Protocol (MCP) host that can run AI models with tool-calling capabilities. + +**Requirements:** +- `dirctl` binary (includes the built-in MCP server with `agntcy_oasf_get_schema_skills` and `agntcy_oasf_get_schema_domains` tools) +- An LLM model with tool-calling support (GPT-4o, Claude, or compatible Ollama models) + +**How it works:** +1. The enricher starts an MCP server using `dirctl mcp serve` +2. The LLM uses the `agntcy_oasf_get_schema_skills` tool to browse available OASF skills +3. The LLM uses the `agntcy_oasf_get_schema_domains` tool to browse available OASF domains +4. Based on the MCP server description and capabilities, the LLM selects appropriate skills and domains +5. Selected skills and domains replace the defaults in the imported records + +**Setting up mcphost:** + +1. Edit a configuration file (default: `importer/enricher/mcphost.json`): + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "dirctl", + "args": ["mcp", "serve"] + } + }, + "model": "azure:gpt-4o", + "max-tokens": 4096, + "max-steps": 20 +} +``` + +**Recommended LLM providers:** +- `azure:gpt-4o` - Azure OpenAI GPT-4o (recommended for speed and accuracy) +- `ollama:qwen3:8b` - Local Qwen3 via Ollama + +**Environment variables for LLM providers:** +- Azure OpenAI: `AZURE_OPENAI_API_KEY`, `AZURE_OPENAI_ENDPOINT`, `AZURE_OPENAI_DEPLOYMENT` + +**Customizing Enrichment Prompts:** + +The enricher uses separate default prompt templates for skills and domains. You can customize these prompts for specific use cases: + +**Skills Prompt:** +1. **Use default prompt** (recommended): Simply omit the `--enrich-skills-prompt` flag +2. **Custom prompt from file**: `--enrich-skills-prompt=/path/to/custom-skills-prompt.md` +3. **Inline prompt**: `--enrich-skills-prompt="Your custom prompt text..."` + +**Domains Prompt:** +1. **Use default prompt** (recommended): Simply omit the `--enrich-domains-prompt` flag +2. **Custom prompt from file**: `--enrich-domains-prompt=/path/to/custom-domains-prompt.md` +3. **Inline prompt**: `--enrich-domains-prompt="Your custom prompt text..."` + +The default prompt templates are available at: +- Skills: `importer/enricher/enricher.skills.prompt.md` +- Domains: `importer/enricher/enricher.domains.prompt.md` + +These can be used as starting points for customization. 
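+
+Because the prompt flags accept either a file path or inline text, the resolution order can be sketched as follows. This is a minimal sketch under that assumption; `resolvePrompt` is a hypothetical name, not the actual flag-handling code.
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+// resolvePrompt returns the default template when the flag is unset,
+// the file contents when the value names an existing file, and the
+// value itself otherwise (treated as an inline prompt).
+func resolvePrompt(flagValue, defaultPrompt string) (string, error) {
+	if flagValue == "" {
+		return defaultPrompt, nil
+	}
+	if info, err := os.Stat(flagValue); err == nil && !info.IsDir() {
+		data, err := os.ReadFile(flagValue)
+		if err != nil {
+			return "", fmt.Errorf("read prompt file: %w", err)
+		}
+		return string(data), nil
+	}
+	return flagValue, nil
+}
+
+func main() {
+	p, _ := resolvePrompt("", "default skills prompt")
+	fmt.Println(p) // default skills prompt
+}
+```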
+
+**Examples:**
+
+```bash
+# Import with LLM enrichment using default config
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --enrich \
+  --debug
+
+# Import with custom mcphost configuration
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --enrich \
+  --enrich-config=/path/to/custom-mcphost.json
+
+# Import with custom prompt templates (from files)
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --enrich \
+  --enrich-skills-prompt=/path/to/custom-skills-prompt.md \
+  --enrich-domains-prompt=/path/to/custom-domains-prompt.md
+
+# Import with all custom enrichment settings and debug output
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --enrich \
+  --enrich-config=/path/to/mcphost.json \
+  --enrich-skills-prompt=/path/to/custom-skills-prompt.md \
+  --enrich-domains-prompt=/path/to/custom-domains-prompt.md \
+  --debug
+
+# Import latest 10 servers with enrichment and force reimport
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --filter=version=latest \
+  --limit=10 \
+  --enrich \
+  --force
+```
+
+### 🔄 **Synchronization**
+
+#### `dirctl sync create <remote-url>`
+Create peer-to-peer synchronization.
+
+**Examples:**
+```bash
+# Create sync with remote peer
+dirctl sync create https://peer.example.com
+```
+
+#### `dirctl sync list`
+List active synchronizations.
+
+**Examples:**
+```bash
+# Show all active syncs
+dirctl sync list
+```
+
+#### `dirctl sync status <sync-id>`
+Check synchronization status.
+
+**Examples:**
+```bash
+# Check specific sync status
+dirctl sync status abc123-def456-ghi789
+```
+
+#### `dirctl sync delete <sync-id>`
+Remove synchronization.
+
+**Examples:**
+```bash
+# Delete a sync
+dirctl sync delete abc123-def456-ghi789
+```
+
+## Configuration
+
+### Server Connection
+```bash
+# Connect to specific server
+dirctl --server-addr localhost:8888 routing list
+
+# Use environment variable
+export DIRECTORY_CLIENT_SERVER_ADDRESS=localhost:8888
+dirctl routing list
+```
+
+### SPIFFE Authentication
+```bash
+# Use SPIFFE Workload API
+dirctl --spiffe-socket-path /run/spire/sockets/agent.sock routing list
+```
+
+## Common Workflows
+
+### 📤 **Publishing Workflow**
+```bash
+# 1. Store your record (get raw CID for scripting)
+CID=$(dirctl push my-agent.json --output raw)
+
+# 2. Publish for discovery
+dirctl routing publish $CID
+
+# 3. Verify it's published
+dirctl routing list --cid $CID
+
+# 4. Check routing statistics
+dirctl routing info
+
+# 5. Export statistics as JSON
+dirctl routing info --output json > stats.json
+```
+
+### 🔍 **Discovery Workflow**
+```bash
+# 1. Search for records by skill
+dirctl routing search --skill "AI" --limit 10
+
+# 2. Search with multiple criteria and get JSON
+dirctl routing search --skill "AI" --locator "docker-image" --min-score 2 --output json
+
+# 3. Pull interesting records
+dirctl pull <cid>
+
+# 4. Process search results programmatically
+dirctl routing search --skill "AI" --output json | jq -r '.[].record_ref.cid' | while read cid; do
+  echo "Processing $cid..."
+  dirctl pull "$cid" --output json > "records/${cid}.json"
+done
+```
+
+### 📥 **Import Workflow**
+```bash
+# 1. Preview import with dry run
+dirctl import --type=mcp \
+  --url=https://registry.modelcontextprotocol.io/v0.1 \
+  --limit=10 \
+  --dry-run
+
+# 2. 
Perform actual import with debug output +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --filter=updated_since=2025-08-07T13:15:04.280Z \ + --debug + +# 3. Force reimport to update existing records +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --limit=10 \ + --force + +# 4. Import with LLM enrichment for better skill mapping +dirctl import --type=mcp \ + --url=https://registry.modelcontextprotocol.io/v0.1 \ + --limit=5 \ + --enrich \ + --debug + +# 5. Search imported records +dirctl search --module "runtime/mcp" +``` + +### 🔄 **Synchronization Workflow** +```bash +# 1. Create sync with remote peer (get raw ID for scripting) +SYNC_ID=$(dirctl sync create https://peer.example.com --output raw) + +# 2. Monitor sync progress +dirctl sync status $SYNC_ID + +# 3. List all syncs +dirctl sync list + +# 4. Export sync list as JSON +dirctl sync list --output json > active-syncs.json + +# 5. Clean up when done +dirctl sync delete $SYNC_ID +``` + +### 📡 **Event Streaming Workflow** +```bash +# 1. Listen to all events (human-readable) +dirctl events listen + +# 2. Stream events as JSONL for processing +dirctl events listen --output jsonl | jq -c . + +# 3. Filter and process specific event types +dirctl events listen --types RECORD_PUSHED --output jsonl | \ + jq -c 'select(.type == "EVENT_TYPE_RECORD_PUSHED")' | \ + while read event; do + CID=$(echo "$event" | jq -r '.resource_id') + echo "New record pushed: $CID" + done + +# 4. Monitor events with label filters +dirctl events listen --labels /skills/AI --output jsonl | \ + jq -c '.resource_id' >> ai-records.log + +# 5. Extract just resource IDs from events +dirctl events listen --output raw | tee event-cids.txt +``` + +## Command Organization + +The CLI follows a clear service-based organization: + +- **Storage**: Direct record management (`push`, `pull`, `delete`, `info`) +- **Routing**: Network announcement and discovery (`routing publish`, `routing list`, `routing search`) +- **Search**: General content search (`search`) +- **Security**: Signing and verification (`sign`, `verify`) +- **Import**: External registry imports (`import`) +- **Sync**: Peer synchronization (`sync`) + +Each command group provides focused functionality with consistent flag patterns and clear separation of concerns. + +## Getting Help + +```bash +# General help +dirctl --help + +# Command group help +dirctl routing --help + +# Specific command help +dirctl routing search --help +``` + +For more advanced usage, troubleshooting, and development workflows, see the [complete documentation](https://docs.agntcy.org/dir/). 
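+
+## Appendix: Command Group Wiring
+
+For contributors, the service-based layout described under Command Organization maps naturally onto [spf13/cobra](https://github.com/spf13/cobra), which dirctl uses. The sketch below is illustrative only: the command names are real, but the wiring shown is an assumption, not dirctl's actual source layout.
+
+```go
+package main
+
+import "github.com/spf13/cobra"
+
+// newRootCmd assembles the root command from per-service groups,
+// mirroring the organization described above (illustrative sketch).
+func newRootCmd() *cobra.Command {
+	root := &cobra.Command{Use: "dirctl"}
+
+	// Each service area contributes its own command group.
+	routing := &cobra.Command{Use: "routing", Short: "Network announcement and discovery"}
+	routing.AddCommand(&cobra.Command{Use: "publish <cid>", Short: "Announce a record to the network"})
+	routing.AddCommand(&cobra.Command{Use: "search", Short: "Discover records from peers"})
+
+	root.AddCommand(routing)
+	root.AddCommand(&cobra.Command{Use: "search", Short: "General content search"})
+	root.AddCommand(&cobra.Command{Use: "sync", Short: "Peer synchronization"})
+
+	return root
+}
+
+func main() {
+	_ = newRootCmd().Execute()
+}
+```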
diff --git a/cli/cli.go b/cli/cli.go index 75c67f68b..b63c86952 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "context" - "os" - "os/signal" - "syscall" - - "github.com/agntcy/dir/cli/cmd" -) - -func main() { - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) - - if err := cmd.Run(ctx); err != nil { - cancel() - os.Exit(1) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/agntcy/dir/cli/cmd" +) + +func main() { + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) + + if err := cmd.Run(ctx); err != nil { + cancel() + os.Exit(1) + } +} diff --git a/cli/cmd/delete/delete.go b/cli/cmd/delete/delete.go index 8608032c3..b1ec95b3e 100644 --- a/cli/cmd/delete/delete.go +++ b/cli/cmd/delete/delete.go @@ -1,68 +1,68 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:predeclared,wrapcheck -package delete - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -func init() { - // Add output format flags - presenter.AddOutputFlags(Command) -} - -var Command = &cobra.Command{ - Use: "delete", - Short: "Delete record from Directory store", - Long: `This command deletes a record from the Directory store. - -Usage examples: - -1. Delete a record: - - dirctl delete - -2. Output formats: - - # Delete with JSON confirmation - dirctl delete --output json - - # Delete with raw output for scripting - dirctl delete --output raw - -`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("cid is a required argument") - } - - return runCommand(cmd, args[0]) - }, -} - -func runCommand(cmd *cobra.Command, cid string) error { - // Get the client from the context. - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Delete object from store - err := c.Delete(cmd.Context(), &corev1.RecordRef{ - Cid: cid, - }) - if err != nil { - return fmt.Errorf("failed to delete record: %w", err) - } - - // Output in the appropriate format - return presenter.PrintMessage(cmd, "record", "Deleted record with CID", cid) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:predeclared,wrapcheck +package delete + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +func init() { + // Add output format flags + presenter.AddOutputFlags(Command) +} + +var Command = &cobra.Command{ + Use: "delete", + Short: "Delete record from Directory store", + Long: `This command deletes a record from the Directory store. + +Usage examples: + +1. Delete a record: + + dirctl delete + +2. 
Output formats: + + # Delete with JSON confirmation + dirctl delete --output json + + # Delete with raw output for scripting + dirctl delete --output raw + +`, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("cid is a required argument") + } + + return runCommand(cmd, args[0]) + }, +} + +func runCommand(cmd *cobra.Command, cid string) error { + // Get the client from the context. + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Delete object from store + err := c.Delete(cmd.Context(), &corev1.RecordRef{ + Cid: cid, + }) + if err != nil { + return fmt.Errorf("failed to delete record: %w", err) + } + + // Output in the appropriate format + return presenter.PrintMessage(cmd, "record", "Deleted record with CID", cid) +} diff --git a/cli/cmd/events/events.go b/cli/cmd/events/events.go index d6bc91738..e0e8b3515 100644 --- a/cli/cmd/events/events.go +++ b/cli/cmd/events/events.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "github.com/agntcy/dir/cli/presenter" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "events", - Short: "Stream real-time system events", - Long: `Stream real-time events from the Directory system. - -This command allows you to monitor system activity by subscribing to -events from various services (store, routing, sync, signing). - -Examples: - -1. Listen to all events: - dirctl events listen - -2. Filter by event type: - dirctl events listen --types RECORD_PUSHED,RECORD_PUBLISHED - -3. Filter by labels: - dirctl events listen --labels /skills/AI - -4. Output formats: - dirctl events listen --output jsonl # Streaming JSON (one per line) - dirctl events listen --output json # Pretty-printed JSON - dirctl events listen --output raw # Resource IDs only - -Events are delivered from subscription time forward (no history). -The stream remains active until interrupted (Ctrl+C). -`, -} - -func init() { - // Add subcommands - Command.AddCommand(listenCmd) - - // Add output format flags - presenter.AddOutputFlags(listenCmd) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "github.com/agntcy/dir/cli/presenter" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "events", + Short: "Stream real-time system events", + Long: `Stream real-time events from the Directory system. + +This command allows you to monitor system activity by subscribing to +events from various services (store, routing, sync, signing). + +Examples: + +1. Listen to all events: + dirctl events listen + +2. Filter by event type: + dirctl events listen --types RECORD_PUSHED,RECORD_PUBLISHED + +3. Filter by labels: + dirctl events listen --labels /skills/AI + +4. Output formats: + dirctl events listen --output jsonl # Streaming JSON (one per line) + dirctl events listen --output json # Pretty-printed JSON + dirctl events listen --output raw # Resource IDs only + +Events are delivered from subscription time forward (no history). +The stream remains active until interrupted (Ctrl+C). 
+`, +} + +func init() { + // Add subcommands + Command.AddCommand(listenCmd) + + // Add output format flags + presenter.AddOutputFlags(listenCmd) +} diff --git a/cli/cmd/events/listen.go b/cli/cmd/events/listen.go index 4a20e8623..6836be710 100644 --- a/cli/cmd/events/listen.go +++ b/cli/cmd/events/listen.go @@ -1,224 +1,224 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var listenCmd = &cobra.Command{ - Use: "listen", - Short: "Listen to real-time system events", - Long: `Listen to real-time system events with optional filtering. - -Events are streamed from the Directory server in real-time. -Only events occurring after subscription are delivered (no history). -The stream remains active until interrupted (Ctrl+C). - -Examples: - -1. Listen to all events: - dirctl events listen - -2. Filter by specific event types: - dirctl events listen --types RECORD_PUSHED,RECORD_PUBLISHED - -3. Filter by labels (AI-related records): - dirctl events listen --labels /skills/AI - -4. Filter by CID: - dirctl events listen --cids bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi - -5. Combine filters: - dirctl events listen --types RECORD_PUSHED --labels /skills/AI --output jsonl - -Available event types: -- Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED -- Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED -- Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED -- Sign: RECORD_SIGNED -`, - RunE: func(cmd *cobra.Command, _ []string) error { - return runListenCommand(cmd) - }, -} - -// Listen command options. 
-var listenOpts struct { - EventTypes []string - LabelFilters []string - CIDFilters []string -} - -func init() { - listenCmd.Flags().StringArrayVar(&listenOpts.EventTypes, "types", nil, - "Event types to filter (e.g., --types RECORD_PUSHED,RECORD_PUBLISHED)") - listenCmd.Flags().StringArrayVar(&listenOpts.LabelFilters, "labels", nil, - "Label filters (e.g., --labels /skills/AI --labels /domains/research)") - listenCmd.Flags().StringArrayVar(&listenOpts.CIDFilters, "cids", nil, - "CID filters (e.g., --cids bafyxxx)") -} - -func runListenCommand(cmd *cobra.Command) error { - // Get client from context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Parse event types from strings to enums - eventTypes, err := parseEventTypes(listenOpts.EventTypes) - if err != nil { - return fmt.Errorf("invalid event types: %w", err) - } - - // Build request - req := &eventsv1.ListenRequest{ - EventTypes: eventTypes, - LabelFilters: listenOpts.LabelFilters, - CidFilters: listenOpts.CIDFilters, - } - - // Start listening - result, err := c.ListenStream(cmd.Context(), req) - if err != nil { - return fmt.Errorf("failed to start event stream: %w", err) - } - - // Show metadata only in human format (route to stderr for structured formats) - opts := presenter.GetOutputOptions(cmd) - if opts.Format == presenter.FormatHuman { - presenter.Printf(cmd, "Listening to events (press Ctrl+C to stop)...\n") - - if len(eventTypes) > 0 { - presenter.Printf(cmd, "Event types: %v\n", listenOpts.EventTypes) - } - - if len(listenOpts.LabelFilters) > 0 { - presenter.Printf(cmd, "Label filters: %v\n", listenOpts.LabelFilters) - } - - if len(listenOpts.CIDFilters) > 0 { - presenter.Printf(cmd, "CID filters: %v\n", listenOpts.CIDFilters) - } - - presenter.Printf(cmd, "\n") - } - - // Stream events using StreamResult pattern - for { - select { - case resp := <-result.ResCh(): - event := resp.GetEvent() - if event != nil { - displayEvent(cmd, event) - } - case err := <-result.ErrCh(): - return fmt.Errorf("error receiving event: %w", err) - case <-result.DoneCh(): - // Stream ended normally - return nil - case <-cmd.Context().Done(): - // Return unwrapped context error so callers can check for context.Canceled - //nolint:wrapcheck - return cmd.Context().Err() - } - } -} - -// displayEvent formats and displays an event. 
-func displayEvent(cmd *cobra.Command, event *eventsv1.Event) { - // Get output options - opts := presenter.GetOutputOptions(cmd) - - switch opts.Format { - case presenter.FormatJSON: - // Pretty-printed JSON - data, err := json.MarshalIndent(event, "", " ") - if err != nil { - presenter.Errorf(cmd, "Error marshaling event: %v\n", err) - - return - } - - presenter.Printf(cmd, "%s\n", string(data)) - - case presenter.FormatJSONL: - // Compact JSON for streaming (newline-delimited) - data, err := json.Marshal(event) - if err != nil { - presenter.Errorf(cmd, "Error marshaling event: %v\n", err) - - return - } - - presenter.Printf(cmd, "%s\n", string(data)) - - case presenter.FormatRaw: - // Just print resource ID - presenter.Printf(cmd, "%s\n", event.GetResourceId()) - - case presenter.FormatHuman: - // Human-readable format - eventType := strings.TrimPrefix(event.GetType().String(), "EVENT_TYPE_") - - presenter.Printf(cmd, "[%s] %s: %s", - event.GetTimestamp().AsTime().Format("15:04:05"), - eventType, - event.GetResourceId()) - - if len(event.GetLabels()) > 0 { - presenter.Printf(cmd, " (labels: %s)", strings.Join(event.GetLabels(), ", ")) - } - - if len(event.GetMetadata()) > 0 { - presenter.Printf(cmd, " %v", event.GetMetadata()) - } - - presenter.Printf(cmd, "\n") - } -} - -// parseEventTypes converts string event type names to enum values. -func parseEventTypes(typeStrings []string) ([]eventsv1.EventType, error) { - if len(typeStrings) == 0 { - return nil, nil - } - - var eventTypes []eventsv1.EventType - - for _, typeStr := range typeStrings { - // Handle comma-separated values - parts := strings.Split(typeStr, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "" { - continue - } - - // Add EVENT_TYPE_ prefix if not present - if !strings.HasPrefix(part, "EVENT_TYPE_") { - part = "EVENT_TYPE_" + part - } - - // Parse enum value - enumValue, ok := eventsv1.EventType_value[part] - if !ok { - return nil, fmt.Errorf("unknown event type: %s (use one of: RECORD_PUSHED, RECORD_PULLED, etc.)", part) - } - - eventTypes = append(eventTypes, eventsv1.EventType(enumValue)) - } - } - - return eventTypes, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var listenCmd = &cobra.Command{ + Use: "listen", + Short: "Listen to real-time system events", + Long: `Listen to real-time system events with optional filtering. + +Events are streamed from the Directory server in real-time. +Only events occurring after subscription are delivered (no history). +The stream remains active until interrupted (Ctrl+C). + +Examples: + +1. Listen to all events: + dirctl events listen + +2. Filter by specific event types: + dirctl events listen --types RECORD_PUSHED,RECORD_PUBLISHED + +3. Filter by labels (AI-related records): + dirctl events listen --labels /skills/AI + +4. Filter by CID: + dirctl events listen --cids bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi + +5. 
Combine filters: + dirctl events listen --types RECORD_PUSHED --labels /skills/AI --output jsonl + +Available event types: +- Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED +- Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED +- Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED +- Sign: RECORD_SIGNED +`, + RunE: func(cmd *cobra.Command, _ []string) error { + return runListenCommand(cmd) + }, +} + +// Listen command options. +var listenOpts struct { + EventTypes []string + LabelFilters []string + CIDFilters []string +} + +func init() { + listenCmd.Flags().StringArrayVar(&listenOpts.EventTypes, "types", nil, + "Event types to filter (e.g., --types RECORD_PUSHED,RECORD_PUBLISHED)") + listenCmd.Flags().StringArrayVar(&listenOpts.LabelFilters, "labels", nil, + "Label filters (e.g., --labels /skills/AI --labels /domains/research)") + listenCmd.Flags().StringArrayVar(&listenOpts.CIDFilters, "cids", nil, + "CID filters (e.g., --cids bafyxxx)") +} + +func runListenCommand(cmd *cobra.Command) error { + // Get client from context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Parse event types from strings to enums + eventTypes, err := parseEventTypes(listenOpts.EventTypes) + if err != nil { + return fmt.Errorf("invalid event types: %w", err) + } + + // Build request + req := &eventsv1.ListenRequest{ + EventTypes: eventTypes, + LabelFilters: listenOpts.LabelFilters, + CidFilters: listenOpts.CIDFilters, + } + + // Start listening + result, err := c.ListenStream(cmd.Context(), req) + if err != nil { + return fmt.Errorf("failed to start event stream: %w", err) + } + + // Show metadata only in human format (route to stderr for structured formats) + opts := presenter.GetOutputOptions(cmd) + if opts.Format == presenter.FormatHuman { + presenter.Printf(cmd, "Listening to events (press Ctrl+C to stop)...\n") + + if len(eventTypes) > 0 { + presenter.Printf(cmd, "Event types: %v\n", listenOpts.EventTypes) + } + + if len(listenOpts.LabelFilters) > 0 { + presenter.Printf(cmd, "Label filters: %v\n", listenOpts.LabelFilters) + } + + if len(listenOpts.CIDFilters) > 0 { + presenter.Printf(cmd, "CID filters: %v\n", listenOpts.CIDFilters) + } + + presenter.Printf(cmd, "\n") + } + + // Stream events using StreamResult pattern + for { + select { + case resp := <-result.ResCh(): + event := resp.GetEvent() + if event != nil { + displayEvent(cmd, event) + } + case err := <-result.ErrCh(): + return fmt.Errorf("error receiving event: %w", err) + case <-result.DoneCh(): + // Stream ended normally + return nil + case <-cmd.Context().Done(): + // Return unwrapped context error so callers can check for context.Canceled + //nolint:wrapcheck + return cmd.Context().Err() + } + } +} + +// displayEvent formats and displays an event. 
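The select loop in runListenCommand above is worth calling out for reviewers: results, errors, and completion each arrive on their own channel, and context cancellation is returned unwrapped so callers can test for context.Canceled. The same fan-in shape applies to any StreamResult-style client. A reduced sketch, assuming `context` and `fmt` imports; the channel element types are illustrative, only the shape is taken from the code above:

// consume handles results until the stream errors, completes, or the
// context is cancelled, mirroring runListenCommand's select loop.
func consume[T any](
	ctx context.Context,
	res <-chan T,
	errs <-chan error,
	done <-chan struct{},
	handle func(T),
) error {
	for {
		select {
		case v := <-res:
			handle(v)
		case err := <-errs:
			return fmt.Errorf("stream error: %w", err)
		case <-done:
			return nil // stream ended normally
		case <-ctx.Done():
			return ctx.Err() // unwrapped, as in the command above
		}
	}
}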
+func displayEvent(cmd *cobra.Command, event *eventsv1.Event) { + // Get output options + opts := presenter.GetOutputOptions(cmd) + + switch opts.Format { + case presenter.FormatJSON: + // Pretty-printed JSON + data, err := json.MarshalIndent(event, "", " ") + if err != nil { + presenter.Errorf(cmd, "Error marshaling event: %v\n", err) + + return + } + + presenter.Printf(cmd, "%s\n", string(data)) + + case presenter.FormatJSONL: + // Compact JSON for streaming (newline-delimited) + data, err := json.Marshal(event) + if err != nil { + presenter.Errorf(cmd, "Error marshaling event: %v\n", err) + + return + } + + presenter.Printf(cmd, "%s\n", string(data)) + + case presenter.FormatRaw: + // Just print resource ID + presenter.Printf(cmd, "%s\n", event.GetResourceId()) + + case presenter.FormatHuman: + // Human-readable format + eventType := strings.TrimPrefix(event.GetType().String(), "EVENT_TYPE_") + + presenter.Printf(cmd, "[%s] %s: %s", + event.GetTimestamp().AsTime().Format("15:04:05"), + eventType, + event.GetResourceId()) + + if len(event.GetLabels()) > 0 { + presenter.Printf(cmd, " (labels: %s)", strings.Join(event.GetLabels(), ", ")) + } + + if len(event.GetMetadata()) > 0 { + presenter.Printf(cmd, " %v", event.GetMetadata()) + } + + presenter.Printf(cmd, "\n") + } +} + +// parseEventTypes converts string event type names to enum values. +func parseEventTypes(typeStrings []string) ([]eventsv1.EventType, error) { + if len(typeStrings) == 0 { + return nil, nil + } + + var eventTypes []eventsv1.EventType + + for _, typeStr := range typeStrings { + // Handle comma-separated values + parts := strings.Split(typeStr, ",") + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + + // Add EVENT_TYPE_ prefix if not present + if !strings.HasPrefix(part, "EVENT_TYPE_") { + part = "EVENT_TYPE_" + part + } + + // Parse enum value + enumValue, ok := eventsv1.EventType_value[part] + if !ok { + return nil, fmt.Errorf("unknown event type: %s (use one of: RECORD_PUSHED, RECORD_PULLED, etc.)", part) + } + + eventTypes = append(eventTypes, eventsv1.EventType(enumValue)) + } + } + + return eventTypes, nil +} diff --git a/cli/cmd/events/listen_test.go b/cli/cmd/events/listen_test.go index 8b1041dee..cdb432b88 100644 --- a/cli/cmd/events/listen_test.go +++ b/cli/cmd/events/listen_test.go @@ -1,586 +1,586 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "bytes" - "strings" - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// TestParseEventTypes_Empty tests parsing empty input. -func TestParseEventTypes_Empty(t *testing.T) { - result, err := parseEventTypes(nil) - require.NoError(t, err) - assert.Nil(t, result) - - result, err = parseEventTypes([]string{}) - require.NoError(t, err) - assert.Nil(t, result) -} - -// TestParseEventTypes_SingleType tests parsing a single event type. 
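One possible follow-up on the parseEventTypes error path earlier in this hunk: the hint string hard-codes two names ("RECORD_PUSHED, RECORD_PULLED, etc."), which can drift as event types are added. The list could instead be derived from the generated enum map. A sketch, not part of this change, assuming the conventional EVENT_TYPE_UNSPECIFIED zero value and `sort` and `strings` imports:

// validEventTypeNames returns user-facing names from the generated map,
// so the "unknown event type" hint never goes stale.
func validEventTypeNames() []string {
	names := make([]string, 0, len(eventsv1.EventType_value))
	for name := range eventsv1.EventType_value {
		if name == "EVENT_TYPE_UNSPECIFIED" {
			continue // assumed zero value; drop this guard if absent
		}
		names = append(names, strings.TrimPrefix(name, "EVENT_TYPE_"))
	}
	sort.Strings(names) // map iteration order is random; stabilize the message
	return names
}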
-func TestParseEventTypes_SingleType(t *testing.T) { - tests := []struct { - name string - input []string - expected []eventsv1.EventType - }{ - { - name: "without prefix", - input: []string{"RECORD_PUSHED"}, - expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }, - { - name: "with prefix", - input: []string{"EVENT_TYPE_RECORD_PUSHED"}, - expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }, - { - name: "with whitespace", - input: []string{" RECORD_PUSHED "}, - expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseEventTypes(tt.input) - require.NoError(t, err) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestParseEventTypes_MultipleTypes tests parsing multiple event types. -func TestParseEventTypes_MultipleTypes(t *testing.T) { - tests := []struct { - name string - input []string - expected []eventsv1.EventType - }{ - { - name: "comma-separated in single string", - input: []string{"RECORD_PUSHED,RECORD_PULLED"}, - expected: []eventsv1.EventType{ - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - }, - }, - { - name: "separate strings", - input: []string{"RECORD_PUSHED", "RECORD_PULLED"}, - expected: []eventsv1.EventType{ - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - }, - }, - { - name: "mixed with whitespace", - input: []string{"RECORD_PUSHED , RECORD_PULLED "}, - expected: []eventsv1.EventType{ - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - }, - }, - { - name: "all store event types", - input: []string{"RECORD_PUSHED,RECORD_PULLED,RECORD_DELETED"}, - expected: []eventsv1.EventType{ - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseEventTypes(tt.input) - require.NoError(t, err) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestParseEventTypes_AllEventTypes tests all valid event types. -func TestParseEventTypes_AllEventTypes(t *testing.T) { - tests := []struct { - input string - expected eventsv1.EventType - }{ - {"RECORD_PUSHED", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - {"RECORD_PULLED", eventsv1.EventType_EVENT_TYPE_RECORD_PULLED}, - {"RECORD_DELETED", eventsv1.EventType_EVENT_TYPE_RECORD_DELETED}, - {"RECORD_PUBLISHED", eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, - {"RECORD_UNPUBLISHED", eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED}, - {"SYNC_CREATED", eventsv1.EventType_EVENT_TYPE_SYNC_CREATED}, - {"SYNC_COMPLETED", eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED}, - {"SYNC_FAILED", eventsv1.EventType_EVENT_TYPE_SYNC_FAILED}, - {"RECORD_SIGNED", eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - result, err := parseEventTypes([]string{tt.input}) - require.NoError(t, err) - require.Len(t, result, 1) - assert.Equal(t, tt.expected, result[0]) - }) - } -} - -// TestParseEventTypes_EmptyStrings tests handling of empty strings. 
-func TestParseEventTypes_EmptyStrings(t *testing.T) { - tests := []struct { - name string - input []string - expected []eventsv1.EventType - }{ - { - name: "single empty string", - input: []string{""}, - expected: nil, - }, - { - name: "empty string in comma list", - input: []string{"RECORD_PUSHED,,RECORD_PULLED"}, - expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, eventsv1.EventType_EVENT_TYPE_RECORD_PULLED}, - }, - { - name: "whitespace only", - input: []string{" "}, - expected: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseEventTypes(tt.input) - require.NoError(t, err) - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestParseEventTypes_InvalidType tests error handling for invalid event types. -func TestParseEventTypes_InvalidType(t *testing.T) { - tests := []struct { - name string - input []string - errContains string - }{ - { - name: "completely invalid", - input: []string{"INVALID_TYPE"}, - errContains: "unknown event type", - }, - { - name: "typo in type", - input: []string{"RECORD_PUSHD"}, - errContains: "unknown event type", - }, - { - name: "invalid in list", - input: []string{"RECORD_PUSHED,INVALID_TYPE"}, - errContains: "unknown event type", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := parseEventTypes(tt.input) - require.Error(t, err) - assert.Nil(t, result) - assert.Contains(t, err.Error(), tt.errContains) - }) - } -} - -// createTestEvent creates a test event for display testing. -func createTestEvent(resourceID string, eventType eventsv1.EventType, labels []string, metadata map[string]string) *eventsv1.Event { - return &eventsv1.Event{ - Id: "test-event-id", - Type: eventType, - ResourceId: resourceID, - Timestamp: timestamppb.Now(), - Labels: labels, - Metadata: metadata, - } -} - -// TestDisplayEvent_FormatJSON tests JSON format output. -func TestDisplayEvent_FormatJSON(t *testing.T) { - event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "json", "") - require.NoError(t, cmd.Flags().Set("output", "json")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - assert.Contains(t, output, `"resource_id": "test-cid"`) - assert.Contains(t, output, `"id": "test-event-id"`) - // Pretty-printed JSON should have indentation - assert.Contains(t, output, " ") - assert.Contains(t, output, `"type"`) -} - -// TestDisplayEvent_FormatJSONL tests JSONL format output. -func TestDisplayEvent_FormatJSONL(t *testing.T) { - event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "jsonl", "") - require.NoError(t, cmd.Flags().Set("output", "jsonl")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - assert.Contains(t, output, `"resource_id":"test-cid"`) - assert.Contains(t, output, `"id":"test-event-id"`) - assert.Contains(t, output, `"type"`) - // JSONL should be compact (no extra whitespace) - lines := strings.Split(strings.TrimSpace(output), "\n") - assert.Len(t, lines, 1, "JSONL should be single line") -} - -// TestDisplayEvent_FormatRaw tests raw format output. 
-func TestDisplayEvent_FormatRaw(t *testing.T) { - event := createTestEvent("test-cid-123", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "raw", "") - require.NoError(t, cmd.Flags().Set("output", "raw")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := strings.TrimSpace(stdout.String()) - assert.Equal(t, "test-cid-123", output) -} - -// TestDisplayEvent_FormatHuman tests human-readable format. -func TestDisplayEvent_FormatHuman(t *testing.T) { - tests := []struct { - name string - event *eventsv1.Event - contains []string - }{ - { - name: "basic event", - event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil), - contains: []string{ - "RECORD_PUSHED", - "test-cid", - }, - }, - { - name: "event with labels", - event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, []string{"/skills/AI", "/domains/research"}, nil), - contains: []string{ - "RECORD_PUSHED", - "test-cid", - "labels: /skills/AI, /domains/research", - }, - }, - { - name: "event with metadata", - event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, map[string]string{"key": "value"}), - contains: []string{ - "RECORD_PUSHED", - "test-cid", - "key:value", - }, - }, - { - name: "event with labels and metadata", - event: createTestEvent( - "test-cid", - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - []string{"/skills/AI"}, - map[string]string{"key": "value"}, - ), - contains: []string{ - "RECORD_PUSHED", - "test-cid", - "labels: /skills/AI", - "key:value", - }, - }, - { - name: "different event type", - event: createTestEvent("sync-id", eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, nil, nil), - contains: []string{ - "SYNC_COMPLETED", - "sync-id", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "human", "") - require.NoError(t, cmd.Flags().Set("output", "human")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, tt.event) - - output := stdout.String() - for _, expected := range tt.contains { - assert.Contains(t, output, expected) - } - // Human format should include timestamp - assert.Contains(t, output, ":") - }) - } -} - -// TestDisplayEvent_AllFormats tests that all formats handle the same event. 
-func TestDisplayEvent_AllFormats(t *testing.T) { - event := createTestEvent( - "test-cid-complete", - eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - []string{"/skills/ML"}, - map[string]string{"source": "test"}, - ) - - formats := []struct { - format string - check func(t *testing.T, output string) - }{ - { - format: "json", - check: func(t *testing.T, output string) { - t.Helper() - assert.Contains(t, output, "test-cid-complete") - assert.Contains(t, output, `"type"`) - assert.Contains(t, output, "/skills/ML") - }, - }, - { - format: "jsonl", - check: func(t *testing.T, output string) { - t.Helper() - assert.Contains(t, output, "test-cid-complete") - lines := strings.Split(strings.TrimSpace(output), "\n") - assert.Len(t, lines, 1) - }, - }, - { - format: "raw", - check: func(t *testing.T, output string) { - t.Helper() - assert.Equal(t, "test-cid-complete", strings.TrimSpace(output)) - }, - }, - { - format: "human", - check: func(t *testing.T, output string) { - t.Helper() - assert.Contains(t, output, "test-cid-complete") - assert.Contains(t, output, "RECORD_PUBLISHED") - assert.Contains(t, output, "/skills/ML") - }, - }, - } - - for _, tt := range formats { - t.Run(tt.format, func(t *testing.T) { - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", tt.format, "") - require.NoError(t, cmd.Flags().Set("output", tt.format)) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - assert.NotEmpty(t, output) - tt.check(t, output) - }) - } -} - -// TestDisplayEvent_DefaultFormat tests default format is human. -func TestDisplayEvent_DefaultFormat(t *testing.T) { - event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - // Don't set output flag - should default to human - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - // Human format should contain event type without EVENT_TYPE_ prefix - assert.Contains(t, output, "RECORD_PUSHED") - assert.Contains(t, output, "test-cid") - // Should have timestamp format - assert.Contains(t, output, ":") -} - -// TestListenCmd_Initialization tests that listenCmd is properly initialized. -func TestListenCmd_Initialization(t *testing.T) { - assert.NotNil(t, listenCmd) - assert.Equal(t, "listen", listenCmd.Use) - assert.NotEmpty(t, listenCmd.Short) - assert.NotEmpty(t, listenCmd.Long) - assert.NotNil(t, listenCmd.RunE) - - // Check flags are registered - typesFlag := listenCmd.Flags().Lookup("types") - assert.NotNil(t, typesFlag) - - labelsFlag := listenCmd.Flags().Lookup("labels") - assert.NotNil(t, labelsFlag) - - cidsFlag := listenCmd.Flags().Lookup("cids") - assert.NotNil(t, cidsFlag) -} - -// TestListenOpts_Structure tests the listenOpts structure. 
-func TestListenOpts_Structure(t *testing.T) { - // Reset to ensure clean state - listenOpts.EventTypes = []string{"RECORD_PUSHED"} - listenOpts.LabelFilters = []string{"/skills/AI"} - listenOpts.CIDFilters = []string{"test-cid"} - - assert.Equal(t, []string{"RECORD_PUSHED"}, listenOpts.EventTypes) - assert.Equal(t, []string{"/skills/AI"}, listenOpts.LabelFilters) - assert.Equal(t, []string{"test-cid"}, listenOpts.CIDFilters) - - // Reset after test - listenOpts.EventTypes = nil - listenOpts.LabelFilters = nil - listenOpts.CIDFilters = nil -} - -// TestDisplayEvent_ErrorHandling tests error handling in displayEvent. -func TestDisplayEvent_ErrorHandling(t *testing.T) { - tests := []struct { - name string - format string - }{ - {"json error handling", "json"}, - {"jsonl error handling", "jsonl"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create event (even with nil, the function should handle it gracefully) - event := &eventsv1.Event{ - Id: "test", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - ResourceId: "test-cid", - Timestamp: timestamppb.Now(), - } - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - - var stderr bytes.Buffer - cmd.SetErr(&stderr) - cmd.Flags().String("output", tt.format, "") - require.NoError(t, cmd.Flags().Set("output", tt.format)) - - // Should not panic - displayEvent(cmd, event) - }) - } -} - -// TestDisplayEvent_HumanWithNoLabelsOrMetadata tests human format with minimal event. -func TestDisplayEvent_HumanWithNoLabelsOrMetadata(t *testing.T) { - event := createTestEvent("minimal-cid", eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "human", "") - require.NoError(t, cmd.Flags().Set("output", "human")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - assert.Contains(t, output, "RECORD_DELETED") - assert.Contains(t, output, "minimal-cid") - // Should not contain labels or metadata sections - assert.NotContains(t, output, "labels:") -} - -// TestDisplayEvent_AllEventTypes tests display for all event types. 
-func TestDisplayEvent_AllEventTypes(t *testing.T) { - eventTypes := []struct { - eventType eventsv1.EventType - name string - }{ - {eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "RECORD_PUSHED"}, - {eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, "RECORD_PULLED"}, - {eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, "RECORD_DELETED"}, - {eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "RECORD_PUBLISHED"}, - {eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, "RECORD_UNPUBLISHED"}, - {eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "SYNC_CREATED"}, - {eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, "SYNC_COMPLETED"}, - {eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, "SYNC_FAILED"}, - {eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, "RECORD_SIGNED"}, - } - - for _, tt := range eventTypes { - t.Run(tt.name, func(t *testing.T) { - event := createTestEvent("test-cid", tt.eventType, nil, nil) - - cmd := &cobra.Command{} - cmd.SetOut(&bytes.Buffer{}) - cmd.SetErr(&bytes.Buffer{}) - cmd.Flags().String("output", "human", "") - require.NoError(t, cmd.Flags().Set("output", "human")) - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEvent(cmd, event) - - output := stdout.String() - assert.Contains(t, output, tt.name) - assert.Contains(t, output, "test-cid") - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "bytes" + "strings" + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// TestParseEventTypes_Empty tests parsing empty input. +func TestParseEventTypes_Empty(t *testing.T) { + result, err := parseEventTypes(nil) + require.NoError(t, err) + assert.Nil(t, result) + + result, err = parseEventTypes([]string{}) + require.NoError(t, err) + assert.Nil(t, result) +} + +// TestParseEventTypes_SingleType tests parsing a single event type. +func TestParseEventTypes_SingleType(t *testing.T) { + tests := []struct { + name string + input []string + expected []eventsv1.EventType + }{ + { + name: "without prefix", + input: []string{"RECORD_PUSHED"}, + expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }, + { + name: "with prefix", + input: []string{"EVENT_TYPE_RECORD_PUSHED"}, + expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }, + { + name: "with whitespace", + input: []string{" RECORD_PUSHED "}, + expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseEventTypes(tt.input) + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestParseEventTypes_MultipleTypes tests parsing multiple event types. 
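Condensed from the parsing tests in this hunk, the input shapes parseEventTypes accepts are:

// parseEventTypes, by example (expected results per the tests below):
//
//	{"RECORD_PUSHED,RECORD_PULLED"}  -> both enums (comma-split)
//	{" RECORD_PUSHED "}              -> EVENT_TYPE_RECORD_PUSHED (trimmed)
//	{"EVENT_TYPE_RECORD_PUSHED"}     -> accepted; prefix not duplicated
//	{"RECORD_PUSHED,,RECORD_PULLED"} -> empty segments skipped
//	{""} or {"   "}                  -> nil result, no error
//	{"RECORD_PUSHD"}                 -> error "unknown event type"
//
// The lookup is case-sensitive: EventType_value is keyed by the
// upper-case generated names, so "record_pushed" also fails.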
+func TestParseEventTypes_MultipleTypes(t *testing.T) { + tests := []struct { + name string + input []string + expected []eventsv1.EventType + }{ + { + name: "comma-separated in single string", + input: []string{"RECORD_PUSHED,RECORD_PULLED"}, + expected: []eventsv1.EventType{ + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + }, + }, + { + name: "separate strings", + input: []string{"RECORD_PUSHED", "RECORD_PULLED"}, + expected: []eventsv1.EventType{ + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + }, + }, + { + name: "mixed with whitespace", + input: []string{"RECORD_PUSHED , RECORD_PULLED "}, + expected: []eventsv1.EventType{ + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + }, + }, + { + name: "all store event types", + input: []string{"RECORD_PUSHED,RECORD_PULLED,RECORD_DELETED"}, + expected: []eventsv1.EventType{ + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseEventTypes(tt.input) + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestParseEventTypes_AllEventTypes tests all valid event types. +func TestParseEventTypes_AllEventTypes(t *testing.T) { + tests := []struct { + input string + expected eventsv1.EventType + }{ + {"RECORD_PUSHED", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + {"RECORD_PULLED", eventsv1.EventType_EVENT_TYPE_RECORD_PULLED}, + {"RECORD_DELETED", eventsv1.EventType_EVENT_TYPE_RECORD_DELETED}, + {"RECORD_PUBLISHED", eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, + {"RECORD_UNPUBLISHED", eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED}, + {"SYNC_CREATED", eventsv1.EventType_EVENT_TYPE_SYNC_CREATED}, + {"SYNC_COMPLETED", eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED}, + {"SYNC_FAILED", eventsv1.EventType_EVENT_TYPE_SYNC_FAILED}, + {"RECORD_SIGNED", eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + result, err := parseEventTypes([]string{tt.input}) + require.NoError(t, err) + require.Len(t, result, 1) + assert.Equal(t, tt.expected, result[0]) + }) + } +} + +// TestParseEventTypes_EmptyStrings tests handling of empty strings. +func TestParseEventTypes_EmptyStrings(t *testing.T) { + tests := []struct { + name string + input []string + expected []eventsv1.EventType + }{ + { + name: "single empty string", + input: []string{""}, + expected: nil, + }, + { + name: "empty string in comma list", + input: []string{"RECORD_PUSHED,,RECORD_PULLED"}, + expected: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, eventsv1.EventType_EVENT_TYPE_RECORD_PULLED}, + }, + { + name: "whitespace only", + input: []string{" "}, + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseEventTypes(tt.input) + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestParseEventTypes_InvalidType tests error handling for invalid event types. 
+func TestParseEventTypes_InvalidType(t *testing.T) { + tests := []struct { + name string + input []string + errContains string + }{ + { + name: "completely invalid", + input: []string{"INVALID_TYPE"}, + errContains: "unknown event type", + }, + { + name: "typo in type", + input: []string{"RECORD_PUSHD"}, + errContains: "unknown event type", + }, + { + name: "invalid in list", + input: []string{"RECORD_PUSHED,INVALID_TYPE"}, + errContains: "unknown event type", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := parseEventTypes(tt.input) + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), tt.errContains) + }) + } +} + +// createTestEvent creates a test event for display testing. +func createTestEvent(resourceID string, eventType eventsv1.EventType, labels []string, metadata map[string]string) *eventsv1.Event { + return &eventsv1.Event{ + Id: "test-event-id", + Type: eventType, + ResourceId: resourceID, + Timestamp: timestamppb.Now(), + Labels: labels, + Metadata: metadata, + } +} + +// TestDisplayEvent_FormatJSON tests JSON format output. +func TestDisplayEvent_FormatJSON(t *testing.T) { + event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "json", "") + require.NoError(t, cmd.Flags().Set("output", "json")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + assert.Contains(t, output, `"resource_id": "test-cid"`) + assert.Contains(t, output, `"id": "test-event-id"`) + // Pretty-printed JSON should have indentation + assert.Contains(t, output, " ") + assert.Contains(t, output, `"type"`) +} + +// TestDisplayEvent_FormatJSONL tests JSONL format output. +func TestDisplayEvent_FormatJSONL(t *testing.T) { + event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "jsonl", "") + require.NoError(t, cmd.Flags().Set("output", "jsonl")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + assert.Contains(t, output, `"resource_id":"test-cid"`) + assert.Contains(t, output, `"id":"test-event-id"`) + assert.Contains(t, output, `"type"`) + // JSONL should be compact (no extra whitespace) + lines := strings.Split(strings.TrimSpace(output), "\n") + assert.Len(t, lines, 1, "JSONL should be single line") +} + +// TestDisplayEvent_FormatRaw tests raw format output. +func TestDisplayEvent_FormatRaw(t *testing.T) { + event := createTestEvent("test-cid-123", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "raw", "") + require.NoError(t, cmd.Flags().Set("output", "raw")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := strings.TrimSpace(stdout.String()) + assert.Equal(t, "test-cid-123", output) +} + +// TestDisplayEvent_FormatHuman tests human-readable format. 
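For orientation before the human-format cases: a line produced by displayEvent has the shape below (the clock time is hypothetical; the CID, label, and metadata values are the ones used in these tests), with the labels and metadata suffixes emitted only when present:

[14:32:07] RECORD_PUSHED: test-cid (labels: /skills/AI) map[key:value]

The metadata suffix uses Go's %v map rendering, which is why the assertions below look for "key:value" rather than JSON.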
+func TestDisplayEvent_FormatHuman(t *testing.T) { + tests := []struct { + name string + event *eventsv1.Event + contains []string + }{ + { + name: "basic event", + event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil), + contains: []string{ + "RECORD_PUSHED", + "test-cid", + }, + }, + { + name: "event with labels", + event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, []string{"/skills/AI", "/domains/research"}, nil), + contains: []string{ + "RECORD_PUSHED", + "test-cid", + "labels: /skills/AI, /domains/research", + }, + }, + { + name: "event with metadata", + event: createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, map[string]string{"key": "value"}), + contains: []string{ + "RECORD_PUSHED", + "test-cid", + "key:value", + }, + }, + { + name: "event with labels and metadata", + event: createTestEvent( + "test-cid", + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + []string{"/skills/AI"}, + map[string]string{"key": "value"}, + ), + contains: []string{ + "RECORD_PUSHED", + "test-cid", + "labels: /skills/AI", + "key:value", + }, + }, + { + name: "different event type", + event: createTestEvent("sync-id", eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, nil, nil), + contains: []string{ + "SYNC_COMPLETED", + "sync-id", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "human", "") + require.NoError(t, cmd.Flags().Set("output", "human")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, tt.event) + + output := stdout.String() + for _, expected := range tt.contains { + assert.Contains(t, output, expected) + } + // Human format should include timestamp + assert.Contains(t, output, ":") + }) + } +} + +// TestDisplayEvent_AllFormats tests that all formats handle the same event. 
+func TestDisplayEvent_AllFormats(t *testing.T) { + event := createTestEvent( + "test-cid-complete", + eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + []string{"/skills/ML"}, + map[string]string{"source": "test"}, + ) + + formats := []struct { + format string + check func(t *testing.T, output string) + }{ + { + format: "json", + check: func(t *testing.T, output string) { + t.Helper() + assert.Contains(t, output, "test-cid-complete") + assert.Contains(t, output, `"type"`) + assert.Contains(t, output, "/skills/ML") + }, + }, + { + format: "jsonl", + check: func(t *testing.T, output string) { + t.Helper() + assert.Contains(t, output, "test-cid-complete") + lines := strings.Split(strings.TrimSpace(output), "\n") + assert.Len(t, lines, 1) + }, + }, + { + format: "raw", + check: func(t *testing.T, output string) { + t.Helper() + assert.Equal(t, "test-cid-complete", strings.TrimSpace(output)) + }, + }, + { + format: "human", + check: func(t *testing.T, output string) { + t.Helper() + assert.Contains(t, output, "test-cid-complete") + assert.Contains(t, output, "RECORD_PUBLISHED") + assert.Contains(t, output, "/skills/ML") + }, + }, + } + + for _, tt := range formats { + t.Run(tt.format, func(t *testing.T) { + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", tt.format, "") + require.NoError(t, cmd.Flags().Set("output", tt.format)) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + assert.NotEmpty(t, output) + tt.check(t, output) + }) + } +} + +// TestDisplayEvent_DefaultFormat tests default format is human. +func TestDisplayEvent_DefaultFormat(t *testing.T) { + event := createTestEvent("test-cid", eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + // Don't set output flag - should default to human + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + // Human format should contain event type without EVENT_TYPE_ prefix + assert.Contains(t, output, "RECORD_PUSHED") + assert.Contains(t, output, "test-cid") + // Should have timestamp format + assert.Contains(t, output, ":") +} + +// TestListenCmd_Initialization tests that listenCmd is properly initialized. +func TestListenCmd_Initialization(t *testing.T) { + assert.NotNil(t, listenCmd) + assert.Equal(t, "listen", listenCmd.Use) + assert.NotEmpty(t, listenCmd.Short) + assert.NotEmpty(t, listenCmd.Long) + assert.NotNil(t, listenCmd.RunE) + + // Check flags are registered + typesFlag := listenCmd.Flags().Lookup("types") + assert.NotNil(t, typesFlag) + + labelsFlag := listenCmd.Flags().Lookup("labels") + assert.NotNil(t, labelsFlag) + + cidsFlag := listenCmd.Flags().Lookup("cids") + assert.NotNil(t, cidsFlag) +} + +// TestListenOpts_Structure tests the listenOpts structure. 
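Each display test in this file repeats the same scaffolding: build a bare cobra.Command, attach buffers, register and set the --output flag. If the suite grows further, a shared helper would cut the repetition. A sketch using the file's existing imports; newTestCmd is a suggested name, not something this patch adds:

// newTestCmd returns a command whose stdout is captured in the returned
// buffer and whose --output flag is pre-set to format.
func newTestCmd(t *testing.T, format string) (*cobra.Command, *bytes.Buffer) {
	t.Helper()
	cmd := &cobra.Command{}
	var stdout bytes.Buffer
	cmd.SetOut(&stdout)
	cmd.SetErr(&bytes.Buffer{})
	cmd.Flags().String("output", format, "")
	require.NoError(t, cmd.Flags().Set("output", format))
	return cmd, &stdout
}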
+func TestListenOpts_Structure(t *testing.T) { + // Reset to ensure clean state + listenOpts.EventTypes = []string{"RECORD_PUSHED"} + listenOpts.LabelFilters = []string{"/skills/AI"} + listenOpts.CIDFilters = []string{"test-cid"} + + assert.Equal(t, []string{"RECORD_PUSHED"}, listenOpts.EventTypes) + assert.Equal(t, []string{"/skills/AI"}, listenOpts.LabelFilters) + assert.Equal(t, []string{"test-cid"}, listenOpts.CIDFilters) + + // Reset after test + listenOpts.EventTypes = nil + listenOpts.LabelFilters = nil + listenOpts.CIDFilters = nil +} + +// TestDisplayEvent_ErrorHandling tests error handling in displayEvent. +func TestDisplayEvent_ErrorHandling(t *testing.T) { + tests := []struct { + name string + format string + }{ + {"json error handling", "json"}, + {"jsonl error handling", "jsonl"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create event (even with nil, the function should handle it gracefully) + event := &eventsv1.Event{ + Id: "test", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + ResourceId: "test-cid", + Timestamp: timestamppb.Now(), + } + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + + var stderr bytes.Buffer + cmd.SetErr(&stderr) + cmd.Flags().String("output", tt.format, "") + require.NoError(t, cmd.Flags().Set("output", tt.format)) + + // Should not panic + displayEvent(cmd, event) + }) + } +} + +// TestDisplayEvent_HumanWithNoLabelsOrMetadata tests human format with minimal event. +func TestDisplayEvent_HumanWithNoLabelsOrMetadata(t *testing.T) { + event := createTestEvent("minimal-cid", eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "human", "") + require.NoError(t, cmd.Flags().Set("output", "human")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + assert.Contains(t, output, "RECORD_DELETED") + assert.Contains(t, output, "minimal-cid") + // Should not contain labels or metadata sections + assert.NotContains(t, output, "labels:") +} + +// TestDisplayEvent_AllEventTypes tests display for all event types. 
+func TestDisplayEvent_AllEventTypes(t *testing.T) { + eventTypes := []struct { + eventType eventsv1.EventType + name string + }{ + {eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "RECORD_PUSHED"}, + {eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, "RECORD_PULLED"}, + {eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, "RECORD_DELETED"}, + {eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "RECORD_PUBLISHED"}, + {eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, "RECORD_UNPUBLISHED"}, + {eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "SYNC_CREATED"}, + {eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, "SYNC_COMPLETED"}, + {eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, "SYNC_FAILED"}, + {eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, "RECORD_SIGNED"}, + } + + for _, tt := range eventTypes { + t.Run(tt.name, func(t *testing.T) { + event := createTestEvent("test-cid", tt.eventType, nil, nil) + + cmd := &cobra.Command{} + cmd.SetOut(&bytes.Buffer{}) + cmd.SetErr(&bytes.Buffer{}) + cmd.Flags().String("output", "human", "") + require.NoError(t, cmd.Flags().Set("output", "human")) + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEvent(cmd, event) + + output := stdout.String() + assert.Contains(t, output, tt.name) + assert.Contains(t, output, "test-cid") + }) + } +} diff --git a/cli/cmd/import/import.go b/cli/cmd/import/import.go index 78e236ee8..f9aac7608 100644 --- a/cli/cmd/import/import.go +++ b/cli/cmd/import/import.go @@ -1,147 +1,147 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package importcmd - -import ( - "errors" - "fmt" - - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/enricher" - _ "github.com/agntcy/dir/importer/mcp" // Import MCP importer to trigger its init() function for auto-registration. - "github.com/agntcy/dir/importer/types" - "github.com/agntcy/dir/importer/types/factory" - "github.com/spf13/cobra" -) - -var ( - cfg config.Config - registryType string -) - -var Command = &cobra.Command{ - Use: "import", - Short: "Import records from external registries", - Long: `Import records from external registries into DIR. - -Supported registries: - - mcp: Model Context Protocol registry v0.1 - -The import command fetches records from the specified registry and pushes -them to DIR. 
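Since this import.go hunk deletes and re-adds the whole file, the control flow is easy to lose in the churn. Paraphrased from the re-added code, with error handling elided:

// runImport, condensed:
//
//	c, _ := ctxUtils.GetClientFromContext(cmd.Context()) // client injected upstream
//	cfg.RegistryType = config.RegistryType(registryType) // from --type
//	_ = cfg.Validate()                                    // reject bad flag combos
//	importer, _ := factory.Create(c, cfg)                 // registry-specific (mcp today)
//	result, _ := importer.Run(cmd.Context(), cfg)         // fetch, optional enrich, push
//	printSummary(cmd, result)                             // totals, then first 10 errors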
- -Examples: - # Import from MCP registry - dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io - - # Import with filters - # Available filters: https://registry.modelcontextprotocol.io/docs#/operations/list-servers-v0.1#Query-Parameters - dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --filter=updated_since=2025-08-07T13:15:04.280Z - - # Preview without importing - dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --dry-run - - # Enable LLM-based enrichment with default configuration - dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --enrich - - # Use custom MCPHost configuration and prompt template - dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --enrich \ - --enrich-config=/path/to/mcphost.json \ - --enrich-prompt=/path/to/custom-prompt.md -`, - RunE: func(cmd *cobra.Command, args []string) error { - return runImport(cmd) - }, -} - -func init() { - // Add flags - Command.Flags().StringVar(®istryType, "type", "", "Registry type (mcp, a2a)") - Command.Flags().StringVar(&cfg.RegistryURL, "url", "", "Registry base URL") - Command.Flags().StringToStringVar(&cfg.Filters, "filter", nil, "Filters (key=value)") - Command.Flags().IntVar(&cfg.Limit, "limit", 0, "Maximum number of records to import (0 = no limit)") - Command.Flags().BoolVar(&cfg.DryRun, "dry-run", false, "Preview without importing") - Command.Flags().BoolVar(&cfg.Force, "force", false, "Force push even if record already exists") - Command.Flags().BoolVar(&cfg.Debug, "debug", false, "Enable debug output for deduplication and validation failures") - - Command.Flags().BoolVar(&cfg.Enrich, "enrich", false, "Enrich the records with LLM") - Command.Flags().StringVar(&cfg.EnricherConfigFile, "enrich-config", enricher.DefaultConfigFile, "Path to MCPHost configuration file (mcphost.json)") - Command.Flags().StringVar(&cfg.EnricherSkillsPromptTemplate, "enrich-skills-prompt", "", "Optional: path to custom skills prompt template file or inline prompt (empty = use default)") - Command.Flags().StringVar(&cfg.EnricherDomainsPromptTemplate, "enrich-domains-prompt", "", "Optional: path to custom domains prompt template file or inline prompt (empty = use default)") - - // Mark required flags - Command.MarkFlagRequired("type") //nolint:errcheck - Command.MarkFlagRequired("url") //nolint:errcheck -} - -func runImport(cmd *cobra.Command) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Set the registry type from the string flag - cfg.RegistryType = config.RegistryType(registryType) - - // Validate configuration - if err := cfg.Validate(); err != nil { - return fmt.Errorf("invalid configuration: %w", err) - } - - // Create importer instance from pre-initialized factory, passing client separately - importer, err := factory.Create(c, cfg) - if err != nil { - return fmt.Errorf("failed to create importer: %w", err) - } - - // Run import with progress reporting - presenter.Printf(cmd, "Starting import from %s registry at %s...\n", cfg.RegistryType, cfg.RegistryURL) - - if cfg.DryRun { - presenter.Printf(cmd, "Mode: DRY RUN (preview only)\n") - } - - presenter.Printf(cmd, "\n") - - result, err := importer.Run(cmd.Context(), cfg) - if err != nil { - return fmt.Errorf("import failed: %w", err) - } - - // Print summary - printSummary(cmd, result) - - return nil -} - -func printSummary(cmd *cobra.Command, result 
*types.ImportResult) {
-	maxErrors := 10
-
-	presenter.Printf(cmd, "\n=== Import Summary ===\n")
-	presenter.Printf(cmd, "Total records:  %d\n", result.TotalRecords)
-	presenter.Printf(cmd, "Imported:       %d\n", result.ImportedCount)
-	presenter.Printf(cmd, "Skipped:        %d\n", result.SkippedCount)
-	presenter.Printf(cmd, "Failed:         %d\n", result.FailedCount)
-
-	if len(result.Errors) > 0 {
-		presenter.Printf(cmd, "\n=== Errors ===\n")
-
-		for i, err := range result.Errors {
-			if i < maxErrors { // Show only first 10 errors
-				presenter.Printf(cmd, "  - %v\n", err)
-			}
-		}
-
-		if len(result.Errors) > maxErrors {
-			presenter.Printf(cmd, "  ... and %d more errors\n", len(result.Errors)-maxErrors)
-		}
-	}
-
-	if cfg.DryRun {
-		presenter.Printf(cmd, "\nNote: This was a dry run. No records were actually imported.\n")
-	}
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package importcmd
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/agntcy/dir/cli/presenter"
+	ctxUtils "github.com/agntcy/dir/cli/util/context"
+	"github.com/agntcy/dir/importer/config"
+	"github.com/agntcy/dir/importer/enricher"
+	_ "github.com/agntcy/dir/importer/mcp" // Import MCP importer to trigger its init() function for auto-registration.
+	"github.com/agntcy/dir/importer/types"
+	"github.com/agntcy/dir/importer/types/factory"
+	"github.com/spf13/cobra"
+)
+
+var (
+	cfg          config.Config
+	registryType string
+)
+
+var Command = &cobra.Command{
+	Use:   "import",
+	Short: "Import records from external registries",
+	Long: `Import records from external registries into DIR.
+
+Supported registries:
+  - mcp: Model Context Protocol registry v0.1
+
+The import command fetches records from the specified registry and pushes
+them to DIR.
+
+Examples:
+  # Import from MCP registry
+  dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io
+
+  # Import with filters
+  # Available filters: https://registry.modelcontextprotocol.io/docs#/operations/list-servers-v0.1#Query-Parameters
+  dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --filter=updated_since=2025-08-07T13:15:04.280Z
+
+  # Preview without importing
+  dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --dry-run
+
+  # Enable LLM-based enrichment with default configuration
+  dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --enrich
+
+  # Use custom MCPHost configuration and prompt template
+  dirctl import --type=mcp --url=https://registry.modelcontextprotocol.io --enrich \
+    --enrich-config=/path/to/mcphost.json \
+    --enrich-skills-prompt=/path/to/custom-prompt.md
+`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runImport(cmd)
+	},
+}
+
+func init() {
+	// Add flags
+	Command.Flags().StringVar(&registryType, "type", "", "Registry type (mcp, a2a)")
+	Command.Flags().StringVar(&cfg.RegistryURL, "url", "", "Registry base URL")
+	Command.Flags().StringToStringVar(&cfg.Filters, "filter", nil, "Filters (key=value)")
+	Command.Flags().IntVar(&cfg.Limit, "limit", 0, "Maximum number of records to import (0 = no limit)")
+	Command.Flags().BoolVar(&cfg.DryRun, "dry-run", false, "Preview without importing")
+	Command.Flags().BoolVar(&cfg.Force, "force", false, "Force push even if record already exists")
+	Command.Flags().BoolVar(&cfg.Debug, "debug", false, "Enable debug output for deduplication and validation failures")
+
+	Command.Flags().BoolVar(&cfg.Enrich, "enrich", false, "Enrich the records with LLM")
+	Command.Flags().StringVar(&cfg.EnricherConfigFile,
"enrich-config", enricher.DefaultConfigFile, "Path to MCPHost configuration file (mcphost.json)") + Command.Flags().StringVar(&cfg.EnricherSkillsPromptTemplate, "enrich-skills-prompt", "", "Optional: path to custom skills prompt template file or inline prompt (empty = use default)") + Command.Flags().StringVar(&cfg.EnricherDomainsPromptTemplate, "enrich-domains-prompt", "", "Optional: path to custom domains prompt template file or inline prompt (empty = use default)") + + // Mark required flags + Command.MarkFlagRequired("type") //nolint:errcheck + Command.MarkFlagRequired("url") //nolint:errcheck +} + +func runImport(cmd *cobra.Command) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Set the registry type from the string flag + cfg.RegistryType = config.RegistryType(registryType) + + // Validate configuration + if err := cfg.Validate(); err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + // Create importer instance from pre-initialized factory, passing client separately + importer, err := factory.Create(c, cfg) + if err != nil { + return fmt.Errorf("failed to create importer: %w", err) + } + + // Run import with progress reporting + presenter.Printf(cmd, "Starting import from %s registry at %s...\n", cfg.RegistryType, cfg.RegistryURL) + + if cfg.DryRun { + presenter.Printf(cmd, "Mode: DRY RUN (preview only)\n") + } + + presenter.Printf(cmd, "\n") + + result, err := importer.Run(cmd.Context(), cfg) + if err != nil { + return fmt.Errorf("import failed: %w", err) + } + + // Print summary + printSummary(cmd, result) + + return nil +} + +func printSummary(cmd *cobra.Command, result *types.ImportResult) { + maxErrors := 10 + + presenter.Printf(cmd, "\n=== Import Summary ===\n") + presenter.Printf(cmd, "Total records: %d\n", result.TotalRecords) + presenter.Printf(cmd, "Imported: %d\n", result.ImportedCount) + presenter.Printf(cmd, "Skipped: %d\n", result.SkippedCount) + presenter.Printf(cmd, "Failed: %d\n", result.FailedCount) + + if len(result.Errors) > 0 { + presenter.Printf(cmd, "\n=== Errors ===\n") + + for i, err := range result.Errors { + if i < maxErrors { // Show only first 10 errors + presenter.Printf(cmd, " - %v\n", err) + } + } + + if len(result.Errors) > maxErrors { + presenter.Printf(cmd, " ... and %d more errors\n", len(result.Errors)-maxErrors) + } + } + + if cfg.DryRun { + presenter.Printf(cmd, "\nNote: This was a dry run. No records were actually imported.\n") + } +} diff --git a/cli/cmd/info/info.go b/cli/cmd/info/info.go index fa50e0965..827a924a8 100644 --- a/cli/cmd/info/info.go +++ b/cli/cmd/info/info.go @@ -1,68 +1,68 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package info - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -func init() { - // Add output format flags - presenter.AddOutputFlags(Command) -} - -var Command = &cobra.Command{ - Use: "info", - Short: "Check info about an object in Directory store", - Long: `Lookup and get basic metadata about an object pushed to the Directory store. - -Usage examples: - -1. Get info about a record: - - dirctl info - -2. 
Output formats: - - # Get info as JSON - dirctl info --output json - - # Get raw info data - dirctl info --output raw - -`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("exactly one argument is required which is the cid of the object") - } - - return runCommand(cmd, args[0]) - }, -} - -func runCommand(cmd *cobra.Command, cid string) error { - // Get the client from the context. - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Fetch info from store - info, err := c.Lookup(cmd.Context(), &corev1.RecordRef{ - Cid: cid, - }) - if err != nil { - return fmt.Errorf("failed to pull data: %w", err) - } - - // Output in the appropriate format - return presenter.PrintMessage(cmd, "info", "Record information", info) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package info + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +func init() { + // Add output format flags + presenter.AddOutputFlags(Command) +} + +var Command = &cobra.Command{ + Use: "info", + Short: "Check info about an object in Directory store", + Long: `Lookup and get basic metadata about an object pushed to the Directory store. + +Usage examples: + +1. Get info about a record: + + dirctl info + +2. Output formats: + + # Get info as JSON + dirctl info --output json + + # Get raw info data + dirctl info --output raw + +`, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("exactly one argument is required which is the cid of the object") + } + + return runCommand(cmd, args[0]) + }, +} + +func runCommand(cmd *cobra.Command, cid string) error { + // Get the client from the context. + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Fetch info from store + info, err := c.Lookup(cmd.Context(), &corev1.RecordRef{ + Cid: cid, + }) + if err != nil { + return fmt.Errorf("failed to pull data: %w", err) + } + + // Output in the appropriate format + return presenter.PrintMessage(cmd, "info", "Record information", info) +} diff --git a/cli/cmd/mcp/mcp.go b/cli/cmd/mcp/mcp.go index 23f8bbb79..939970e8d 100644 --- a/cli/cmd/mcp/mcp.go +++ b/cli/cmd/mcp/mcp.go @@ -1,32 +1,32 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "mcp", - Short: "Model Context Protocol (MCP) server operations", - Long: `Model Context Protocol (MCP) server operations. - -This command group provides access to MCP server functionality: - -- serve: Run the MCP server for Directory operations - -The MCP server enables AI assistants and other tools to interact with -the Directory through a standardized protocol. - -Examples: - -1. 
Start the MCP server: - dirctl mcp serve -`, -} - -func init() { - // Add MCP subcommands - Command.AddCommand(serveCmd) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "mcp", + Short: "Model Context Protocol (MCP) server operations", + Long: `Model Context Protocol (MCP) server operations. + +This command group provides access to MCP server functionality: + +- serve: Run the MCP server for Directory operations + +The MCP server enables AI assistants and other tools to interact with +the Directory through a standardized protocol. + +Examples: + +1. Start the MCP server: + dirctl mcp serve +`, +} + +func init() { + // Add MCP subcommands + Command.AddCommand(serveCmd) +} diff --git a/cli/cmd/mcp/serve.go b/cli/cmd/mcp/serve.go index 43533660e..79058855c 100644 --- a/cli/cmd/mcp/serve.go +++ b/cli/cmd/mcp/serve.go @@ -1,27 +1,27 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "github.com/agntcy/dir/mcp/server" - "github.com/spf13/cobra" -) - -var serveCmd = &cobra.Command{ - Use: "serve", - Short: "Start the MCP server", - Long: `Start the Model Context Protocol (MCP) server for Directory operations. - -The MCP server enables AI assistants and other tools to interact with -the Directory through a standardized protocol over stdin/stdout. - -Examples: - -1. Start the MCP server: - dirctl mcp serve -`, - RunE: func(cmd *cobra.Command, _ []string) error { - return server.Serve(cmd.Context()) - }, -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "github.com/agntcy/dir/mcp/server" + "github.com/spf13/cobra" +) + +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "Start the MCP server", + Long: `Start the Model Context Protocol (MCP) server for Directory operations. + +The MCP server enables AI assistants and other tools to interact with +the Directory through a standardized protocol over stdin/stdout. + +Examples: + +1. Start the MCP server: + dirctl mcp serve +`, + RunE: func(cmd *cobra.Command, _ []string) error { + return server.Serve(cmd.Context()) + }, +} diff --git a/cli/cmd/network/info/info.go b/cli/cmd/network/info/info.go index 70924bb4f..514db9223 100644 --- a/cli/cmd/network/info/info.go +++ b/cli/cmd/network/info/info.go @@ -1,79 +1,79 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package info - -import ( - "crypto/ed25519" - "errors" - "fmt" - "os" - - "github.com/agntcy/dir/cli/presenter" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/spf13/cobra" - "golang.org/x/crypto/ssh" -) - -var Command = &cobra.Command{ - Use: "info", - Short: "Generates the peer id from a private key, enabling connection to the DHT network", - Long: `This command requires a private key stored on the host filesystem. From this key -a peer id will be generated that is needed for the host to connect to the network. - -Usage examples: - -1. 
Get peer id from a private key: - - dirctl network info - -`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("expected exactly one argument") - } - - if args[0] == "" { - return errors.New("expected a non-empty argument") - } - - return runCommand(cmd, args[0]) - }, -} - -func runCommand(cmd *cobra.Command, path string) error { - // Read the SSH key file - keyBytes, err := os.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read key file: %w", err) - } - - // Parse the private key - key, err := ssh.ParseRawPrivateKey(keyBytes) - if err != nil { - return fmt.Errorf("failed to parse private key: %w", err) - } - - // Try to convert to ED25519 private key - ed25519Key, ok := key.(ed25519.PrivateKey) - if !ok { - return errors.New("key is not an ED25519 private key") - } - - // Generate a private key from bytes - generatedKey, err := crypto.UnmarshalEd25519PrivateKey(ed25519Key) - if err != nil { - return fmt.Errorf("failed to unmarshal identity key: %w", err) - } - - // Generate Peer ID from the public key - ID, err := peer.IDFromPublicKey(generatedKey.GetPublic()) - if err != nil { - return fmt.Errorf("failed to generate peer ID from public key: %w", err) - } - - presenter.Printf(cmd, "%s\n", ID) - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package info + +import ( + "crypto/ed25519" + "errors" + "fmt" + "os" + + "github.com/agntcy/dir/cli/presenter" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh" +) + +var Command = &cobra.Command{ + Use: "info", + Short: "Generates the peer id from a private key, enabling connection to the DHT network", + Long: `This command requires a private key stored on the host filesystem. From this key +a peer id will be generated that is needed for the host to connect to the network. + +Usage examples: + +1. 
Get peer id from a private key: + + dirctl network info + +`, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("expected exactly one argument") + } + + if args[0] == "" { + return errors.New("expected a non-empty argument") + } + + return runCommand(cmd, args[0]) + }, +} + +func runCommand(cmd *cobra.Command, path string) error { + // Read the SSH key file + keyBytes, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read key file: %w", err) + } + + // Parse the private key + key, err := ssh.ParseRawPrivateKey(keyBytes) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + + // Try to convert to ED25519 private key + ed25519Key, ok := key.(ed25519.PrivateKey) + if !ok { + return errors.New("key is not an ED25519 private key") + } + + // Generate a private key from bytes + generatedKey, err := crypto.UnmarshalEd25519PrivateKey(ed25519Key) + if err != nil { + return fmt.Errorf("failed to unmarshal identity key: %w", err) + } + + // Generate Peer ID from the public key + ID, err := peer.IDFromPublicKey(generatedKey.GetPublic()) + if err != nil { + return fmt.Errorf("failed to generate peer ID from public key: %w", err) + } + + presenter.Printf(cmd, "%s\n", ID) + + return nil +} diff --git a/cli/cmd/network/init/init.go b/cli/cmd/network/init/init.go index a241fddd7..4e1f77a9d 100644 --- a/cli/cmd/network/init/init.go +++ b/cli/cmd/network/init/init.go @@ -1,106 +1,106 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package init - -import ( - "crypto/ed25519" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "fmt" - "os" - "path/filepath" - - "github.com/agntcy/dir/cli/presenter" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "init", - Short: "Generates the peer id from a newly generated private key, enabling connection to the DHT network", - Long: `This command generates a peer id from a newly generated private key. From this key -a peer id will be generated that is needed for the host to connect to the network. - -Usage examples: - -1. Generate peer id from a newly generated private key and save the key to the default location (~/.agntcy/dir/generated.key): - - dirctl network init - -2. 
Generate peer id from a newly generated private key and save the key to a file: - - dirctl network init --output /path/to/private/key.pem - -`, - RunE: func(cmd *cobra.Command, _ []string) error { - return runCommand(cmd) - }, -} - -func runCommand(cmd *cobra.Command) error { - publicKey, privateKey, err := GenerateED25519OpenSSLKey() - if err != nil { - return fmt.Errorf("failed to generate ED25519 key pair: %w", err) - } - - var filePath string - if opts.Output != "" { - filePath = opts.Output - } else { - homeDir, err := os.UserHomeDir() - if err != nil { - return fmt.Errorf("error getting home directory: %w", err) - } - - filePath = filepath.Join(homeDir, ".agntcy/dir/generated.key") - } - - err = os.MkdirAll(filepath.Dir(filePath), 0o0755) //nolint:mnd - if err != nil { - return fmt.Errorf("failed to create directory: %w", err) - } - - err = os.WriteFile(filePath, privateKey, 0o600) //nolint:mnd - if err != nil { - return fmt.Errorf("failed to write private key to file: %w", err) - } - - pubKey, err := crypto.UnmarshalEd25519PublicKey(publicKey) - if err != nil { - return fmt.Errorf("failed to unmarshal public key: %w", err) - } - - ID, err := peer.IDFromPublicKey(pubKey) - if err != nil { - return fmt.Errorf("failed to generate peer ID from public key: %w", err) - } - - presenter.Printf(cmd, "%s\n", ID) - - return nil -} - -func GenerateED25519OpenSSLKey() (ed25519.PublicKey, []byte, error) { - // Generate ED25519 key pair - publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate ED25519 key pair: %w", err) - } - - // Marshal the private key to PKCS#8 format - privBytes, err := x509.MarshalPKCS8PrivateKey(privateKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to marshal private key: %w", err) - } - - // Create PEM block - pemBlock := &pem.Block{ - Type: "PRIVATE KEY", - Bytes: privBytes, - } - - return publicKey, pem.EncodeToMemory(pemBlock), nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package init + +import ( + "crypto/ed25519" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "os" + "path/filepath" + + "github.com/agntcy/dir/cli/presenter" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "init", + Short: "Generates the peer id from a newly generated private key, enabling connection to the DHT network", + Long: `This command generates a peer id from a newly generated private key. From this key +a peer id will be generated that is needed for the host to connect to the network. + +Usage examples: + +1. Generate peer id from a newly generated private key and save the key to the default location (~/.agntcy/dir/generated.key): + + dirctl network init + +2. 
Generate peer id from a newly generated private key and save the key to a file: + + dirctl network init --output /path/to/private/key.pem + +`, + RunE: func(cmd *cobra.Command, _ []string) error { + return runCommand(cmd) + }, +} + +func runCommand(cmd *cobra.Command) error { + publicKey, privateKey, err := GenerateED25519OpenSSLKey() + if err != nil { + return fmt.Errorf("failed to generate ED25519 key pair: %w", err) + } + + var filePath string + if opts.Output != "" { + filePath = opts.Output + } else { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("error getting home directory: %w", err) + } + + filePath = filepath.Join(homeDir, ".agntcy/dir/generated.key") + } + + err = os.MkdirAll(filepath.Dir(filePath), 0o0755) //nolint:mnd + if err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + + err = os.WriteFile(filePath, privateKey, 0o600) //nolint:mnd + if err != nil { + return fmt.Errorf("failed to write private key to file: %w", err) + } + + pubKey, err := crypto.UnmarshalEd25519PublicKey(publicKey) + if err != nil { + return fmt.Errorf("failed to unmarshal public key: %w", err) + } + + ID, err := peer.IDFromPublicKey(pubKey) + if err != nil { + return fmt.Errorf("failed to generate peer ID from public key: %w", err) + } + + presenter.Printf(cmd, "%s\n", ID) + + return nil +} + +func GenerateED25519OpenSSLKey() (ed25519.PublicKey, []byte, error) { + // Generate ED25519 key pair + publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate ED25519 key pair: %w", err) + } + + // Marshal the private key to PKCS#8 format + privBytes, err := x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal private key: %w", err) + } + + // Create PEM block + pemBlock := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: privBytes, + } + + return publicKey, pem.EncodeToMemory(pemBlock), nil +} diff --git a/cli/cmd/network/init/options.go b/cli/cmd/network/init/options.go index 93647cd19..fc8d3ba6f 100644 --- a/cli/cmd/network/init/options.go +++ b/cli/cmd/network/init/options.go @@ -1,15 +1,15 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package init - -var opts = &options{} - -type options struct { - Output string -} - -func init() { - flags := Command.Flags() - flags.StringVarP(&opts.Output, "output", "o", "", "Path to the output file, where the generated private key will be stored.") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package init + +var opts = &options{} + +type options struct { + Output string +} + +func init() { + flags := Command.Flags() + flags.StringVarP(&opts.Output, "output", "o", "", "Path to the output file, where the generated private key will be stored.") +} diff --git a/cli/cmd/network/network.go b/cli/cmd/network/network.go index 1270fde3d..50be870a4 100644 --- a/cli/cmd/network/network.go +++ b/cli/cmd/network/network.go @@ -1,23 +1,23 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - infoCmd "github.com/agntcy/dir/cli/cmd/network/info" - initCmd "github.com/agntcy/dir/cli/cmd/network/init" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "network", - Short: "CLI tool to interact with routing network", - Long: `This command provides a set of subcommands to interact with the routing 
network.`, -} - -func init() { - Command.AddCommand( - infoCmd.Command, - initCmd.Command, - ) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + infoCmd "github.com/agntcy/dir/cli/cmd/network/info" + initCmd "github.com/agntcy/dir/cli/cmd/network/init" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "network", + Short: "CLI tool to interact with routing network", + Long: `This command provides a set of subcommands to interact with the routing network.`, +} + +func init() { + Command.AddCommand( + infoCmd.Command, + initCmd.Command, + ) +} diff --git a/cli/cmd/options.go b/cli/cmd/options.go index 54e088618..e96c3e5a8 100644 --- a/cli/cmd/options.go +++ b/cli/cmd/options.go @@ -1,32 +1,32 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cmd - -import ( - "github.com/agntcy/dir/client" -) - -var clientConfig = &client.DefaultConfig - -func init() { - // load config - if cfg, err := client.LoadConfig(); err == nil { - clientConfig = cfg - } - - // set flags - flags := RootCmd.PersistentFlags() - flags.StringVar(&clientConfig.ServerAddress, "server-addr", clientConfig.ServerAddress, "Directory Server API address") - flags.StringVar(&clientConfig.AuthMode, "auth-mode", clientConfig.AuthMode, "Authentication mode: none, x509, jwt, token, tls") - flags.StringVar(&clientConfig.SpiffeSocketPath, "spiffe-socket-path", clientConfig.SpiffeSocketPath, "Path to SPIFFE Workload API socket (for x509 or JWT authentication)") - flags.StringVar(&clientConfig.SpiffeToken, "spiffe-token", clientConfig.SpiffeToken, "Path to file containing SPIFFE X509 SVID token (for token authentication)") - flags.StringVar(&clientConfig.JWTAudience, "jwt-audience", clientConfig.JWTAudience, "JWT audience (for JWT authentication mode)") - flags.BoolVar(&clientConfig.TlsSkipVerify, "tls-skip-verify", clientConfig.TlsSkipVerify, "Skip TLS verification (for TLS authentication mode)") - flags.StringVar(&clientConfig.TlsCAFile, "tls-ca-file", clientConfig.TlsCAFile, "Path to TLS CA file (for TLS authentication mode)") - flags.StringVar(&clientConfig.TlsCertFile, "tls-cert-file", clientConfig.TlsCertFile, "Path to TLS certificate file (for TLS authentication mode)") - flags.StringVar(&clientConfig.TlsKeyFile, "tls-key-file", clientConfig.TlsKeyFile, "Path to TLS key file (for TLS authentication mode)") - - // mark required flags - RootCmd.MarkFlagRequired("server-addr") //nolint:errcheck -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cmd + +import ( + "github.com/agntcy/dir/client" +) + +var clientConfig = &client.DefaultConfig + +func init() { + // load config + if cfg, err := client.LoadConfig(); err == nil { + clientConfig = cfg + } + + // set flags + flags := RootCmd.PersistentFlags() + flags.StringVar(&clientConfig.ServerAddress, "server-addr", clientConfig.ServerAddress, "Directory Server API address") + flags.StringVar(&clientConfig.AuthMode, "auth-mode", clientConfig.AuthMode, "Authentication mode: none, x509, jwt, token, tls") + flags.StringVar(&clientConfig.SpiffeSocketPath, "spiffe-socket-path", clientConfig.SpiffeSocketPath, "Path to SPIFFE Workload API socket (for x509 or JWT authentication)") + flags.StringVar(&clientConfig.SpiffeToken, "spiffe-token", clientConfig.SpiffeToken, "Path to file containing SPIFFE X509 SVID token (for token authentication)") + 
flags.StringVar(&clientConfig.JWTAudience, "jwt-audience", clientConfig.JWTAudience, "JWT audience (for JWT authentication mode)") + flags.BoolVar(&clientConfig.TlsSkipVerify, "tls-skip-verify", clientConfig.TlsSkipVerify, "Skip TLS verification (for TLS authentication mode)") + flags.StringVar(&clientConfig.TlsCAFile, "tls-ca-file", clientConfig.TlsCAFile, "Path to TLS CA file (for TLS authentication mode)") + flags.StringVar(&clientConfig.TlsCertFile, "tls-cert-file", clientConfig.TlsCertFile, "Path to TLS certificate file (for TLS authentication mode)") + flags.StringVar(&clientConfig.TlsKeyFile, "tls-key-file", clientConfig.TlsKeyFile, "Path to TLS key file (for TLS authentication mode)") + + // mark required flags + RootCmd.MarkFlagRequired("server-addr") //nolint:errcheck +} diff --git a/cli/cmd/pull/options.go b/cli/cmd/pull/options.go index 79b91e025..0f26145c3 100644 --- a/cli/cmd/pull/options.go +++ b/cli/cmd/pull/options.go @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pull - -import "github.com/agntcy/dir/cli/presenter" - -var opts = &options{} - -type options struct { - PublicKey bool - Signature bool -} - -func init() { - flags := Command.Flags() - flags.BoolVar(&opts.PublicKey, "public-key", false, "Pull the public key for the record.") - flags.BoolVar(&opts.Signature, "signature", false, "Pull the signature for the record.") - - // Add output format flags - presenter.AddOutputFlags(Command) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pull + +import "github.com/agntcy/dir/cli/presenter" + +var opts = &options{} + +type options struct { + PublicKey bool + Signature bool +} + +func init() { + flags := Command.Flags() + flags.BoolVar(&opts.PublicKey, "public-key", false, "Pull the public key for the record.") + flags.BoolVar(&opts.Signature, "signature", false, "Pull the signature for the record.") + + // Add output format flags + presenter.AddOutputFlags(Command) +} diff --git a/cli/cmd/pull/pull.go b/cli/cmd/pull/pull.go index 788eff04a..bb15fb7a1 100644 --- a/cli/cmd/pull/pull.go +++ b/cli/cmd/pull/pull.go @@ -1,167 +1,167 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package pull - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "pull", - Short: "Pull record from Directory server", - Long: `This command pulls the record from Directory API. The data can be validated against its hash, as -the returned object is content-addressable. - -Usage examples: - -1. Pull by cid and output - - dirctl pull - -2. Pull by cid and output public key - - dirctl pull --public-key - -3. Pull by cid and output signature - - dirctl pull --signature - -4. 
Output formats: - - # Get record as JSON - dirctl pull --output json - - # Get record with public key as JSON - dirctl pull --public-key --output json - - # Get raw record data for piping - dirctl pull --output raw > record.json -`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return errors.New("cid is a required argument") - } - - return runCommand(cmd, args[0]) - }, -} - -//nolint:cyclop,gocognit -func runCommand(cmd *cobra.Command, cid string) error { - // Get the client from the context. - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Fetch record from store - record, err := c.Pull(cmd.Context(), &corev1.RecordRef{ - Cid: cid, - }) - if err != nil { - return fmt.Errorf("failed to pull data: %w", err) - } - - if !opts.PublicKey && !opts.Signature { - // Handle different output formats - return presenter.PrintMessage(cmd, "record", "Record data", record.GetData()) - } - - publicKeys := make([]*signv1.PublicKey, 0) - - if opts.PublicKey { - publicKeyType := corev1.PublicKeyReferrerType - - resultCh, err := c.PullReferrer(cmd.Context(), &storev1.PullReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: cid, - }, - ReferrerType: &publicKeyType, - }) - if err != nil { - return fmt.Errorf("failed to pull public key: %w", err) - } - - for response := range resultCh { - publicKey := &signv1.PublicKey{} - if err := publicKey.UnmarshalReferrer(response.GetReferrer()); err != nil { - return fmt.Errorf("failed to decode public key from referrer: %w", err) - } - - if publicKey.GetKey() != "" { - publicKeys = append(publicKeys, publicKey) - } - } - } - - signatures := make([]*signv1.Signature, 0) - - if opts.Signature { - signatureType := corev1.SignatureReferrerType - - resultCh, err := c.PullReferrer(cmd.Context(), &storev1.PullReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: cid, - }, - ReferrerType: &signatureType, - }) - if err != nil { - return fmt.Errorf("failed to pull signature: %w", err) - } - - for response := range resultCh { - signature := &signv1.Signature{} - if err := signature.UnmarshalReferrer(response.GetReferrer()); err != nil { - return fmt.Errorf("failed to decode signature from referrer: %w", err) - } - - if signature.GetSignature() != "" { - signatures = append(signatures, signature) - } - } - } - - // Create structured data object - structuredData := map[string]interface{}{ - "record": map[string]interface{}{ - "data": record.GetData(), - }, - } - - // Add public keys if any - if len(publicKeys) > 0 { - publicKeyData := make([]map[string]interface{}, len(publicKeys)) - for i, pk := range publicKeys { - publicKeyData[i] = map[string]interface{}{ - "key": pk.GetKey(), - } - } - - structuredData["publicKeys"] = publicKeyData - } - - // Add signatures if any - if len(signatures) > 0 { - signatureData := make([]map[string]interface{}, len(signatures)) - for i, sig := range signatures { - signatureData[i] = map[string]interface{}{ - "signature": sig.GetSignature(), - } - } - - structuredData["signatures"] = signatureData - } - - // Output the structured data - return presenter.PrintMessage(cmd, "record", "Record data with keys and signatures", structuredData) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package pull + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 
"github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "pull", + Short: "Pull record from Directory server", + Long: `This command pulls the record from Directory API. The data can be validated against its hash, as +the returned object is content-addressable. + +Usage examples: + +1. Pull by cid and output + + dirctl pull + +2. Pull by cid and output public key + + dirctl pull --public-key + +3. Pull by cid and output signature + + dirctl pull --signature + +4. Output formats: + + # Get record as JSON + dirctl pull --output json + + # Get record with public key as JSON + dirctl pull --public-key --output json + + # Get raw record data for piping + dirctl pull --output raw > record.json +`, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("cid is a required argument") + } + + return runCommand(cmd, args[0]) + }, +} + +//nolint:cyclop,gocognit +func runCommand(cmd *cobra.Command, cid string) error { + // Get the client from the context. + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Fetch record from store + record, err := c.Pull(cmd.Context(), &corev1.RecordRef{ + Cid: cid, + }) + if err != nil { + return fmt.Errorf("failed to pull data: %w", err) + } + + if !opts.PublicKey && !opts.Signature { + // Handle different output formats + return presenter.PrintMessage(cmd, "record", "Record data", record.GetData()) + } + + publicKeys := make([]*signv1.PublicKey, 0) + + if opts.PublicKey { + publicKeyType := corev1.PublicKeyReferrerType + + resultCh, err := c.PullReferrer(cmd.Context(), &storev1.PullReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: cid, + }, + ReferrerType: &publicKeyType, + }) + if err != nil { + return fmt.Errorf("failed to pull public key: %w", err) + } + + for response := range resultCh { + publicKey := &signv1.PublicKey{} + if err := publicKey.UnmarshalReferrer(response.GetReferrer()); err != nil { + return fmt.Errorf("failed to decode public key from referrer: %w", err) + } + + if publicKey.GetKey() != "" { + publicKeys = append(publicKeys, publicKey) + } + } + } + + signatures := make([]*signv1.Signature, 0) + + if opts.Signature { + signatureType := corev1.SignatureReferrerType + + resultCh, err := c.PullReferrer(cmd.Context(), &storev1.PullReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: cid, + }, + ReferrerType: &signatureType, + }) + if err != nil { + return fmt.Errorf("failed to pull signature: %w", err) + } + + for response := range resultCh { + signature := &signv1.Signature{} + if err := signature.UnmarshalReferrer(response.GetReferrer()); err != nil { + return fmt.Errorf("failed to decode signature from referrer: %w", err) + } + + if signature.GetSignature() != "" { + signatures = append(signatures, signature) + } + } + } + + // Create structured data object + structuredData := map[string]interface{}{ + "record": map[string]interface{}{ + "data": record.GetData(), + }, + } + + // Add public keys if any + if len(publicKeys) > 0 { + publicKeyData := make([]map[string]interface{}, len(publicKeys)) + for i, pk := range publicKeys { + publicKeyData[i] = map[string]interface{}{ + "key": pk.GetKey(), + } + } + + structuredData["publicKeys"] = publicKeyData + } + + // Add signatures if any + if len(signatures) > 0 { + signatureData := make([]map[string]interface{}, len(signatures)) + 
for i, sig := range signatures { + signatureData[i] = map[string]interface{}{ + "signature": sig.GetSignature(), + } + } + + structuredData["signatures"] = signatureData + } + + // Output the structured data + return presenter.PrintMessage(cmd, "record", "Record data with keys and signatures", structuredData) +} diff --git a/cli/cmd/push/options.go b/cli/cmd/push/options.go index c9d27a278..85212993c 100644 --- a/cli/cmd/push/options.go +++ b/cli/cmd/push/options.go @@ -1,36 +1,36 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package push - -import ( - signcmd "github.com/agntcy/dir/cli/cmd/sign" - "github.com/agntcy/dir/cli/presenter" - "github.com/agntcy/dir/client" -) - -var opts = &options{} - -type options struct { - FromStdin bool - Sign bool - - // Signing options - client.SignOpts -} - -func init() { - flags := Command.Flags() - flags.BoolVar(&opts.FromStdin, "stdin", false, - "Read compiled data from standard input. Useful for piping. Reads from file if empty. "+ - "Ignored if file is provided as an argument.", - ) - flags.BoolVar(&opts.Sign, "sign", false, - "Sign the record with the specified signing options.", - ) - - signcmd.AddSigningFlags(flags) - - // Add output format flags - presenter.AddOutputFlags(Command) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package push + +import ( + signcmd "github.com/agntcy/dir/cli/cmd/sign" + "github.com/agntcy/dir/cli/presenter" + "github.com/agntcy/dir/client" +) + +var opts = &options{} + +type options struct { + FromStdin bool + Sign bool + + // Signing options + client.SignOpts +} + +func init() { + flags := Command.Flags() + flags.BoolVar(&opts.FromStdin, "stdin", false, + "Read compiled data from standard input. Useful for piping. Reads from file if empty. "+ + "Ignored if file is provided as an argument.", + ) + flags.BoolVar(&opts.Sign, "sign", false, + "Sign the record with the specified signing options.", + ) + + signcmd.AddSigningFlags(flags) + + // Add output format flags + presenter.AddOutputFlags(Command) +} diff --git a/cli/cmd/push/push.go b/cli/cmd/push/push.go index 64df05d55..d89228fa1 100644 --- a/cli/cmd/push/push.go +++ b/cli/cmd/push/push.go @@ -1,117 +1,117 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupword,wrapcheck -package push - -import ( - "errors" - "fmt" - "io" - "os" - - corev1 "github.com/agntcy/dir/api/core/v1" - signcmd "github.com/agntcy/dir/cli/cmd/sign" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "push", - Short: "Push record to Directory server", - Long: `This command pushes the record to local storage layer via Directory API. The data is stored into -content-addressable object store. - -Usage examples: - -1. From record file: - - dirctl push model.json - -2. Data from standard input. Useful for piping: - - cat model.json | dirctl push --stdin - -3. Push with signature: - - dirctl push model.json --sign - -4. 
Output formats: - - # Get CID as JSON - dirctl push model.json --output json - - # Get raw CID for scripting - CID=$(dirctl push model.json --output raw) - - # Push and pipe to publish - dirctl push model.json --output raw | xargs dirctl routing publish - -`, - RunE: func(cmd *cobra.Command, args []string) error { - var path string - if len(args) > 1 { - return errors.New("only one file path is allowed") - } else if len(args) == 1 { - path = args[0] - } - - // get source - if path == "" && !opts.FromStdin { - return errors.New("if no path defined --stdin flag must be set") - } - - // if path is empty, read from stdin - if path == "" { - return runCommand(cmd, cmd.InOrStdin()) - } - - // otherwise, read from file - source, err := os.Open(path) - if err != nil { - return fmt.Errorf("could not open file %s: %w", path, err) - } - defer source.Close() - - return runCommand(cmd, source) - }, -} - -func runCommand(cmd *cobra.Command, source io.Reader) error { - // Get the client from the context. - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Read and close the source - sourceData, err := io.ReadAll(source) - if err != nil { - return fmt.Errorf("failed to read source data: %w", err) - } - - // Load OASF data into a Record - record, err := corev1.UnmarshalRecord(sourceData) - if err != nil { - return fmt.Errorf("failed to load OASF: %w", err) - } - - var recordRef *corev1.RecordRef - - // Use the client's Push method to send the record - recordRef, err = c.Push(cmd.Context(), record) - if err != nil { - return fmt.Errorf("failed to push data: %w", err) - } - - if opts.Sign { - err = signcmd.Sign(cmd.Context(), c, recordRef.GetCid()) - if err != nil { - return fmt.Errorf("failed to sign record: %w", err) - } - } - - // Output in the appropriate format - return presenter.PrintMessage(cmd, "record", "Pushed record with CID", recordRef.GetCid()) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupword,wrapcheck +package push + +import ( + "errors" + "fmt" + "io" + "os" + + corev1 "github.com/agntcy/dir/api/core/v1" + signcmd "github.com/agntcy/dir/cli/cmd/sign" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "push", + Short: "Push record to Directory server", + Long: `This command pushes the record to local storage layer via Directory API. The data is stored into +content-addressable object store. + +Usage examples: + +1. From record file: + + dirctl push model.json + +2. Data from standard input. Useful for piping: + + cat model.json | dirctl push --stdin + +3. Push with signature: + + dirctl push model.json --sign + +4. 
Output formats: + + # Get CID as JSON + dirctl push model.json --output json + + # Get raw CID for scripting + CID=$(dirctl push model.json --output raw) + + # Push and pipe to publish + dirctl push model.json --output raw | xargs dirctl routing publish + +`, + RunE: func(cmd *cobra.Command, args []string) error { + var path string + if len(args) > 1 { + return errors.New("only one file path is allowed") + } else if len(args) == 1 { + path = args[0] + } + + // get source + if path == "" && !opts.FromStdin { + return errors.New("if no path defined --stdin flag must be set") + } + + // if path is empty, read from stdin + if path == "" { + return runCommand(cmd, cmd.InOrStdin()) + } + + // otherwise, read from file + source, err := os.Open(path) + if err != nil { + return fmt.Errorf("could not open file %s: %w", path, err) + } + defer source.Close() + + return runCommand(cmd, source) + }, +} + +func runCommand(cmd *cobra.Command, source io.Reader) error { + // Get the client from the context. + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Read and close the source + sourceData, err := io.ReadAll(source) + if err != nil { + return fmt.Errorf("failed to read source data: %w", err) + } + + // Load OASF data into a Record + record, err := corev1.UnmarshalRecord(sourceData) + if err != nil { + return fmt.Errorf("failed to load OASF: %w", err) + } + + var recordRef *corev1.RecordRef + + // Use the client's Push method to send the record + recordRef, err = c.Push(cmd.Context(), record) + if err != nil { + return fmt.Errorf("failed to push data: %w", err) + } + + if opts.Sign { + err = signcmd.Sign(cmd.Context(), c, recordRef.GetCid()) + if err != nil { + return fmt.Errorf("failed to sign record: %w", err) + } + } + + // Output in the appropriate format + return presenter.PrintMessage(cmd, "record", "Pushed record with CID", recordRef.GetCid()) +} diff --git a/cli/cmd/root.go b/cli/cmd/root.go index e6545124a..02cb10461 100644 --- a/cli/cmd/root.go +++ b/cli/cmd/root.go @@ -1,91 +1,91 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cmd - -import ( - "context" - "fmt" - - "github.com/agntcy/dir/cli/cmd/delete" - "github.com/agntcy/dir/cli/cmd/events" - importcmd "github.com/agntcy/dir/cli/cmd/import" - "github.com/agntcy/dir/cli/cmd/info" - "github.com/agntcy/dir/cli/cmd/mcp" - "github.com/agntcy/dir/cli/cmd/network" - "github.com/agntcy/dir/cli/cmd/pull" - "github.com/agntcy/dir/cli/cmd/push" - "github.com/agntcy/dir/cli/cmd/routing" - "github.com/agntcy/dir/cli/cmd/search" - "github.com/agntcy/dir/cli/cmd/sign" - "github.com/agntcy/dir/cli/cmd/sync" - "github.com/agntcy/dir/cli/cmd/verify" - "github.com/agntcy/dir/cli/cmd/version" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/agntcy/dir/client" - "github.com/spf13/cobra" -) - -var RootCmd = &cobra.Command{ - Use: "dirctl", - Short: "CLI tool to interact with Directory", - Long: ``, - SilenceUsage: true, - PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - // Set client via context for all requests - // TODO: make client config configurable via CLI args - c, err := client.New(cmd.Context(), client.WithConfig(clientConfig)) - if err != nil { - return fmt.Errorf("failed to create client: %w", err) - } - - ctx := ctxUtils.SetClientForContext(cmd.Context(), c) - cmd.SetContext(ctx) - - cobra.OnFinalize(func() { - // Silently close the client. 
Errors during cleanup are not actionable - // and typically occur due to context cancellation after command completion. - _ = c.Close() - }) - - return nil - }, -} - -func init() { - network.Command.Hidden = true - - RootCmd.AddCommand( - // local commands - version.Command, - // initialize.Command, // REMOVED: Initialize functionality - sign.Command, - verify.Command, - // storage commands - info.Command, - pull.Command, - push.Command, - delete.Command, - // import commands - importcmd.Command, - // routing commands (all under routing subcommand) - routing.Command, // Contains: publish, unpublish, list, search - network.Command, - // search commands - search.Command, // General search (searchv1) - // sync commands - sync.Command, - // events commands - events.Command, // Contains: listen - // mcp commands - mcp.Command, // Contains: serve - ) -} - -func Run(ctx context.Context) error { - if err := RootCmd.ExecuteContext(ctx); err != nil { - return fmt.Errorf("failed to execute command: %w", err) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cmd + +import ( + "context" + "fmt" + + "github.com/agntcy/dir/cli/cmd/delete" + "github.com/agntcy/dir/cli/cmd/events" + importcmd "github.com/agntcy/dir/cli/cmd/import" + "github.com/agntcy/dir/cli/cmd/info" + "github.com/agntcy/dir/cli/cmd/mcp" + "github.com/agntcy/dir/cli/cmd/network" + "github.com/agntcy/dir/cli/cmd/pull" + "github.com/agntcy/dir/cli/cmd/push" + "github.com/agntcy/dir/cli/cmd/routing" + "github.com/agntcy/dir/cli/cmd/search" + "github.com/agntcy/dir/cli/cmd/sign" + "github.com/agntcy/dir/cli/cmd/sync" + "github.com/agntcy/dir/cli/cmd/verify" + "github.com/agntcy/dir/cli/cmd/version" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/agntcy/dir/client" + "github.com/spf13/cobra" +) + +var RootCmd = &cobra.Command{ + Use: "dirctl", + Short: "CLI tool to interact with Directory", + Long: ``, + SilenceUsage: true, + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { + // Set client via context for all requests + // TODO: make client config configurable via CLI args + c, err := client.New(cmd.Context(), client.WithConfig(clientConfig)) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + ctx := ctxUtils.SetClientForContext(cmd.Context(), c) + cmd.SetContext(ctx) + + cobra.OnFinalize(func() { + // Silently close the client. Errors during cleanup are not actionable + // and typically occur due to context cancellation after command completion. 
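+ // cobra.OnFinalize runs these callbacks once Execute returns, so the
+ // client stays open for the full lifetime of the invoked command.
+ // A standalone consumer of the client package would manage the same
+ // lifecycle directly; a minimal sketch, with hypothetical ctx and cfg:
+ //
+ //	c, err := client.New(ctx, client.WithConfig(cfg))
+ //	if err != nil {
+ //		return fmt.Errorf("failed to create client: %w", err)
+ //	}
+ //	defer c.Close()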
+ _ = c.Close() + }) + + return nil + }, +} + +func init() { + network.Command.Hidden = true + + RootCmd.AddCommand( + // local commands + version.Command, + // initialize.Command, // REMOVED: Initialize functionality + sign.Command, + verify.Command, + // storage commands + info.Command, + pull.Command, + push.Command, + delete.Command, + // import commands + importcmd.Command, + // routing commands (all under routing subcommand) + routing.Command, // Contains: publish, unpublish, list, search + network.Command, + // search commands + search.Command, // General search (searchv1) + // sync commands + sync.Command, + // events commands + events.Command, // Contains: listen + // mcp commands + mcp.Command, // Contains: serve + ) +} + +func Run(ctx context.Context) error { + if err := RootCmd.ExecuteContext(ctx); err != nil { + return fmt.Errorf("failed to execute command: %w", err) + } + + return nil +} diff --git a/cli/cmd/routing/info.go b/cli/cmd/routing/info.go index 66bc2a0b3..a9a9955a7 100644 --- a/cli/cmd/routing/info.go +++ b/cli/cmd/routing/info.go @@ -1,218 +1,218 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "errors" - "fmt" - "strings" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var infoCmd = &cobra.Command{ - Use: "info", - Short: "Show routing statistics and summary information", - Long: `Show routing statistics and summary information for local records. - -This command provides aggregated statistics about locally published records, -including record counts and label distribution. - -Key Features: -- Record count: Total number of locally published records -- Label distribution: Frequency of each label across records -- Local-only: Shows statistics for local routing data only -- Fast: Uses local storage index for efficient counting - -Usage examples: - -1. Show local routing statistics: - dirctl routing info - -2. Output formats: - # Get routing statistics as JSON - dirctl routing info --output json - - # Get raw statistics data - dirctl routing info --output raw - -Note: For network-wide statistics, use 'dirctl routing search' with broad queries. 
-`, - //nolint:gocritic // Lambda required due to signature mismatch - runInfoCommand doesn't use args - RunE: func(cmd *cobra.Command, _ []string) error { - return runInfoCommand(cmd) - }, -} - -func init() { - // Add output format flags - presenter.AddOutputFlags(infoCmd) -} - -func runInfoCommand(cmd *cobra.Command) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Get output options - outputOpts := presenter.GetOutputOptions(cmd) - - // Get all local records - resultCh, err := c.List(cmd.Context(), &routingv1.ListRequest{ - // No queries = list all local records - }) - if err != nil { - return fmt.Errorf("failed to list local records: %w", err) - } - - // Collect statistics - stats := collectRoutingStatistics(resultCh) - - // Build structured result for all output formats - result := map[string]interface{}{ - "totalRecords": stats.totalRecords, - "skills": stats.skillCounts, - "locators": stats.locatorCounts, - "otherLabels": stats.otherLabels, - } - - // Use common PrintMessage for structured formats (json, jsonl, raw) - if outputOpts.Format != presenter.FormatHuman { - if err := presenter.PrintMessage(cmd, "routing statistics", "Routing statistics", result); err != nil { - return fmt.Errorf("failed to print routing statistics: %w", err) - } - - return nil - } - - // Human-readable format - presenter.Printf(cmd, "Local Routing Summary:\n\n") - displayRoutingStatistics(cmd, stats) - - return nil -} - -// routingStatistics holds collected routing statistics. -type routingStatistics struct { - totalRecords int - skillCounts map[string]int - locatorCounts map[string]int - otherLabels map[string]int -} - -// collectRoutingStatistics processes routing results and collects statistics. -func collectRoutingStatistics(resultCh <-chan *routingv1.ListResponse) *routingStatistics { - stats := &routingStatistics{ - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - labelCounts := make(map[string]int) - - for result := range resultCh { - stats.totalRecords++ - - // Count and categorize labels - for _, label := range result.GetLabels() { - labelCounts[label]++ - categorizeLabel(label, stats) - } - } - - // Process other labels - for label, count := range labelCounts { - if !strings.HasPrefix(label, "/skills/") && !strings.HasPrefix(label, "/locators/") { - stats.otherLabels[label] = count - } - } - - return stats -} - -// categorizeLabel categorizes a label into the appropriate statistics bucket. -func categorizeLabel(label string, stats *routingStatistics) { - switch { - case strings.HasPrefix(label, "/skills/"): - skillName := strings.TrimPrefix(label, "/skills/") - stats.skillCounts[skillName]++ - case strings.HasPrefix(label, "/locators/"): - locatorType := strings.TrimPrefix(label, "/locators/") - stats.locatorCounts[locatorType]++ - } -} - -// displayRoutingStatistics displays the collected statistics. 
-func displayRoutingStatistics(cmd *cobra.Command, stats *routingStatistics) { - presenter.Printf(cmd, "📊 Record Statistics:\n") - presenter.Printf(cmd, " Total Records: %d\n\n", stats.totalRecords) - - if stats.totalRecords == 0 { - displayEmptyStatistics(cmd) - - return - } - - displaySkillStatistics(cmd, stats.skillCounts) - displayLocatorStatistics(cmd, stats.locatorCounts) - displayOtherLabels(cmd, stats.otherLabels) - displayHelpfulTips(cmd) -} - -// displayEmptyStatistics shows guidance when no records are found. -func displayEmptyStatistics(cmd *cobra.Command) { - presenter.Printf(cmd, "No local records found.\n") - presenter.Printf(cmd, "Use 'dirctl push' and 'dirctl routing publish' to add records.\n") -} - -// displaySkillStatistics shows skill distribution. -func displaySkillStatistics(cmd *cobra.Command, skillCounts map[string]int) { - if len(skillCounts) > 0 { - presenter.Printf(cmd, "🎯 Skills Distribution:\n") - - for skill, count := range skillCounts { - presenter.Printf(cmd, " %s: %d record(s)\n", skill, count) - } - - presenter.Printf(cmd, "\n") - } -} - -// displayLocatorStatistics shows locator distribution. -func displayLocatorStatistics(cmd *cobra.Command, locatorCounts map[string]int) { - if len(locatorCounts) > 0 { - presenter.Printf(cmd, "📍 Locators Distribution:\n") - - for locator, count := range locatorCounts { - presenter.Printf(cmd, " %s: %d record(s)\n", locator, count) - } - - presenter.Printf(cmd, "\n") - } -} - -// displayOtherLabels shows other label types. -func displayOtherLabels(cmd *cobra.Command, otherLabels map[string]int) { - if len(otherLabels) > 0 { - presenter.Printf(cmd, "🏷️ Other Labels:\n") - - for label, count := range otherLabels { - presenter.Printf(cmd, " %s: %d record(s)\n", label, count) - } - - presenter.Printf(cmd, "\n") - } -} - -// displayHelpfulTips shows usage suggestions. -func displayHelpfulTips(cmd *cobra.Command) { - presenter.Printf(cmd, "💡 Tips:\n") - presenter.Printf(cmd, " - Use 'dirctl routing list --skill ' to filter by skill\n") - presenter.Printf(cmd, " - Use 'dirctl routing search --skill ' to find remote records\n") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "errors" + "fmt" + "strings" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var infoCmd = &cobra.Command{ + Use: "info", + Short: "Show routing statistics and summary information", + Long: `Show routing statistics and summary information for local records. + +This command provides aggregated statistics about locally published records, +including record counts and label distribution. + +Key Features: +- Record count: Total number of locally published records +- Label distribution: Frequency of each label across records +- Local-only: Shows statistics for local routing data only +- Fast: Uses local storage index for efficient counting + +Usage examples: + +1. Show local routing statistics: + dirctl routing info + +2. Output formats: + # Get routing statistics as JSON + dirctl routing info --output json + + # Get raw statistics data + dirctl routing info --output raw + +Note: For network-wide statistics, use 'dirctl routing search' with broad queries. 
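+
+For example (illustrative, and assuming the jq tool is installed), the JSON
+output can be filtered in a shell pipeline:
+
+   dirctl routing info --output json | jq '.totalRecords'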
+`, + //nolint:gocritic // Lambda required due to signature mismatch - runInfoCommand doesn't use args + RunE: func(cmd *cobra.Command, _ []string) error { + return runInfoCommand(cmd) + }, +} + +func init() { + // Add output format flags + presenter.AddOutputFlags(infoCmd) +} + +func runInfoCommand(cmd *cobra.Command) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Get output options + outputOpts := presenter.GetOutputOptions(cmd) + + // Get all local records + resultCh, err := c.List(cmd.Context(), &routingv1.ListRequest{ + // No queries = list all local records + }) + if err != nil { + return fmt.Errorf("failed to list local records: %w", err) + } + + // Collect statistics + stats := collectRoutingStatistics(resultCh) + + // Build structured result for all output formats + result := map[string]interface{}{ + "totalRecords": stats.totalRecords, + "skills": stats.skillCounts, + "locators": stats.locatorCounts, + "otherLabels": stats.otherLabels, + } + + // Use common PrintMessage for structured formats (json, jsonl, raw) + if outputOpts.Format != presenter.FormatHuman { + if err := presenter.PrintMessage(cmd, "routing statistics", "Routing statistics", result); err != nil { + return fmt.Errorf("failed to print routing statistics: %w", err) + } + + return nil + } + + // Human-readable format + presenter.Printf(cmd, "Local Routing Summary:\n\n") + displayRoutingStatistics(cmd, stats) + + return nil +} + +// routingStatistics holds collected routing statistics. +type routingStatistics struct { + totalRecords int + skillCounts map[string]int + locatorCounts map[string]int + otherLabels map[string]int +} + +// collectRoutingStatistics processes routing results and collects statistics. +func collectRoutingStatistics(resultCh <-chan *routingv1.ListResponse) *routingStatistics { + stats := &routingStatistics{ + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + labelCounts := make(map[string]int) + + for result := range resultCh { + stats.totalRecords++ + + // Count and categorize labels + for _, label := range result.GetLabels() { + labelCounts[label]++ + categorizeLabel(label, stats) + } + } + + // Process other labels + for label, count := range labelCounts { + if !strings.HasPrefix(label, "/skills/") && !strings.HasPrefix(label, "/locators/") { + stats.otherLabels[label] = count + } + } + + return stats +} + +// categorizeLabel categorizes a label into the appropriate statistics bucket. +func categorizeLabel(label string, stats *routingStatistics) { + switch { + case strings.HasPrefix(label, "/skills/"): + skillName := strings.TrimPrefix(label, "/skills/") + stats.skillCounts[skillName]++ + case strings.HasPrefix(label, "/locators/"): + locatorType := strings.TrimPrefix(label, "/locators/") + stats.locatorCounts[locatorType]++ + } +} + +// displayRoutingStatistics displays the collected statistics. 
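+// For non-empty statistics the rendered output looks roughly as follows
+// (record counts are illustrative):
+//
+//	📊 Record Statistics:
+//	   Total Records: 3
+//
+//	🎯 Skills Distribution:
+//	   AI: 2 record(s)
+//	   ML: 1 record(s)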
+func displayRoutingStatistics(cmd *cobra.Command, stats *routingStatistics) { + presenter.Printf(cmd, "📊 Record Statistics:\n") + presenter.Printf(cmd, " Total Records: %d\n\n", stats.totalRecords) + + if stats.totalRecords == 0 { + displayEmptyStatistics(cmd) + + return + } + + displaySkillStatistics(cmd, stats.skillCounts) + displayLocatorStatistics(cmd, stats.locatorCounts) + displayOtherLabels(cmd, stats.otherLabels) + displayHelpfulTips(cmd) +} + +// displayEmptyStatistics shows guidance when no records are found. +func displayEmptyStatistics(cmd *cobra.Command) { + presenter.Printf(cmd, "No local records found.\n") + presenter.Printf(cmd, "Use 'dirctl push' and 'dirctl routing publish' to add records.\n") +} + +// displaySkillStatistics shows skill distribution. +func displaySkillStatistics(cmd *cobra.Command, skillCounts map[string]int) { + if len(skillCounts) > 0 { + presenter.Printf(cmd, "🎯 Skills Distribution:\n") + + for skill, count := range skillCounts { + presenter.Printf(cmd, " %s: %d record(s)\n", skill, count) + } + + presenter.Printf(cmd, "\n") + } +} + +// displayLocatorStatistics shows locator distribution. +func displayLocatorStatistics(cmd *cobra.Command, locatorCounts map[string]int) { + if len(locatorCounts) > 0 { + presenter.Printf(cmd, "📍 Locators Distribution:\n") + + for locator, count := range locatorCounts { + presenter.Printf(cmd, " %s: %d record(s)\n", locator, count) + } + + presenter.Printf(cmd, "\n") + } +} + +// displayOtherLabels shows other label types. +func displayOtherLabels(cmd *cobra.Command, otherLabels map[string]int) { + if len(otherLabels) > 0 { + presenter.Printf(cmd, "🏷️ Other Labels:\n") + + for label, count := range otherLabels { + presenter.Printf(cmd, " %s: %d record(s)\n", label, count) + } + + presenter.Printf(cmd, "\n") + } +} + +// displayHelpfulTips shows usage suggestions. +func displayHelpfulTips(cmd *cobra.Command) { + presenter.Printf(cmd, "💡 Tips:\n") + presenter.Printf(cmd, " - Use 'dirctl routing list --skill ' to filter by skill\n") + presenter.Printf(cmd, " - Use 'dirctl routing search --skill ' to find remote records\n") +} diff --git a/cli/cmd/routing/info_test.go b/cli/cmd/routing/info_test.go index a1de8b641..c5d138155 100644 --- a/cli/cmd/routing/info_test.go +++ b/cli/cmd/routing/info_test.go @@ -1,594 +1,594 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "bytes" - "strings" - "testing" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestCategorizeLabel tests label categorization logic. 
-func TestCategorizeLabel(t *testing.T) { - tests := []struct { - name string - label string - expectedSkills map[string]int - expectedLocs map[string]int - }{ - { - name: "skill label", - label: "/skills/AI", - expectedSkills: map[string]int{"AI": 1}, - expectedLocs: map[string]int{}, - }, - { - name: "skill with spaces", - label: "/skills/Natural Language Processing", - expectedSkills: map[string]int{"Natural Language Processing": 1}, - expectedLocs: map[string]int{}, - }, - { - name: "locator label", - label: "/locators/docker-image", - expectedSkills: map[string]int{}, - expectedLocs: map[string]int{"docker-image": 1}, - }, - { - name: "locator with path", - label: "/locators/http/endpoint", - expectedSkills: map[string]int{}, - expectedLocs: map[string]int{"http/endpoint": 1}, - }, - { - name: "other label - ignored", - label: "/domains/healthcare", - expectedSkills: map[string]int{}, - expectedLocs: map[string]int{}, - }, - { - name: "plain label - ignored", - label: "custom-label", - expectedSkills: map[string]int{}, - expectedLocs: map[string]int{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stats := &routingStatistics{ - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - categorizeLabel(tt.label, stats) - - assert.Equal(t, tt.expectedSkills, stats.skillCounts) - assert.Equal(t, tt.expectedLocs, stats.locatorCounts) - }) - } -} - -// TestCategorizeLabel_MultipleCallsSameLabel tests counting logic. -func TestCategorizeLabel_MultipleCallsSameLabel(t *testing.T) { - stats := &routingStatistics{ - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - // Call three times with same skill - categorizeLabel("/skills/AI", stats) - categorizeLabel("/skills/AI", stats) - categorizeLabel("/skills/AI", stats) - - assert.Equal(t, map[string]int{"AI": 3}, stats.skillCounts) - assert.Equal(t, map[string]int{}, stats.locatorCounts) -} - -// TestCategorizeLabel_MultipleDifferentLabels tests mixed labels. -func TestCategorizeLabel_MultipleDifferentLabels(t *testing.T) { - stats := &routingStatistics{ - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - categorizeLabel("/skills/AI", stats) - categorizeLabel("/skills/ML", stats) - categorizeLabel("/locators/docker-image", stats) - categorizeLabel("/locators/http", stats) - - assert.Equal(t, map[string]int{"AI": 1, "ML": 1}, stats.skillCounts) - assert.Equal(t, map[string]int{"docker-image": 1, "http": 1}, stats.locatorCounts) -} - -// TestCollectRoutingStatistics_EmptyChannel tests with no records. -func TestCollectRoutingStatistics_EmptyChannel(t *testing.T) { - ch := make(chan *routingv1.ListResponse) - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 0, stats.totalRecords) - assert.Empty(t, stats.skillCounts) - assert.Empty(t, stats.locatorCounts) - assert.Empty(t, stats.otherLabels) -} - -// TestCollectRoutingStatistics_SingleRecord tests with one record. 
-func TestCollectRoutingStatistics_SingleRecord(t *testing.T) { - ch := make(chan *routingv1.ListResponse, 1) - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "test-cid"}, - Labels: []string{"/skills/AI", "/locators/docker-image"}, - } - - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 1, stats.totalRecords) - assert.Equal(t, map[string]int{"AI": 1}, stats.skillCounts) - assert.Equal(t, map[string]int{"docker-image": 1}, stats.locatorCounts) - assert.Empty(t, stats.otherLabels) -} - -// TestCollectRoutingStatistics_MultipleRecords tests with multiple records. -func TestCollectRoutingStatistics_MultipleRecords(t *testing.T) { - ch := make(chan *routingv1.ListResponse, 3) - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Labels: []string{"/skills/AI", "/locators/docker-image"}, - } - - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Labels: []string{"/skills/AI", "/skills/ML"}, - } - - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid3"}, - Labels: []string{"/skills/ML", "/locators/http"}, - } - - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 3, stats.totalRecords) - assert.Equal(t, map[string]int{"AI": 2, "ML": 2}, stats.skillCounts) - assert.Equal(t, map[string]int{"docker-image": 1, "http": 1}, stats.locatorCounts) - assert.Empty(t, stats.otherLabels) -} - -// TestCollectRoutingStatistics_WithOtherLabels tests other label categories. -func TestCollectRoutingStatistics_WithOtherLabels(t *testing.T) { - ch := make(chan *routingv1.ListResponse, 2) - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Labels: []string{"/skills/AI", "/domains/healthcare", "/custom/label"}, - } - - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Labels: []string{"/domains/healthcare", "/modules/runtime"}, - } - - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 2, stats.totalRecords) - assert.Equal(t, map[string]int{"AI": 1}, stats.skillCounts) - assert.Empty(t, stats.locatorCounts) - assert.Equal(t, map[string]int{ - "/domains/healthcare": 2, - "/custom/label": 1, - "/modules/runtime": 1, - }, stats.otherLabels) -} - -// TestCollectRoutingStatistics_RecordWithNoLabels tests records without labels. -func TestCollectRoutingStatistics_RecordWithNoLabels(t *testing.T) { - ch := make(chan *routingv1.ListResponse, 2) - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Labels: []string{}, - } - - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Labels: nil, - } - - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 2, stats.totalRecords) - assert.Empty(t, stats.skillCounts) - assert.Empty(t, stats.locatorCounts) - assert.Empty(t, stats.otherLabels) -} - -// TestDisplayEmptyStatistics tests empty statistics display. -func TestDisplayEmptyStatistics(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayEmptyStatistics(cmd) - - output := stdout.String() - assert.Contains(t, output, "No local records found") - assert.Contains(t, output, "dirctl push") - assert.Contains(t, output, "dirctl routing publish") -} - -// TestDisplaySkillStatistics tests skill statistics display. 
-// -//nolint:dupl // Similar test structure for different display functions is intentional for clarity -func TestDisplaySkillStatistics(t *testing.T) { - tests := []struct { - name string - skills map[string]int - expected []string - }{ - { - name: "empty skills", - skills: map[string]int{}, - expected: []string{}, - }, - { - name: "single skill", - skills: map[string]int{"AI": 5}, - expected: []string{"Skills Distribution", "AI: 5 record(s)"}, - }, - { - name: "multiple skills", - skills: map[string]int{"AI": 3, "ML": 2}, - expected: []string{"Skills Distribution", "AI: 3 record(s)", "ML: 2 record(s)"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displaySkillStatistics(cmd, tt.skills) - - output := stdout.String() - for _, exp := range tt.expected { - assert.Contains(t, output, exp) - } - - if len(tt.skills) == 0 { - assert.Empty(t, output) - } - }) - } -} - -// TestDisplayLocatorStatistics tests locator statistics display. -// -//nolint:dupl // Similar test structure for different display functions is intentional for clarity -func TestDisplayLocatorStatistics(t *testing.T) { - tests := []struct { - name string - locators map[string]int - expected []string - }{ - { - name: "empty locators", - locators: map[string]int{}, - expected: []string{}, - }, - { - name: "single locator", - locators: map[string]int{"docker-image": 3}, - expected: []string{"Locators Distribution", "docker-image: 3 record(s)"}, - }, - { - name: "multiple locators", - locators: map[string]int{"docker-image": 2, "http": 1}, - expected: []string{"Locators Distribution", "docker-image: 2 record(s)", "http: 1 record(s)"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayLocatorStatistics(cmd, tt.locators) - - output := stdout.String() - for _, exp := range tt.expected { - assert.Contains(t, output, exp) - } - - if len(tt.locators) == 0 { - assert.Empty(t, output) - } - }) - } -} - -// TestDisplayOtherLabels tests other labels display. -// -//nolint:dupl // Similar test structure for different display functions is intentional for clarity -func TestDisplayOtherLabels(t *testing.T) { - tests := []struct { - name string - labels map[string]int - expected []string - }{ - { - name: "empty labels", - labels: map[string]int{}, - expected: []string{}, - }, - { - name: "single label", - labels: map[string]int{"/domains/healthcare": 2}, - expected: []string{"Other Labels", "/domains/healthcare: 2 record(s)"}, - }, - { - name: "multiple labels", - labels: map[string]int{"/domains/healthcare": 2, "/modules/runtime": 1}, - expected: []string{"Other Labels", "/domains/healthcare: 2 record(s)", "/modules/runtime: 1 record(s)"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayOtherLabels(cmd, tt.labels) - - output := stdout.String() - for _, exp := range tt.expected { - assert.Contains(t, output, exp) - } - - if len(tt.labels) == 0 { - assert.Empty(t, output) - } - }) - } -} - -// TestDisplayHelpfulTips tests helpful tips display. 
-func TestDisplayHelpfulTips(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - displayHelpfulTips(cmd) - - output := stdout.String() - assert.Contains(t, output, "Tips") - assert.Contains(t, output, "dirctl routing list --skill") - assert.Contains(t, output, "dirctl routing search --skill") -} - -// TestDisplayRoutingStatistics_EmptyStats tests display with zero records. -func TestDisplayRoutingStatistics_EmptyStats(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - stats := &routingStatistics{ - totalRecords: 0, - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - displayRoutingStatistics(cmd, stats) - - output := stdout.String() - assert.Contains(t, output, "Total Records: 0") - assert.Contains(t, output, "No local records found") - // Should not display other sections - assert.NotContains(t, output, "Skills Distribution") - assert.NotContains(t, output, "Locators Distribution") - assert.NotContains(t, output, "Other Labels") - assert.NotContains(t, output, "Tips") -} - -// TestDisplayRoutingStatistics_WithData tests display with actual statistics. -func TestDisplayRoutingStatistics_WithData(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - stats := &routingStatistics{ - totalRecords: 5, - skillCounts: map[string]int{ - "AI": 3, - "ML": 2, - }, - locatorCounts: map[string]int{ - "docker-image": 2, - }, - otherLabels: map[string]int{ - "/domains/healthcare": 1, - }, - } - - displayRoutingStatistics(cmd, stats) - - output := stdout.String() - // Check all sections are present - assert.Contains(t, output, "Total Records: 5") - assert.Contains(t, output, "Skills Distribution") - assert.Contains(t, output, "AI: 3 record(s)") - assert.Contains(t, output, "ML: 2 record(s)") - assert.Contains(t, output, "Locators Distribution") - assert.Contains(t, output, "docker-image: 2 record(s)") - assert.Contains(t, output, "Other Labels") - assert.Contains(t, output, "/domains/healthcare: 1 record(s)") - assert.Contains(t, output, "Tips") -} - -// TestDisplayRoutingStatistics_OnlySkills tests display with only skills. -func TestDisplayRoutingStatistics_OnlySkills(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - stats := &routingStatistics{ - totalRecords: 2, - skillCounts: map[string]int{ - "AI": 2, - }, - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - displayRoutingStatistics(cmd, stats) - - output := stdout.String() - assert.Contains(t, output, "Total Records: 2") - assert.Contains(t, output, "Skills Distribution") - assert.Contains(t, output, "AI: 2 record(s)") - assert.NotContains(t, output, "Locators Distribution") - assert.NotContains(t, output, "Other Labels") - assert.Contains(t, output, "Tips") -} - -// TestInfoCmd_Initialization tests that infoCmd is properly initialized. -func TestInfoCmd_Initialization(t *testing.T) { - assert.NotNil(t, infoCmd) - assert.Equal(t, "info", infoCmd.Use) - assert.NotEmpty(t, infoCmd.Short) - assert.NotEmpty(t, infoCmd.Long) - assert.NotNil(t, infoCmd.RunE) - - // Check that examples are in the Long description - assert.Contains(t, infoCmd.Long, "dirctl routing info") -} - -// TestRoutingStatistics_Structure tests the statistics structure. 
-func TestRoutingStatistics_Structure(t *testing.T) { - stats := &routingStatistics{ - totalRecords: 10, - skillCounts: map[string]int{"AI": 5}, - locatorCounts: map[string]int{"docker": 3}, - otherLabels: map[string]int{"/custom": 2}, - } - - assert.Equal(t, 10, stats.totalRecords) - assert.Equal(t, 5, stats.skillCounts["AI"]) - assert.Equal(t, 3, stats.locatorCounts["docker"]) - assert.Equal(t, 2, stats.otherLabels["/custom"]) -} - -// TestCollectRoutingStatistics_LargeDataset tests with many records. -func TestCollectRoutingStatistics_LargeDataset(t *testing.T) { - ch := make(chan *routingv1.ListResponse, 100) - - // Add 100 records with varying labels - for i := range 100 { - labels := []string{"/skills/AI"} - if i%2 == 0 { - labels = append(labels, "/locators/docker-image") - } - - if i%3 == 0 { - labels = append(labels, "/domains/healthcare") - } - - ch <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid-" + string(rune(i))}, - Labels: labels, - } - } - - close(ch) - - stats := collectRoutingStatistics(ch) - - assert.Equal(t, 100, stats.totalRecords) - assert.Equal(t, 100, stats.skillCounts["AI"]) - assert.Equal(t, 50, stats.locatorCounts["docker-image"]) - assert.Equal(t, 34, stats.otherLabels["/domains/healthcare"]) // 100/3 rounded up -} - -// TestCategorizeLabel_EdgeCases tests edge cases in label categorization. -func TestCategorizeLabel_EdgeCases(t *testing.T) { - tests := []struct { - name string - label string - }{ - {"empty string", ""}, - {"just slash", "/"}, - {"skills prefix only", "/skills/"}, - {"locators prefix only", "/locators/"}, - {"multiple slashes", "/skills//AI//ML"}, - {"trailing slash", "/skills/AI/"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - stats := &routingStatistics{ - skillCounts: make(map[string]int), - locatorCounts: make(map[string]int), - otherLabels: make(map[string]int), - } - - // Should not panic - require.NotPanics(t, func() { - categorizeLabel(tt.label, stats) - }) - }) - } -} - -// TestDisplayFunctions_WithEmojis tests that display functions include emojis. -func TestDisplayFunctions_WithEmojis(t *testing.T) { - cmd := &cobra.Command{} - - var stdout bytes.Buffer - cmd.SetOut(&stdout) - - stats := &routingStatistics{ - totalRecords: 1, - skillCounts: map[string]int{"AI": 1}, - locatorCounts: map[string]int{"docker": 1}, - otherLabels: map[string]int{"/custom": 1}, - } - - displayRoutingStatistics(cmd, stats) - - output := stdout.String() - // Check for emojis (they make the output more user-friendly) - assert.True(t, strings.Contains(output, "📊") || strings.Contains(output, "Record Statistics")) - assert.True(t, strings.Contains(output, "🎯") || strings.Contains(output, "Skills")) - assert.True(t, strings.Contains(output, "📍") || strings.Contains(output, "Locators")) - assert.True(t, strings.Contains(output, "🏷️") || strings.Contains(output, "Other Labels") || strings.Contains(output, "Labels")) - assert.True(t, strings.Contains(output, "💡") || strings.Contains(output, "Tips")) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "bytes" + "strings" + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestCategorizeLabel tests label categorization logic. 
+func TestCategorizeLabel(t *testing.T) { + tests := []struct { + name string + label string + expectedSkills map[string]int + expectedLocs map[string]int + }{ + { + name: "skill label", + label: "/skills/AI", + expectedSkills: map[string]int{"AI": 1}, + expectedLocs: map[string]int{}, + }, + { + name: "skill with spaces", + label: "/skills/Natural Language Processing", + expectedSkills: map[string]int{"Natural Language Processing": 1}, + expectedLocs: map[string]int{}, + }, + { + name: "locator label", + label: "/locators/docker-image", + expectedSkills: map[string]int{}, + expectedLocs: map[string]int{"docker-image": 1}, + }, + { + name: "locator with path", + label: "/locators/http/endpoint", + expectedSkills: map[string]int{}, + expectedLocs: map[string]int{"http/endpoint": 1}, + }, + { + name: "other label - ignored", + label: "/domains/healthcare", + expectedSkills: map[string]int{}, + expectedLocs: map[string]int{}, + }, + { + name: "plain label - ignored", + label: "custom-label", + expectedSkills: map[string]int{}, + expectedLocs: map[string]int{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stats := &routingStatistics{ + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + categorizeLabel(tt.label, stats) + + assert.Equal(t, tt.expectedSkills, stats.skillCounts) + assert.Equal(t, tt.expectedLocs, stats.locatorCounts) + }) + } +} + +// TestCategorizeLabel_MultipleCallsSameLabel tests counting logic. +func TestCategorizeLabel_MultipleCallsSameLabel(t *testing.T) { + stats := &routingStatistics{ + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + // Call three times with same skill + categorizeLabel("/skills/AI", stats) + categorizeLabel("/skills/AI", stats) + categorizeLabel("/skills/AI", stats) + + assert.Equal(t, map[string]int{"AI": 3}, stats.skillCounts) + assert.Equal(t, map[string]int{}, stats.locatorCounts) +} + +// TestCategorizeLabel_MultipleDifferentLabels tests mixed labels. +func TestCategorizeLabel_MultipleDifferentLabels(t *testing.T) { + stats := &routingStatistics{ + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + categorizeLabel("/skills/AI", stats) + categorizeLabel("/skills/ML", stats) + categorizeLabel("/locators/docker-image", stats) + categorizeLabel("/locators/http", stats) + + assert.Equal(t, map[string]int{"AI": 1, "ML": 1}, stats.skillCounts) + assert.Equal(t, map[string]int{"docker-image": 1, "http": 1}, stats.locatorCounts) +} + +// TestCollectRoutingStatistics_EmptyChannel tests with no records. +func TestCollectRoutingStatistics_EmptyChannel(t *testing.T) { + ch := make(chan *routingv1.ListResponse) + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 0, stats.totalRecords) + assert.Empty(t, stats.skillCounts) + assert.Empty(t, stats.locatorCounts) + assert.Empty(t, stats.otherLabels) +} + +// TestCollectRoutingStatistics_SingleRecord tests with one record. 
+func TestCollectRoutingStatistics_SingleRecord(t *testing.T) { + ch := make(chan *routingv1.ListResponse, 1) + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "test-cid"}, + Labels: []string{"/skills/AI", "/locators/docker-image"}, + } + + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 1, stats.totalRecords) + assert.Equal(t, map[string]int{"AI": 1}, stats.skillCounts) + assert.Equal(t, map[string]int{"docker-image": 1}, stats.locatorCounts) + assert.Empty(t, stats.otherLabels) +} + +// TestCollectRoutingStatistics_MultipleRecords tests with multiple records. +func TestCollectRoutingStatistics_MultipleRecords(t *testing.T) { + ch := make(chan *routingv1.ListResponse, 3) + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Labels: []string{"/skills/AI", "/locators/docker-image"}, + } + + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Labels: []string{"/skills/AI", "/skills/ML"}, + } + + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid3"}, + Labels: []string{"/skills/ML", "/locators/http"}, + } + + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 3, stats.totalRecords) + assert.Equal(t, map[string]int{"AI": 2, "ML": 2}, stats.skillCounts) + assert.Equal(t, map[string]int{"docker-image": 1, "http": 1}, stats.locatorCounts) + assert.Empty(t, stats.otherLabels) +} + +// TestCollectRoutingStatistics_WithOtherLabels tests other label categories. +func TestCollectRoutingStatistics_WithOtherLabels(t *testing.T) { + ch := make(chan *routingv1.ListResponse, 2) + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Labels: []string{"/skills/AI", "/domains/healthcare", "/custom/label"}, + } + + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Labels: []string{"/domains/healthcare", "/modules/runtime"}, + } + + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 2, stats.totalRecords) + assert.Equal(t, map[string]int{"AI": 1}, stats.skillCounts) + assert.Empty(t, stats.locatorCounts) + assert.Equal(t, map[string]int{ + "/domains/healthcare": 2, + "/custom/label": 1, + "/modules/runtime": 1, + }, stats.otherLabels) +} + +// TestCollectRoutingStatistics_RecordWithNoLabels tests records without labels. +func TestCollectRoutingStatistics_RecordWithNoLabels(t *testing.T) { + ch := make(chan *routingv1.ListResponse, 2) + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Labels: []string{}, + } + + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Labels: nil, + } + + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 2, stats.totalRecords) + assert.Empty(t, stats.skillCounts) + assert.Empty(t, stats.locatorCounts) + assert.Empty(t, stats.otherLabels) +} + +// TestDisplayEmptyStatistics tests empty statistics display. +func TestDisplayEmptyStatistics(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayEmptyStatistics(cmd) + + output := stdout.String() + assert.Contains(t, output, "No local records found") + assert.Contains(t, output, "dirctl push") + assert.Contains(t, output, "dirctl routing publish") +} + +// TestDisplaySkillStatistics tests skill statistics display. 
+// +//nolint:dupl // Similar test structure for different display functions is intentional for clarity +func TestDisplaySkillStatistics(t *testing.T) { + tests := []struct { + name string + skills map[string]int + expected []string + }{ + { + name: "empty skills", + skills: map[string]int{}, + expected: []string{}, + }, + { + name: "single skill", + skills: map[string]int{"AI": 5}, + expected: []string{"Skills Distribution", "AI: 5 record(s)"}, + }, + { + name: "multiple skills", + skills: map[string]int{"AI": 3, "ML": 2}, + expected: []string{"Skills Distribution", "AI: 3 record(s)", "ML: 2 record(s)"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displaySkillStatistics(cmd, tt.skills) + + output := stdout.String() + for _, exp := range tt.expected { + assert.Contains(t, output, exp) + } + + if len(tt.skills) == 0 { + assert.Empty(t, output) + } + }) + } +} + +// TestDisplayLocatorStatistics tests locator statistics display. +// +//nolint:dupl // Similar test structure for different display functions is intentional for clarity +func TestDisplayLocatorStatistics(t *testing.T) { + tests := []struct { + name string + locators map[string]int + expected []string + }{ + { + name: "empty locators", + locators: map[string]int{}, + expected: []string{}, + }, + { + name: "single locator", + locators: map[string]int{"docker-image": 3}, + expected: []string{"Locators Distribution", "docker-image: 3 record(s)"}, + }, + { + name: "multiple locators", + locators: map[string]int{"docker-image": 2, "http": 1}, + expected: []string{"Locators Distribution", "docker-image: 2 record(s)", "http: 1 record(s)"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayLocatorStatistics(cmd, tt.locators) + + output := stdout.String() + for _, exp := range tt.expected { + assert.Contains(t, output, exp) + } + + if len(tt.locators) == 0 { + assert.Empty(t, output) + } + }) + } +} + +// TestDisplayOtherLabels tests other labels display. +// +//nolint:dupl // Similar test structure for different display functions is intentional for clarity +func TestDisplayOtherLabels(t *testing.T) { + tests := []struct { + name string + labels map[string]int + expected []string + }{ + { + name: "empty labels", + labels: map[string]int{}, + expected: []string{}, + }, + { + name: "single label", + labels: map[string]int{"/domains/healthcare": 2}, + expected: []string{"Other Labels", "/domains/healthcare: 2 record(s)"}, + }, + { + name: "multiple labels", + labels: map[string]int{"/domains/healthcare": 2, "/modules/runtime": 1}, + expected: []string{"Other Labels", "/domains/healthcare: 2 record(s)", "/modules/runtime: 1 record(s)"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayOtherLabels(cmd, tt.labels) + + output := stdout.String() + for _, exp := range tt.expected { + assert.Contains(t, output, exp) + } + + if len(tt.labels) == 0 { + assert.Empty(t, output) + } + }) + } +} + +// TestDisplayHelpfulTips tests helpful tips display. 
+func TestDisplayHelpfulTips(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + displayHelpfulTips(cmd) + + output := stdout.String() + assert.Contains(t, output, "Tips") + assert.Contains(t, output, "dirctl routing list --skill") + assert.Contains(t, output, "dirctl routing search --skill") +} + +// TestDisplayRoutingStatistics_EmptyStats tests display with zero records. +func TestDisplayRoutingStatistics_EmptyStats(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + stats := &routingStatistics{ + totalRecords: 0, + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + displayRoutingStatistics(cmd, stats) + + output := stdout.String() + assert.Contains(t, output, "Total Records: 0") + assert.Contains(t, output, "No local records found") + // Should not display other sections + assert.NotContains(t, output, "Skills Distribution") + assert.NotContains(t, output, "Locators Distribution") + assert.NotContains(t, output, "Other Labels") + assert.NotContains(t, output, "Tips") +} + +// TestDisplayRoutingStatistics_WithData tests display with actual statistics. +func TestDisplayRoutingStatistics_WithData(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + stats := &routingStatistics{ + totalRecords: 5, + skillCounts: map[string]int{ + "AI": 3, + "ML": 2, + }, + locatorCounts: map[string]int{ + "docker-image": 2, + }, + otherLabels: map[string]int{ + "/domains/healthcare": 1, + }, + } + + displayRoutingStatistics(cmd, stats) + + output := stdout.String() + // Check all sections are present + assert.Contains(t, output, "Total Records: 5") + assert.Contains(t, output, "Skills Distribution") + assert.Contains(t, output, "AI: 3 record(s)") + assert.Contains(t, output, "ML: 2 record(s)") + assert.Contains(t, output, "Locators Distribution") + assert.Contains(t, output, "docker-image: 2 record(s)") + assert.Contains(t, output, "Other Labels") + assert.Contains(t, output, "/domains/healthcare: 1 record(s)") + assert.Contains(t, output, "Tips") +} + +// TestDisplayRoutingStatistics_OnlySkills tests display with only skills. +func TestDisplayRoutingStatistics_OnlySkills(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + stats := &routingStatistics{ + totalRecords: 2, + skillCounts: map[string]int{ + "AI": 2, + }, + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + displayRoutingStatistics(cmd, stats) + + output := stdout.String() + assert.Contains(t, output, "Total Records: 2") + assert.Contains(t, output, "Skills Distribution") + assert.Contains(t, output, "AI: 2 record(s)") + assert.NotContains(t, output, "Locators Distribution") + assert.NotContains(t, output, "Other Labels") + assert.Contains(t, output, "Tips") +} + +// TestInfoCmd_Initialization tests that infoCmd is properly initialized. +func TestInfoCmd_Initialization(t *testing.T) { + assert.NotNil(t, infoCmd) + assert.Equal(t, "info", infoCmd.Use) + assert.NotEmpty(t, infoCmd.Short) + assert.NotEmpty(t, infoCmd.Long) + assert.NotNil(t, infoCmd.RunE) + + // Check that examples are in the Long description + assert.Contains(t, infoCmd.Long, "dirctl routing info") +} + +// TestRoutingStatistics_Structure tests the statistics structure. 
+func TestRoutingStatistics_Structure(t *testing.T) { + stats := &routingStatistics{ + totalRecords: 10, + skillCounts: map[string]int{"AI": 5}, + locatorCounts: map[string]int{"docker": 3}, + otherLabels: map[string]int{"/custom": 2}, + } + + assert.Equal(t, 10, stats.totalRecords) + assert.Equal(t, 5, stats.skillCounts["AI"]) + assert.Equal(t, 3, stats.locatorCounts["docker"]) + assert.Equal(t, 2, stats.otherLabels["/custom"]) +} + +// TestCollectRoutingStatistics_LargeDataset tests with many records. +func TestCollectRoutingStatistics_LargeDataset(t *testing.T) { + ch := make(chan *routingv1.ListResponse, 100) + + // Add 100 records with varying labels + for i := range 100 { + labels := []string{"/skills/AI"} + if i%2 == 0 { + labels = append(labels, "/locators/docker-image") + } + + if i%3 == 0 { + labels = append(labels, "/domains/healthcare") + } + + ch <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid-" + string(rune(i))}, + Labels: labels, + } + } + + close(ch) + + stats := collectRoutingStatistics(ch) + + assert.Equal(t, 100, stats.totalRecords) + assert.Equal(t, 100, stats.skillCounts["AI"]) + assert.Equal(t, 50, stats.locatorCounts["docker-image"]) + assert.Equal(t, 34, stats.otherLabels["/domains/healthcare"]) // 100/3 rounded up +} + +// TestCategorizeLabel_EdgeCases tests edge cases in label categorization. +func TestCategorizeLabel_EdgeCases(t *testing.T) { + tests := []struct { + name string + label string + }{ + {"empty string", ""}, + {"just slash", "/"}, + {"skills prefix only", "/skills/"}, + {"locators prefix only", "/locators/"}, + {"multiple slashes", "/skills//AI//ML"}, + {"trailing slash", "/skills/AI/"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stats := &routingStatistics{ + skillCounts: make(map[string]int), + locatorCounts: make(map[string]int), + otherLabels: make(map[string]int), + } + + // Should not panic + require.NotPanics(t, func() { + categorizeLabel(tt.label, stats) + }) + }) + } +} + +// TestDisplayFunctions_WithEmojis tests that display functions include emojis. 
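
One caveat in TestCollectRoutingStatistics_LargeDataset above: string(rune(i)) converts the loop index to the Unicode code point i, so small indices produce unprintable control characters in the CID. Uniqueness is all the assertion needs, but a decimal suffix reads better in failure output; a drop-in alternative (requires the strconv import):

    ch <- &routingv1.ListResponse{
        RecordRef: &corev1.RecordRef{Cid: "cid-" + strconv.Itoa(i)},
        Labels:    labels,
    }
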
+func TestDisplayFunctions_WithEmojis(t *testing.T) { + cmd := &cobra.Command{} + + var stdout bytes.Buffer + cmd.SetOut(&stdout) + + stats := &routingStatistics{ + totalRecords: 1, + skillCounts: map[string]int{"AI": 1}, + locatorCounts: map[string]int{"docker": 1}, + otherLabels: map[string]int{"/custom": 1}, + } + + displayRoutingStatistics(cmd, stats) + + output := stdout.String() + // Check for emojis (they make the output more user-friendly) + assert.True(t, strings.Contains(output, "📊") || strings.Contains(output, "Record Statistics")) + assert.True(t, strings.Contains(output, "🎯") || strings.Contains(output, "Skills")) + assert.True(t, strings.Contains(output, "📍") || strings.Contains(output, "Locators")) + assert.True(t, strings.Contains(output, "🏷️") || strings.Contains(output, "Other Labels") || strings.Contains(output, "Labels")) + assert.True(t, strings.Contains(output, "💡") || strings.Contains(output, "Tips")) +} diff --git a/cli/cmd/routing/list.go b/cli/cmd/routing/list.go index f6d267e50..99bb1daac 100644 --- a/cli/cmd/routing/list.go +++ b/cli/cmd/routing/list.go @@ -1,188 +1,188 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package routing - -import ( - "errors" - "fmt" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/agntcy/dir/client" - "github.com/spf13/cobra" -) - -var listCmd = &cobra.Command{ - Use: "list", - Short: "List local records with optional filtering", - Long: `List local records with optional filtering. - -This command queries records that are stored locally on this peer only. -It does NOT query the network or other peers. - -Key Features: -- Local-only: Only shows records published on this peer -- Fast: Uses local storage index, no network access -- Filtering: Supports skill and locator queries with AND logic -- Efficient: Extracts labels from storage keys, no content parsing - -Usage examples: - -1. List all local records: - dirctl routing list - -2. List records with specific skill: - dirctl routing list --skill "AI" - -3. List records with multiple criteria (AND logic): - dirctl routing list --skill "AI" --locator "docker-image" - -4. List specific record by CID: - dirctl routing list --cid - -5. Output formats: - # Get results as JSON - dirctl routing list --skill "AI" --output json - - # Get results as JSONL for streaming - dirctl routing list --output jsonl - - # Get raw CIDs only - dirctl routing list --skill "AI" --output raw - -Note: For network-wide discovery, use 'dirctl routing search' instead. -`, - //nolint:gocritic // Lambda required due to signature mismatch - runListCommand doesn't use args - RunE: func(cmd *cobra.Command, _ []string) error { - return runListCommand(cmd) - }, -} - -// List command options. 
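
runListCommand below builds one typed RecordQuery per repeated flag value, with one loop per flag family (skills, locators, domains, modules). The four loops share a shape that a small helper could express once; a sketch using only the routingv1 types already shown in this diff:

    func appendQueries(dst []*routingv1.RecordQuery, t routingv1.RecordQueryType, values []string) []*routingv1.RecordQuery {
        for _, v := range values {
            dst = append(dst, &routingv1.RecordQuery{Type: t, Value: v})
        }
        return dst
    }

which would be called as, for example:

    queries := appendQueries(nil, routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, listOpts.Skills)
    queries = appendQueries(queries, routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, listOpts.Locators)
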
-var listOpts struct { - Cid string - Skills []string - Locators []string - Domains []string - Modules []string - Limit uint32 -} - -func init() { - // Add flags for list options - listCmd.Flags().StringVar(&listOpts.Cid, "cid", "", "List specific record by CID") - listCmd.Flags().StringArrayVar(&listOpts.Skills, "skill", nil, "Filter by skill (can be repeated)") - listCmd.Flags().StringArrayVar(&listOpts.Locators, "locator", nil, "Filter by locator type (can be repeated)") - listCmd.Flags().StringArrayVar(&listOpts.Domains, "domain", nil, "Filter by domain (can be repeated)") - listCmd.Flags().StringArrayVar(&listOpts.Modules, "module", nil, "Filter by module (can be repeated)") - listCmd.Flags().Uint32Var(&listOpts.Limit, "limit", 0, "Maximum number of results (0 = no limit)") - - // Add examples in flag help - listCmd.Flags().Lookup("skill").Usage = "Filter by skill (e.g., --skill 'AI' --skill 'web-development')" - listCmd.Flags().Lookup("locator").Usage = "Filter by locator type (e.g., --locator 'docker-image')" - listCmd.Flags().Lookup("domain").Usage = "Filter by domain (e.g., --domain 'research' --domain 'analytics')" - listCmd.Flags().Lookup("module").Usage = "Filter by module (e.g., --module 'core/llm/model')" - listCmd.Flags().Lookup("cid").Usage = "List specific record by CID" - - // Add output format flags - presenter.AddOutputFlags(listCmd) -} - -func runListCommand(cmd *cobra.Command) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Handle CID-specific listing - if listOpts.Cid != "" { - return listByCID(cmd, c, listOpts.Cid) - } - - // Build queries from flags - queries := make([]*routingv1.RecordQuery, 0, len(listOpts.Skills)+len(listOpts.Locators)+len(listOpts.Domains)+len(listOpts.Modules)) - - // Add skill queries - for _, skill := range listOpts.Skills { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: skill, - }) - } - - // Add locator queries - for _, locator := range listOpts.Locators { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: locator, - }) - } - - // Add domain queries - for _, domain := range listOpts.Domains { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: domain, - }) - } - - // Add module queries - for _, module := range listOpts.Modules { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: module, - }) - } - - // Build list request - req := &routingv1.ListRequest{ - Queries: queries, - } - - // Add optional limit - if listOpts.Limit > 0 { - req.Limit = &listOpts.Limit - } - - // Execute list - resultCh, err := c.List(cmd.Context(), req) - if err != nil { - return fmt.Errorf("failed to list: %w", err) - } - - // Collect results and convert to interface{} slice in a single loop - results := make([]interface{}, 0, listOpts.Limit) - for result := range resultCh { - results = append(results, result) - } - - return presenter.PrintMessage(cmd, "local records", "Local records found", results) -} - -// listByCID lists a specific record by CID. 
-func listByCID(cmd *cobra.Command, c *client.Client, cid string) error { - // For CID-specific queries, we can use an empty query list - req := &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{}, // Empty = list all, then we filter by CID match - } - - resultCh, err := c.List(cmd.Context(), req) - if err != nil { - return fmt.Errorf("failed to list: %w", err) - } - - // Collect results and convert to interface{} slice in a single loop - results := make([]interface{}, 0, listOpts.Limit) - - for result := range resultCh { - if result.GetRecordRef().GetCid() == cid { - results = append(results, result) - } - } - - return presenter.PrintMessage(cmd, "local records", "Local records found", results) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package routing + +import ( + "errors" + "fmt" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/agntcy/dir/client" + "github.com/spf13/cobra" +) + +var listCmd = &cobra.Command{ + Use: "list", + Short: "List local records with optional filtering", + Long: `List local records with optional filtering. + +This command queries records that are stored locally on this peer only. +It does NOT query the network or other peers. + +Key Features: +- Local-only: Only shows records published on this peer +- Fast: Uses local storage index, no network access +- Filtering: Supports skill and locator queries with AND logic +- Efficient: Extracts labels from storage keys, no content parsing + +Usage examples: + +1. List all local records: + dirctl routing list + +2. List records with specific skill: + dirctl routing list --skill "AI" + +3. List records with multiple criteria (AND logic): + dirctl routing list --skill "AI" --locator "docker-image" + +4. List specific record by CID: + dirctl routing list --cid + +5. Output formats: + # Get results as JSON + dirctl routing list --skill "AI" --output json + + # Get results as JSONL for streaming + dirctl routing list --output jsonl + + # Get raw CIDs only + dirctl routing list --skill "AI" --output raw + +Note: For network-wide discovery, use 'dirctl routing search' instead. +`, + //nolint:gocritic // Lambda required due to signature mismatch - runListCommand doesn't use args + RunE: func(cmd *cobra.Command, _ []string) error { + return runListCommand(cmd) + }, +} + +// List command options. 
+var listOpts struct { + Cid string + Skills []string + Locators []string + Domains []string + Modules []string + Limit uint32 +} + +func init() { + // Add flags for list options + listCmd.Flags().StringVar(&listOpts.Cid, "cid", "", "List specific record by CID") + listCmd.Flags().StringArrayVar(&listOpts.Skills, "skill", nil, "Filter by skill (can be repeated)") + listCmd.Flags().StringArrayVar(&listOpts.Locators, "locator", nil, "Filter by locator type (can be repeated)") + listCmd.Flags().StringArrayVar(&listOpts.Domains, "domain", nil, "Filter by domain (can be repeated)") + listCmd.Flags().StringArrayVar(&listOpts.Modules, "module", nil, "Filter by module (can be repeated)") + listCmd.Flags().Uint32Var(&listOpts.Limit, "limit", 0, "Maximum number of results (0 = no limit)") + + // Add examples in flag help + listCmd.Flags().Lookup("skill").Usage = "Filter by skill (e.g., --skill 'AI' --skill 'web-development')" + listCmd.Flags().Lookup("locator").Usage = "Filter by locator type (e.g., --locator 'docker-image')" + listCmd.Flags().Lookup("domain").Usage = "Filter by domain (e.g., --domain 'research' --domain 'analytics')" + listCmd.Flags().Lookup("module").Usage = "Filter by module (e.g., --module 'core/llm/model')" + listCmd.Flags().Lookup("cid").Usage = "List specific record by CID" + + // Add output format flags + presenter.AddOutputFlags(listCmd) +} + +func runListCommand(cmd *cobra.Command) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Handle CID-specific listing + if listOpts.Cid != "" { + return listByCID(cmd, c, listOpts.Cid) + } + + // Build queries from flags + queries := make([]*routingv1.RecordQuery, 0, len(listOpts.Skills)+len(listOpts.Locators)+len(listOpts.Domains)+len(listOpts.Modules)) + + // Add skill queries + for _, skill := range listOpts.Skills { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: skill, + }) + } + + // Add locator queries + for _, locator := range listOpts.Locators { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: locator, + }) + } + + // Add domain queries + for _, domain := range listOpts.Domains { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: domain, + }) + } + + // Add module queries + for _, module := range listOpts.Modules { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: module, + }) + } + + // Build list request + req := &routingv1.ListRequest{ + Queries: queries, + } + + // Add optional limit + if listOpts.Limit > 0 { + req.Limit = &listOpts.Limit + } + + // Execute list + resultCh, err := c.List(cmd.Context(), req) + if err != nil { + return fmt.Errorf("failed to list: %w", err) + } + + // Collect results and convert to interface{} slice in a single loop + results := make([]interface{}, 0, listOpts.Limit) + for result := range resultCh { + results = append(results, result) + } + + return presenter.PrintMessage(cmd, "local records", "Local records found", results) +} + +// listByCID lists a specific record by CID. 
+func listByCID(cmd *cobra.Command, c *client.Client, cid string) error { + // For CID-specific queries, we can use an empty query list + req := &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{}, // Empty = list all, then we filter by CID match + } + + resultCh, err := c.List(cmd.Context(), req) + if err != nil { + return fmt.Errorf("failed to list: %w", err) + } + + // Collect results and convert to interface{} slice in a single loop + results := make([]interface{}, 0, listOpts.Limit) + + for result := range resultCh { + if result.GetRecordRef().GetCid() == cid { + results = append(results, result) + } + } + + return presenter.PrintMessage(cmd, "local records", "Local records found", results) +} diff --git a/cli/cmd/routing/publish.go b/cli/cmd/routing/publish.go index 491b79565..03116654d 100644 --- a/cli/cmd/routing/publish.go +++ b/cli/cmd/routing/publish.go @@ -1,96 +1,96 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package routing - -import ( - "errors" - "fmt" - "strings" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var publishCmd = &cobra.Command{ - Use: "publish ", - Short: "Publish record to the network for discovery", - Long: `Publish a record to the network to allow content discovery by other peers. - -This command announces a record that is already stored locally to the distributed -network, making it discoverable by other peers through the DHT. - -The record must already exist in local storage (use 'dirctl push' first if needed). - -Key Features: -- Network announcement: Makes record discoverable by other peers -- Local storage: Stores record in local routing index -- DHT announcement: Announces record and labels to distributed network -- Background retry: Failed announcements are retried automatically - -Usage examples: - -1. Publish a record to the network: - dirctl routing publish - -2. Output formats: - # Publish with JSON confirmation - dirctl routing publish --output json - - # Publish with raw output for scripting - dirctl routing publish --output raw - -Note: The record must already be pushed to storage before publishing. 
-`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPublishCommand(cmd, args[0]) - }, -} - -func runPublishCommand(cmd *cobra.Command, cid string) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Create RecordRef from cid - recordRef := &corev1.RecordRef{ - Cid: cid, - } - - // Lookup metadata to verify record exists - _, err := c.Lookup(cmd.Context(), recordRef) - if err != nil { - return fmt.Errorf("failed to lookup: %w", err) - } - - // Start publishing using the same RecordRef - if err := c.Publish(cmd.Context(), &routingv1.PublishRequest{ - Request: &routingv1.PublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{ - Refs: []*corev1.RecordRef{recordRef}, - }, - }, - }); err != nil { - if strings.Contains(err.Error(), "failed to announce object") { - return errors.New("failed to announce object, it will be retried in the background on the API server") - } - - return fmt.Errorf("failed to publish: %w", err) - } - - // Output in the appropriate format - result := map[string]interface{}{ - "cid": recordRef.GetCid(), - "status": "Successfully submitted publication request", - "message": "Record will be discoverable by other peers once the publication service processes the request", - } - - return presenter.PrintMessage(cmd, "Publish", "Successfully submitted publication request", result) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package routing + +import ( + "errors" + "fmt" + "strings" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var publishCmd = &cobra.Command{ + Use: "publish ", + Short: "Publish record to the network for discovery", + Long: `Publish a record to the network to allow content discovery by other peers. + +This command announces a record that is already stored locally to the distributed +network, making it discoverable by other peers through the DHT. + +The record must already exist in local storage (use 'dirctl push' first if needed). + +Key Features: +- Network announcement: Makes record discoverable by other peers +- Local storage: Stores record in local routing index +- DHT announcement: Announces record and labels to distributed network +- Background retry: Failed announcements are retried automatically + +Usage examples: + +1. Publish a record to the network: + dirctl routing publish + +2. Output formats: + # Publish with JSON confirmation + dirctl routing publish --output json + + # Publish with raw output for scripting + dirctl routing publish --output raw + +Note: The record must already be pushed to storage before publishing. 
+`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runPublishCommand(cmd, args[0]) + }, +} + +func runPublishCommand(cmd *cobra.Command, cid string) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Create RecordRef from cid + recordRef := &corev1.RecordRef{ + Cid: cid, + } + + // Lookup metadata to verify record exists + _, err := c.Lookup(cmd.Context(), recordRef) + if err != nil { + return fmt.Errorf("failed to lookup: %w", err) + } + + // Start publishing using the same RecordRef + if err := c.Publish(cmd.Context(), &routingv1.PublishRequest{ + Request: &routingv1.PublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{ + Refs: []*corev1.RecordRef{recordRef}, + }, + }, + }); err != nil { + if strings.Contains(err.Error(), "failed to announce object") { + return errors.New("failed to announce object, it will be retried in the background on the API server") + } + + return fmt.Errorf("failed to publish: %w", err) + } + + // Output in the appropriate format + result := map[string]interface{}{ + "cid": recordRef.GetCid(), + "status": "Successfully submitted publication request", + "message": "Record will be discoverable by other peers once the publication service processes the request", + } + + return presenter.PrintMessage(cmd, "Publish", "Successfully submitted publication request", result) +} diff --git a/cli/cmd/routing/routing.go b/cli/cmd/routing/routing.go index 8a9d4d8da..befbaf750 100644 --- a/cli/cmd/routing/routing.go +++ b/cli/cmd/routing/routing.go @@ -1,53 +1,53 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "github.com/agntcy/dir/cli/presenter" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "routing", - Short: "Routing operations for record discovery and announcement", - Long: `Routing operations for record discovery and announcement. - -This command group provides access to all routing-specific operations: - -- publish: Announce records to the network for discovery -- unpublish: Remove records from network discovery -- list: Query local records with filtering -- search: Discover remote records from other peers -- info: Show routing statistics and summary information - -Examples: - -1. Publish a record to the network: - dirctl routing publish - -2. List local records with skill filter: - dirctl routing list --skill "AI" - -3. Search for remote records across the network: - dirctl routing search --skill "AI" --limit 10 - -4. Unpublish a record from the network: - dirctl routing unpublish - -This follows clear service separation - all routing API operations are grouped together. 
-`, -} - -func init() { - // Add all routing subcommands - Command.AddCommand(publishCmd) - Command.AddCommand(unpublishCmd) - Command.AddCommand(listCmd) - Command.AddCommand(searchCmd) - Command.AddCommand(infoCmd) - - // Add output format flags to routing subcommands - presenter.AddOutputFlags(publishCmd) - presenter.AddOutputFlags(unpublishCmd) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "github.com/agntcy/dir/cli/presenter" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "routing", + Short: "Routing operations for record discovery and announcement", + Long: `Routing operations for record discovery and announcement. + +This command group provides access to all routing-specific operations: + +- publish: Announce records to the network for discovery +- unpublish: Remove records from network discovery +- list: Query local records with filtering +- search: Discover remote records from other peers +- info: Show routing statistics and summary information + +Examples: + +1. Publish a record to the network: + dirctl routing publish + +2. List local records with skill filter: + dirctl routing list --skill "AI" + +3. Search for remote records across the network: + dirctl routing search --skill "AI" --limit 10 + +4. Unpublish a record from the network: + dirctl routing unpublish + +This follows clear service separation - all routing API operations are grouped together. +`, +} + +func init() { + // Add all routing subcommands + Command.AddCommand(publishCmd) + Command.AddCommand(unpublishCmd) + Command.AddCommand(listCmd) + Command.AddCommand(searchCmd) + Command.AddCommand(infoCmd) + + // Add output format flags to routing subcommands + presenter.AddOutputFlags(publishCmd) + presenter.AddOutputFlags(unpublishCmd) +} diff --git a/cli/cmd/routing/search.go b/cli/cmd/routing/search.go index 99863285c..d0cb74ad7 100644 --- a/cli/cmd/routing/search.go +++ b/cli/cmd/routing/search.go @@ -1,173 +1,173 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package routing - -import ( - "errors" - "fmt" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var searchCmd = &cobra.Command{ - Use: "search", - Short: "Search for remote records from other peers", - Long: `Search for remote records from other peers using the routing API. - -This command discovers records that have been published by other peers in the network. -It uses cached network announcements and filters out local records. - -Key Features: -- Remote-only: Only returns records from other peers -- OR logic: Records returned if they match ≥ minScore queries -- Match scoring: Shows how well records match your criteria -- Peer information: Shows which peer provides each record - -Usage examples: - -1. Search for AI-related records: - dirctl routing search --skill "AI" - -2. Search with multiple criteria (AND logic): - dirctl routing search --skill "AI" --skill "ML" --min-score 2 - -3. Search with result limiting: - dirctl routing search --skill "web-development" --limit 5 - -4. 
Output formats: - # Get results as JSON - dirctl routing search --skill "AI" --output json - - # Search and pipe to sync - dirctl routing search --skill "AI" --output json | dirctl sync create --stdin - - # Get raw results for scripting - dirctl routing search --skill "web" --output raw - -`, - //nolint:gocritic // Lambda required due to signature mismatch - runSearchCommand doesn't use args - RunE: func(cmd *cobra.Command, _ []string) error { - return runSearchCommand(cmd) - }, -} - -// Search command options. -var searchOpts struct { - Skills []string - Locators []string - Domains []string - Modules []string - Limit uint32 - MinScore uint32 -} - -const ( - defaultSearchLimit = 10 - // defaultMinScore matches the server-side DefaultMinMatchScore constant for consistency. - defaultMinScore = 1 -) - -func init() { - // Add flags for search options - searchCmd.Flags().StringArrayVar(&searchOpts.Skills, "skill", nil, "Search for records with specific skill (can be repeated)") - searchCmd.Flags().StringArrayVar(&searchOpts.Locators, "locator", nil, "Search for records with specific locator type (can be repeated)") - searchCmd.Flags().StringArrayVar(&searchOpts.Domains, "domain", nil, "Search for records with specific domain (can be repeated)") - searchCmd.Flags().StringArrayVar(&searchOpts.Modules, "module", nil, "Search for records with specific module (can be repeated)") - searchCmd.Flags().Uint32Var(&searchOpts.Limit, "limit", defaultSearchLimit, "Maximum number of results to return") - searchCmd.Flags().Uint32Var(&searchOpts.MinScore, "min-score", defaultMinScore, "Minimum match score (number of queries that must match)") - - // Add examples in flag help - searchCmd.Flags().Lookup("skill").Usage = "Search for records with specific skill (e.g., --skill 'AI' --skill 'ML')" - searchCmd.Flags().Lookup("locator").Usage = "Search for records with specific locator type (e.g., --locator 'docker-image')" - searchCmd.Flags().Lookup("domain").Usage = "Search for records with specific domain (e.g., --domain 'research' --domain 'analytics')" - searchCmd.Flags().Lookup("module").Usage = "Search for records with specific module (e.g., --module 'core/llm/model')" - - // Add standard output format flags - presenter.AddOutputFlags(searchCmd) -} - -func runSearchCommand(cmd *cobra.Command) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Build queries from flags - queries := make([]*routingv1.RecordQuery, 0, len(searchOpts.Skills)+len(searchOpts.Locators)+len(searchOpts.Domains)+len(searchOpts.Modules)) - - // Add skill queries - for _, skill := range searchOpts.Skills { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: skill, - }) - } - - // Add locator queries - for _, locator := range searchOpts.Locators { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: locator, - }) - } - - // Add domain queries - for _, domain := range searchOpts.Domains { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: domain, - }) - } - - // Add module queries - for _, module := range searchOpts.Modules { - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: module, - }) - } - - // Validate that we have at least some criteria - 
if len(queries) == 0 { - presenter.PrintSmartf(cmd, "No search criteria specified. Use --skill, --locator, --domain, or --module flags.\n") - presenter.PrintSmartf(cmd, "Examples:\n") - presenter.PrintSmartf(cmd, " dirctl routing search --skill 'AI' --locator 'docker-image'\n") - presenter.PrintSmartf(cmd, " dirctl routing search --domain 'research' --module 'core/llm/model'\n") - - return nil - } - - // Build search request - req := &routingv1.SearchRequest{ - Queries: queries, - } - - // Add optional parameters - if searchOpts.Limit > 0 { - req.Limit = &searchOpts.Limit - } - - if searchOpts.MinScore > 0 { - req.MinMatchScore = &searchOpts.MinScore - } - - // Execute search - resultCh, err := c.SearchRouting(cmd.Context(), req) - if err != nil { - return fmt.Errorf("failed to search routing: %w", err) - } - - // Collect results - results := make([]interface{}, 0, searchOpts.Limit) - for result := range resultCh { - results = append(results, result) - } - - return presenter.PrintMessage(cmd, "remote records", "Remote records found", results) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package routing + +import ( + "errors" + "fmt" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var searchCmd = &cobra.Command{ + Use: "search", + Short: "Search for remote records from other peers", + Long: `Search for remote records from other peers using the routing API. + +This command discovers records that have been published by other peers in the network. +It uses cached network announcements and filters out local records. + +Key Features: +- Remote-only: Only returns records from other peers +- OR logic: Records returned if they match ≥ minScore queries +- Match scoring: Shows how well records match your criteria +- Peer information: Shows which peer provides each record + +Usage examples: + +1. Search for AI-related records: + dirctl routing search --skill "AI" + +2. Search with multiple criteria (AND logic): + dirctl routing search --skill "AI" --skill "ML" --min-score 2 + +3. Search with result limiting: + dirctl routing search --skill "web-development" --limit 5 + +4. Output formats: + # Get results as JSON + dirctl routing search --skill "AI" --output json + + # Search and pipe to sync + dirctl routing search --skill "AI" --output json | dirctl sync create --stdin + + # Get raw results for scripting + dirctl routing search --skill "web" --output raw + +`, + //nolint:gocritic // Lambda required due to signature mismatch - runSearchCommand doesn't use args + RunE: func(cmd *cobra.Command, _ []string) error { + return runSearchCommand(cmd) + }, +} + +// Search command options. +var searchOpts struct { + Skills []string + Locators []string + Domains []string + Modules []string + Limit uint32 + MinScore uint32 +} + +const ( + defaultSearchLimit = 10 + // defaultMinScore matches the server-side DefaultMinMatchScore constant for consistency. 
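
The min-score threshold documented here is what gives search its OR-with-threshold semantics: a record is returned once it satisfies at least min-score of the submitted queries, so --min-score 1 behaves like OR and --min-score equal to the number of queries behaves like AND. Scoring happens server-side; an illustrative client-side equivalent, with label matching simplified to set membership:

    // matchScore counts how many of the queries a record's label set satisfies.
    func matchScore(labels map[string]bool, queries []*routingv1.RecordQuery) uint32 {
        var score uint32
        for _, q := range queries {
            if labels[q.GetValue()] {
                score++
            }
        }
        return score
    }

    // A record is included when matchScore(labels, queries) >= minScore.
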
+ defaultMinScore = 1 +) + +func init() { + // Add flags for search options + searchCmd.Flags().StringArrayVar(&searchOpts.Skills, "skill", nil, "Search for records with specific skill (can be repeated)") + searchCmd.Flags().StringArrayVar(&searchOpts.Locators, "locator", nil, "Search for records with specific locator type (can be repeated)") + searchCmd.Flags().StringArrayVar(&searchOpts.Domains, "domain", nil, "Search for records with specific domain (can be repeated)") + searchCmd.Flags().StringArrayVar(&searchOpts.Modules, "module", nil, "Search for records with specific module (can be repeated)") + searchCmd.Flags().Uint32Var(&searchOpts.Limit, "limit", defaultSearchLimit, "Maximum number of results to return") + searchCmd.Flags().Uint32Var(&searchOpts.MinScore, "min-score", defaultMinScore, "Minimum match score (number of queries that must match)") + + // Add examples in flag help + searchCmd.Flags().Lookup("skill").Usage = "Search for records with specific skill (e.g., --skill 'AI' --skill 'ML')" + searchCmd.Flags().Lookup("locator").Usage = "Search for records with specific locator type (e.g., --locator 'docker-image')" + searchCmd.Flags().Lookup("domain").Usage = "Search for records with specific domain (e.g., --domain 'research' --domain 'analytics')" + searchCmd.Flags().Lookup("module").Usage = "Search for records with specific module (e.g., --module 'core/llm/model')" + + // Add standard output format flags + presenter.AddOutputFlags(searchCmd) +} + +func runSearchCommand(cmd *cobra.Command) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Build queries from flags + queries := make([]*routingv1.RecordQuery, 0, len(searchOpts.Skills)+len(searchOpts.Locators)+len(searchOpts.Domains)+len(searchOpts.Modules)) + + // Add skill queries + for _, skill := range searchOpts.Skills { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: skill, + }) + } + + // Add locator queries + for _, locator := range searchOpts.Locators { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: locator, + }) + } + + // Add domain queries + for _, domain := range searchOpts.Domains { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: domain, + }) + } + + // Add module queries + for _, module := range searchOpts.Modules { + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: module, + }) + } + + // Validate that we have at least some criteria + if len(queries) == 0 { + presenter.PrintSmartf(cmd, "No search criteria specified. 
Use --skill, --locator, --domain, or --module flags.\n") + presenter.PrintSmartf(cmd, "Examples:\n") + presenter.PrintSmartf(cmd, " dirctl routing search --skill 'AI' --locator 'docker-image'\n") + presenter.PrintSmartf(cmd, " dirctl routing search --domain 'research' --module 'core/llm/model'\n") + + return nil + } + + // Build search request + req := &routingv1.SearchRequest{ + Queries: queries, + } + + // Add optional parameters + if searchOpts.Limit > 0 { + req.Limit = &searchOpts.Limit + } + + if searchOpts.MinScore > 0 { + req.MinMatchScore = &searchOpts.MinScore + } + + // Execute search + resultCh, err := c.SearchRouting(cmd.Context(), req) + if err != nil { + return fmt.Errorf("failed to search routing: %w", err) + } + + // Collect results + results := make([]interface{}, 0, searchOpts.Limit) + for result := range resultCh { + results = append(results, result) + } + + return presenter.PrintMessage(cmd, "remote records", "Remote records found", results) +} diff --git a/cli/cmd/routing/unpublish.go b/cli/cmd/routing/unpublish.go index 849a8b080..8bb84ea20 100644 --- a/cli/cmd/routing/unpublish.go +++ b/cli/cmd/routing/unpublish.go @@ -1,89 +1,89 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package routing - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var unpublishCmd = &cobra.Command{ - Use: "unpublish ", - Short: "Unpublish record from the network", - Long: `Unpublish a record from the network to stop content discovery by other peers. - -This command removes a record's network announcements, making it no longer -discoverable by other peers through the DHT. The record remains in local storage. - -Key Features: -- Network removal: Removes record from distributed discovery -- Local cleanup: Removes record from local routing index -- DHT cleanup: Removes record and label announcements from network -- Immediate effect: Record becomes undiscoverable by other peers - -Usage examples: - -1. Unpublish a record from the network: - dirctl routing unpublish - -2. Output formats: - # Unpublish with JSON confirmation - dirctl routing unpublish --output json - - # Unpublish with raw output for scripting - dirctl routing unpublish --output raw - -Note: This only removes network announcements. Use 'dirctl delete' to remove the record entirely. 
-`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUnpublishCommand(cmd, args[0]) - }, -} - -func runUnpublishCommand(cmd *cobra.Command, cid string) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Create RecordRef from cid - recordRef := &corev1.RecordRef{ - Cid: cid, - } - - // Lookup metadata to verify record exists - _, err := c.Lookup(cmd.Context(), recordRef) - if err != nil { - return fmt.Errorf("failed to lookup: %w", err) - } - - // Start unpublishing using the same RecordRef - if err := c.Unpublish(cmd.Context(), &routingv1.UnpublishRequest{ - Request: &routingv1.UnpublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{ - Refs: []*corev1.RecordRef{recordRef}, - }, - }, - }); err != nil { - return fmt.Errorf("failed to unpublish: %w", err) - } - - // Output in the appropriate format - result := map[string]interface{}{ - "cid": recordRef.GetCid(), - "status": "unpublished", - "message": "Record is no longer discoverable by other peers", - } - - return presenter.PrintMessage(cmd, "Unpublish", "Successfully unpublished record", result) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package routing + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +var unpublishCmd = &cobra.Command{ + Use: "unpublish ", + Short: "Unpublish record from the network", + Long: `Unpublish a record from the network to stop content discovery by other peers. + +This command removes a record's network announcements, making it no longer +discoverable by other peers through the DHT. The record remains in local storage. + +Key Features: +- Network removal: Removes record from distributed discovery +- Local cleanup: Removes record from local routing index +- DHT cleanup: Removes record and label announcements from network +- Immediate effect: Record becomes undiscoverable by other peers + +Usage examples: + +1. Unpublish a record from the network: + dirctl routing unpublish + +2. Output formats: + # Unpublish with JSON confirmation + dirctl routing unpublish --output json + + # Unpublish with raw output for scripting + dirctl routing unpublish --output raw + +Note: This only removes network announcements. Use 'dirctl delete' to remove the record entirely. 
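+
+For a full cleanup of both network and local copies:
+
+   dirctl routing unpublish <cid>
+   dirctl delete <cid>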
+`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUnpublishCommand(cmd, args[0]) + }, +} + +func runUnpublishCommand(cmd *cobra.Command, cid string) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Create RecordRef from cid + recordRef := &corev1.RecordRef{ + Cid: cid, + } + + // Lookup metadata to verify record exists + _, err := c.Lookup(cmd.Context(), recordRef) + if err != nil { + return fmt.Errorf("failed to lookup: %w", err) + } + + // Start unpublishing using the same RecordRef + if err := c.Unpublish(cmd.Context(), &routingv1.UnpublishRequest{ + Request: &routingv1.UnpublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{ + Refs: []*corev1.RecordRef{recordRef}, + }, + }, + }); err != nil { + return fmt.Errorf("failed to unpublish: %w", err) + } + + // Output in the appropriate format + result := map[string]interface{}{ + "cid": recordRef.GetCid(), + "status": "unpublished", + "message": "Record is no longer discoverable by other peers", + } + + return presenter.PrintMessage(cmd, "Unpublish", "Successfully unpublished record", result) +} diff --git a/cli/cmd/search/options.go b/cli/cmd/search/options.go index 4d60d1dac..491c51e38 100644 --- a/cli/cmd/search/options.go +++ b/cli/cmd/search/options.go @@ -1,176 +1,176 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package search - -import ( - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/spf13/cobra" -) - -var opts = &options{} - -type options struct { - Limit uint32 - Offset uint32 - Format string - - // Direct field flags (consistent with routing search) - Names []string - Versions []string - SkillIDs []string - SkillNames []string - Locators []string - Modules []string - DomainIDs []string - DomainNames []string - CreatedAts []string - Authors []string - SchemaVersions []string - ModuleIDs []string -} - -// registerFlags adds search flags to the command. 
-func registerFlags(cmd *cobra.Command) { - flags := cmd.Flags() - - flags.StringVar(&opts.Format, "format", "cid", "Output format: cid (default) or record") - flags.Uint32Var(&opts.Limit, "limit", 100, "Maximum number of results to return (default: 100)") //nolint:mnd - flags.Uint32Var(&opts.Offset, "offset", 0, "Pagination offset (default: 0)") - - // Direct field flags - flags.StringArrayVar(&opts.Names, "name", nil, "Search for records with specific name (can be repeated)") - flags.StringArrayVar(&opts.Versions, "version", nil, "Search for records with specific version (can be repeated)") - flags.StringArrayVar(&opts.SkillIDs, "skill-id", nil, "Search for records with specific skill ID (can be repeated)") - flags.StringArrayVar(&opts.SkillNames, "skill", nil, "Search for records with specific skill name (can be repeated)") - flags.StringArrayVar(&opts.Locators, "locator", nil, "Search for records with specific locator type (can be repeated)") - flags.StringArrayVar(&opts.Modules, "module", nil, "Search for records with specific module (can be repeated)") - flags.StringArrayVar(&opts.DomainIDs, "domain-id", nil, "Search for records with specific domain ID (can be repeated)") - flags.StringArrayVar(&opts.DomainNames, "domain", nil, "Search for records with specific domain name (can be repeated)") - flags.StringArrayVar(&opts.CreatedAts, "created-at", nil, "Search for records with specific created_at timestamp (can be repeated)") - flags.StringArrayVar(&opts.Authors, "author", nil, "Search for records with specific author (can be repeated)") - flags.StringArrayVar(&opts.SchemaVersions, "schema-version", nil, "Search for records with specific schema version (can be repeated)") - flags.StringArrayVar(&opts.ModuleIDs, "module-id", nil, "Search for records with specific module ID (can be repeated)") - - // Add examples in flag help - flags.Lookup("name").Usage = "Search for records with specific name (e.g., --name 'my-agent' --name 'web-*')" - flags.Lookup("version").Usage = "Search for records with specific version (e.g., --version 'v1.0.0' --version 'v1.*')" - flags.Lookup("skill-id").Usage = "Search for records with specific skill ID (e.g., --skill-id '10201')" - flags.Lookup("skill").Usage = "Search for records with specific skill name (e.g., --skill 'natural_language_processing' --skill 'audio')" - flags.Lookup("locator").Usage = "Search for records with specific locator type (e.g., --locator 'docker-image')" - flags.Lookup("module").Usage = "Search for records with specific module (e.g., --module 'core/llm/model')" - flags.Lookup("domain-id").Usage = "Search for records with specific domain ID (e.g., --domain-id '604')" - flags.Lookup("domain").Usage = "Search for records with specific domain name (e.g., --domain '*education*' --domain 'healthcare/*')" - flags.Lookup("created-at").Usage = "Search for records with specific created_at timestamp (e.g., --created-at '2024-*')" - flags.Lookup("author").Usage = "Search for records with specific author (e.g., --author 'john*')" - flags.Lookup("schema-version").Usage = "Search for records with specific schema version (e.g., --schema-version '0.8.*')" - flags.Lookup("module-id").Usage = "Search for records with specific module ID (e.g., --module-id '201')" -} - -// buildQueriesFromFlags builds API queries. 
-func buildQueriesFromFlags() []*searchv1.RecordQuery { - queries := make([]*searchv1.RecordQuery, 0, - len(opts.Names)+len(opts.Versions)+len(opts.SkillIDs)+ - len(opts.SkillNames)+len(opts.Locators)+len(opts.Modules)+ - len(opts.DomainIDs)+len(opts.DomainNames)+ - len(opts.CreatedAts)+len(opts.Authors)+ - len(opts.SchemaVersions)+len(opts.ModuleIDs)) - - // Add name queries - for _, name := range opts.Names { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, - Value: name, - }) - } - - // Add version queries - for _, version := range opts.Versions { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, - Value: version, - }) - } - - // Add skill-id queries - for _, skillID := range opts.SkillIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, - Value: skillID, - }) - } - - // Add skill-name queries - for _, skillName := range opts.SkillNames { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, - Value: skillName, - }) - } - - // Add locator queries - for _, locator := range opts.Locators { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: locator, - }) - } - - // Add module queries - for _, module := range opts.Modules { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, - Value: module, - }) - } - - // Add domain-id queries - for _, domainID := range opts.DomainIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, - Value: domainID, - }) - } - - // Add domain-name queries - for _, domainName := range opts.DomainNames { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, - Value: domainName, - }) - } - - // Add created-at queries - for _, createdAt := range opts.CreatedAts { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT, - Value: createdAt, - }) - } - - // Add author queries - for _, author := range opts.Authors { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR, - Value: author, - }) - } - - // Add schema-version queries - for _, schemaVersion := range opts.SchemaVersions { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION, - Value: schemaVersion, - }) - } - - // Add module-id queries - for _, moduleID := range opts.ModuleIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID, - Value: moduleID, - }) - } - - return queries -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package search + +import ( + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/spf13/cobra" +) + +var opts = &options{} + +type options struct { + Limit uint32 + Offset uint32 + Format string + + // Direct field flags (consistent with routing search) + Names []string + Versions []string + SkillIDs []string + SkillNames []string + Locators []string + Modules []string + DomainIDs []string + DomainNames []string + CreatedAts []string + Authors []string + SchemaVersions []string + ModuleIDs []string +} + 
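+// Each field above is filled from one repeatable flag; for example,
+// `--skill "python*" --skill "*script"` puts both patterns into
+// SkillNames, and buildQueriesFromFlags (below) turns every value into
+// its own query:
+//
+//	&searchv1.RecordQuery{
+//		Type:  searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME,
+//		Value: "python*",
+//	}
+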
+// registerFlags adds search flags to the command. +func registerFlags(cmd *cobra.Command) { + flags := cmd.Flags() + + flags.StringVar(&opts.Format, "format", "cid", "Output format: cid (default) or record") + flags.Uint32Var(&opts.Limit, "limit", 100, "Maximum number of results to return (default: 100)") //nolint:mnd + flags.Uint32Var(&opts.Offset, "offset", 0, "Pagination offset (default: 0)") + + // Direct field flags + flags.StringArrayVar(&opts.Names, "name", nil, "Search for records with specific name (can be repeated)") + flags.StringArrayVar(&opts.Versions, "version", nil, "Search for records with specific version (can be repeated)") + flags.StringArrayVar(&opts.SkillIDs, "skill-id", nil, "Search for records with specific skill ID (can be repeated)") + flags.StringArrayVar(&opts.SkillNames, "skill", nil, "Search for records with specific skill name (can be repeated)") + flags.StringArrayVar(&opts.Locators, "locator", nil, "Search for records with specific locator type (can be repeated)") + flags.StringArrayVar(&opts.Modules, "module", nil, "Search for records with specific module (can be repeated)") + flags.StringArrayVar(&opts.DomainIDs, "domain-id", nil, "Search for records with specific domain ID (can be repeated)") + flags.StringArrayVar(&opts.DomainNames, "domain", nil, "Search for records with specific domain name (can be repeated)") + flags.StringArrayVar(&opts.CreatedAts, "created-at", nil, "Search for records with specific created_at timestamp (can be repeated)") + flags.StringArrayVar(&opts.Authors, "author", nil, "Search for records with specific author (can be repeated)") + flags.StringArrayVar(&opts.SchemaVersions, "schema-version", nil, "Search for records with specific schema version (can be repeated)") + flags.StringArrayVar(&opts.ModuleIDs, "module-id", nil, "Search for records with specific module ID (can be repeated)") + + // Add examples in flag help + flags.Lookup("name").Usage = "Search for records with specific name (e.g., --name 'my-agent' --name 'web-*')" + flags.Lookup("version").Usage = "Search for records with specific version (e.g., --version 'v1.0.0' --version 'v1.*')" + flags.Lookup("skill-id").Usage = "Search for records with specific skill ID (e.g., --skill-id '10201')" + flags.Lookup("skill").Usage = "Search for records with specific skill name (e.g., --skill 'natural_language_processing' --skill 'audio')" + flags.Lookup("locator").Usage = "Search for records with specific locator type (e.g., --locator 'docker-image')" + flags.Lookup("module").Usage = "Search for records with specific module (e.g., --module 'core/llm/model')" + flags.Lookup("domain-id").Usage = "Search for records with specific domain ID (e.g., --domain-id '604')" + flags.Lookup("domain").Usage = "Search for records with specific domain name (e.g., --domain '*education*' --domain 'healthcare/*')" + flags.Lookup("created-at").Usage = "Search for records with specific created_at timestamp (e.g., --created-at '2024-*')" + flags.Lookup("author").Usage = "Search for records with specific author (e.g., --author 'john*')" + flags.Lookup("schema-version").Usage = "Search for records with specific schema version (e.g., --schema-version '0.8.*')" + flags.Lookup("module-id").Usage = "Search for records with specific module ID (e.g., --module-id '201')" +} + +// buildQueriesFromFlags builds API queries. 
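+// Flag values are forwarded verbatim, so the wildcard and comparison
+// syntax documented in the search command help ("web*", ">=1.0.0") is
+// left for the server to interpret.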
+func buildQueriesFromFlags() []*searchv1.RecordQuery { + queries := make([]*searchv1.RecordQuery, 0, + len(opts.Names)+len(opts.Versions)+len(opts.SkillIDs)+ + len(opts.SkillNames)+len(opts.Locators)+len(opts.Modules)+ + len(opts.DomainIDs)+len(opts.DomainNames)+ + len(opts.CreatedAts)+len(opts.Authors)+ + len(opts.SchemaVersions)+len(opts.ModuleIDs)) + + // Add name queries + for _, name := range opts.Names { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, + Value: name, + }) + } + + // Add version queries + for _, version := range opts.Versions { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, + Value: version, + }) + } + + // Add skill-id queries + for _, skillID := range opts.SkillIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, + Value: skillID, + }) + } + + // Add skill-name queries + for _, skillName := range opts.SkillNames { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, + Value: skillName, + }) + } + + // Add locator queries + for _, locator := range opts.Locators { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: locator, + }) + } + + // Add module queries + for _, module := range opts.Modules { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, + Value: module, + }) + } + + // Add domain-id queries + for _, domainID := range opts.DomainIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, + Value: domainID, + }) + } + + // Add domain-name queries + for _, domainName := range opts.DomainNames { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, + Value: domainName, + }) + } + + // Add created-at queries + for _, createdAt := range opts.CreatedAts { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT, + Value: createdAt, + }) + } + + // Add author queries + for _, author := range opts.Authors { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR, + Value: author, + }) + } + + // Add schema-version queries + for _, schemaVersion := range opts.SchemaVersions { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION, + Value: schemaVersion, + }) + } + + // Add module-id queries + for _, moduleID := range opts.ModuleIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID, + Value: moduleID, + }) + } + + return queries +} diff --git a/cli/cmd/search/search.go b/cli/cmd/search/search.go index fe18a254c..c8ec42928 100644 --- a/cli/cmd/search/search.go +++ b/cli/cmd/search/search.go @@ -1,137 +1,137 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package search - -import ( - "errors" - "fmt" - - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/agntcy/dir/client" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "search", - Short: "Search 
for records in the directory", - Long: `Search for records in the directory using various filters and options. - -The --format flag controls what is returned: -- cid: Return only record CIDs (default, efficient for piping) -- record: Return full record data - -Examples: - -1. Search for CIDs only (default, efficient for piping): - dirctl search --name "web*" | xargs -I {} dirctl pull {} - -2. Search and get full records: - dirctl search --name "web*" --format record --output json - -3. Wildcard search examples: - dirctl search --name "web*" - dirctl search --version "v1.*" - dirctl search --skill "python*" --skill "*script" - dirctl search --domain "*education*" - -4. Comparison operators (for version, created-at, schema-version): - dirctl search --version ">=1.0.0" --version "<2.0.0" - dirctl search --created-at ">=2024-01-01" - -Supported wildcards: - * - matches zero or more characters - ? - matches exactly one character - [] - matches any character within brackets (e.g., [0-9], [a-z]) -`, - RunE: func(cmd *cobra.Command, _ []string) error { - return runSearchCommand(cmd) - }, -} - -func init() { - registerFlags(Command) - presenter.AddOutputFlags(Command) -} - -func runSearchCommand(cmd *cobra.Command) error { - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - // Build queries from direct field flags - queries := buildQueriesFromFlags() - - switch opts.Format { - case "cid": - return searchCIDs(cmd, c, queries) - case "record": - return searchRecords(cmd, c, queries) - default: - return fmt.Errorf("invalid format: %s (valid values: cid, record)", opts.Format) - } -} - -func searchCIDs(cmd *cobra.Command, c *client.Client, queries []*searchv1.RecordQuery) error { - result, err := c.SearchCIDs(cmd.Context(), &searchv1.SearchCIDsRequest{ - Limit: &opts.Limit, - Offset: &opts.Offset, - Queries: queries, - }) - if err != nil { - return fmt.Errorf("failed to search CIDs: %w", err) - } - - // Collect results and convert to interface{} slice - results := make([]interface{}, 0, opts.Limit) - - for { - select { - case resp := <-result.ResCh(): - cid := resp.GetRecordCid() - if cid != "" { - results = append(results, cid) - } - case err := <-result.ErrCh(): - return fmt.Errorf("error receiving CID: %w", err) - case <-result.DoneCh(): - return presenter.PrintMessage(cmd, "record CIDs", "Record CIDs found", results) - case <-cmd.Context().Done(): - return cmd.Context().Err() - } - } -} - -func searchRecords(cmd *cobra.Command, c *client.Client, queries []*searchv1.RecordQuery) error { - result, err := c.SearchRecords(cmd.Context(), &searchv1.SearchRecordsRequest{ - Limit: &opts.Limit, - Offset: &opts.Offset, - Queries: queries, - }) - if err != nil { - return fmt.Errorf("failed to search records: %w", err) - } - - // Collect records - results := make([]interface{}, 0, opts.Limit) - - for { - select { - case resp := <-result.ResCh(): - record := resp.GetRecord() - if record != nil { - results = append(results, record) - } - case err := <-result.ErrCh(): - return fmt.Errorf("error receiving record: %w", err) - case <-result.DoneCh(): - return presenter.PrintMessage(cmd, "records", "Records found", results) - case <-cmd.Context().Done(): - return cmd.Context().Err() - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package search + +import ( + "errors" + "fmt" + + searchv1 "github.com/agntcy/dir/api/search/v1" + 
"github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/agntcy/dir/client" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "search", + Short: "Search for records in the directory", + Long: `Search for records in the directory using various filters and options. + +The --format flag controls what is returned: +- cid: Return only record CIDs (default, efficient for piping) +- record: Return full record data + +Examples: + +1. Search for CIDs only (default, efficient for piping): + dirctl search --name "web*" | xargs -I {} dirctl pull {} + +2. Search and get full records: + dirctl search --name "web*" --format record --output json + +3. Wildcard search examples: + dirctl search --name "web*" + dirctl search --version "v1.*" + dirctl search --skill "python*" --skill "*script" + dirctl search --domain "*education*" + +4. Comparison operators (for version, created-at, schema-version): + dirctl search --version ">=1.0.0" --version "<2.0.0" + dirctl search --created-at ">=2024-01-01" + +Supported wildcards: + * - matches zero or more characters + ? - matches exactly one character + [] - matches any character within brackets (e.g., [0-9], [a-z]) +`, + RunE: func(cmd *cobra.Command, _ []string) error { + return runSearchCommand(cmd) + }, +} + +func init() { + registerFlags(Command) + presenter.AddOutputFlags(Command) +} + +func runSearchCommand(cmd *cobra.Command) error { + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + // Build queries from direct field flags + queries := buildQueriesFromFlags() + + switch opts.Format { + case "cid": + return searchCIDs(cmd, c, queries) + case "record": + return searchRecords(cmd, c, queries) + default: + return fmt.Errorf("invalid format: %s (valid values: cid, record)", opts.Format) + } +} + +func searchCIDs(cmd *cobra.Command, c *client.Client, queries []*searchv1.RecordQuery) error { + result, err := c.SearchCIDs(cmd.Context(), &searchv1.SearchCIDsRequest{ + Limit: &opts.Limit, + Offset: &opts.Offset, + Queries: queries, + }) + if err != nil { + return fmt.Errorf("failed to search CIDs: %w", err) + } + + // Collect results and convert to interface{} slice + results := make([]interface{}, 0, opts.Limit) + + for { + select { + case resp := <-result.ResCh(): + cid := resp.GetRecordCid() + if cid != "" { + results = append(results, cid) + } + case err := <-result.ErrCh(): + return fmt.Errorf("error receiving CID: %w", err) + case <-result.DoneCh(): + return presenter.PrintMessage(cmd, "record CIDs", "Record CIDs found", results) + case <-cmd.Context().Done(): + return cmd.Context().Err() + } + } +} + +func searchRecords(cmd *cobra.Command, c *client.Client, queries []*searchv1.RecordQuery) error { + result, err := c.SearchRecords(cmd.Context(), &searchv1.SearchRecordsRequest{ + Limit: &opts.Limit, + Offset: &opts.Offset, + Queries: queries, + }) + if err != nil { + return fmt.Errorf("failed to search records: %w", err) + } + + // Collect records + results := make([]interface{}, 0, opts.Limit) + + for { + select { + case resp := <-result.ResCh(): + record := resp.GetRecord() + if record != nil { + results = append(results, record) + } + case err := <-result.ErrCh(): + return fmt.Errorf("error receiving record: %w", err) + case <-result.DoneCh(): + return presenter.PrintMessage(cmd, "records", "Records found", results) + case <-cmd.Context().Done(): + return cmd.Context().Err() + } + } +} diff --git 
a/cli/cmd/sign/options.go b/cli/cmd/sign/options.go index 59888f197..e7c8b4c4e 100644 --- a/cli/cmd/sign/options.go +++ b/cli/cmd/sign/options.go @@ -1,44 +1,44 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sign - -import ( - "github.com/agntcy/dir/cli/presenter" - "github.com/agntcy/dir/client" - "github.com/agntcy/dir/utils/cosign" - "github.com/spf13/pflag" -) - -var opts = &options{} - -type options struct { - // Signing options - client.SignOpts -} - -func init() { - flags := Command.Flags() - - AddSigningFlags(flags) - - // Add output format flags - presenter.AddOutputFlags(Command) -} - -func AddSigningFlags(flags *pflag.FlagSet) { - flags.StringVar(&opts.FulcioURL, "fulcio-url", cosign.DefaultFulcioURL, - "Sigstore Fulcio URL") - flags.StringVar(&opts.RekorURL, "rekor-url", cosign.DefaultRekorURL, - "Sigstore Rekor URL") - flags.StringVar(&opts.TimestampURL, "timestamp-url", cosign.DefaultTimestampURL, - "Sigstore Timestamp URL") - flags.StringVar(&opts.OIDCProviderURL, "oidc-provider-url", cosign.DefaultOIDCProviderURL, - "OIDC Provider URL") - flags.StringVar(&opts.OIDCClientID, "oidc-client-id", cosign.DefaultOIDCClientID, - "OIDC Client ID") - flags.StringVar(&opts.OIDCToken, "oidc-token", "", - "OIDC Token for non-interactive signing. ") - flags.StringVar(&opts.Key, "key", "", - "Path to the private key file to use for signing (e.g., a Cosign key generated with a GitHub token). Use this option to sign with a self-managed keypair instead of OIDC identity-based signing.") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sign + +import ( + "github.com/agntcy/dir/cli/presenter" + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/utils/cosign" + "github.com/spf13/pflag" +) + +var opts = &options{} + +type options struct { + // Signing options + client.SignOpts +} + +func init() { + flags := Command.Flags() + + AddSigningFlags(flags) + + // Add output format flags + presenter.AddOutputFlags(Command) +} + +func AddSigningFlags(flags *pflag.FlagSet) { + flags.StringVar(&opts.FulcioURL, "fulcio-url", cosign.DefaultFulcioURL, + "Sigstore Fulcio URL") + flags.StringVar(&opts.RekorURL, "rekor-url", cosign.DefaultRekorURL, + "Sigstore Rekor URL") + flags.StringVar(&opts.TimestampURL, "timestamp-url", cosign.DefaultTimestampURL, + "Sigstore Timestamp URL") + flags.StringVar(&opts.OIDCProviderURL, "oidc-provider-url", cosign.DefaultOIDCProviderURL, + "OIDC Provider URL") + flags.StringVar(&opts.OIDCClientID, "oidc-client-id", cosign.DefaultOIDCClientID, + "OIDC Client ID") + flags.StringVar(&opts.OIDCToken, "oidc-token", "", + "OIDC Token for non-interactive signing. ") + flags.StringVar(&opts.Key, "key", "", + "Path to the private key file to use for signing (e.g., a Cosign key generated with a GitHub token). 
Use this option to sign with a self-managed keypair instead of OIDC identity-based signing.") +} diff --git a/cli/cmd/sign/sign.go b/cli/cmd/sign/sign.go index 9b9e605cb..c3ad5b29c 100644 --- a/cli/cmd/sign/sign.go +++ b/cli/cmd/sign/sign.go @@ -1,172 +1,172 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package sign - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/agntcy/dir/client" - "github.com/agntcy/dir/utils/cosign" - "github.com/sigstore/sigstore/pkg/oauthflow" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "sign", - Short: "Sign record using identity-based OIDC or key-based signing", - Long: `This command signs the record using identity-based signing. -It uses a short-lived signing certificate issued by Sigstore Fulcio -along with a local ephemeral signing key and OIDC identity. - -Verification data is attached to the signed record, -and the transparency log is pushed to Sigstore Rekor. - -This command opens a browser window to authenticate the user -with the default OIDC provider. - -Usage examples: - -1. Sign a record using OIDC: - - dirctl sign - -2. Sign a record using key: - - dirctl sign --key - -3. Output formats: - - # Get signing result as JSON - dirctl sign --output json - - # Sign with key and JSON output - dirctl sign --key --output json -`, - RunE: func(cmd *cobra.Command, args []string) error { - var recordCID string - if len(args) > 1 { - return errors.New("only one record CID is allowed") - } else if len(args) == 1 { - recordCID = args[0] - } else { - return errors.New("record CID is required") - } - - return runCommand(cmd, recordCID) - }, -} - -func runCommand(cmd *cobra.Command, recordCID string) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - err := Sign(cmd.Context(), c, recordCID) - if err != nil { - return fmt.Errorf("failed to sign record: %w", err) - } - - // Output in the appropriate format - return presenter.PrintMessage(cmd, "signature", "Record is", "signed") -} - -func Sign(ctx context.Context, c *client.Client, recordCID string) error { - switch { - case opts.Key != "": - // Load the key from file - rawKey, err := os.ReadFile(filepath.Clean(opts.Key)) - if err != nil { - return fmt.Errorf("failed to read key file: %w", err) - } - - // Read password from environment variable - pw, err := cosign.ReadPrivateKeyPassword()() - if err != nil { - return fmt.Errorf("failed to read password: %w", err) - } - - req := &signv1.SignRequest{ - RecordRef: &corev1.RecordRef{Cid: recordCID}, - Provider: &signv1.SignRequestProvider{ - Request: &signv1.SignRequestProvider_Key{ - Key: &signv1.SignWithKey{ - PrivateKey: rawKey, - Password: pw, - }, - }, - }, - } - - // Sign the record using the provided key - _, err = c.SignWithKey(ctx, req) - if err != nil { - return fmt.Errorf("failed to sign record with key: %w", err) - } - case opts.OIDCToken != "": - req := &signv1.SignRequest{ - RecordRef: &corev1.RecordRef{Cid: recordCID}, - Provider: &signv1.SignRequestProvider{ - Request: &signv1.SignRequestProvider_Oidc{ - Oidc: &signv1.SignWithOIDC{ - IdToken: opts.OIDCToken, - Options: &signv1.SignWithOIDC_SignOpts{ - FulcioUrl: 
&opts.FulcioURL, - RekorUrl: &opts.RekorURL, - TimestampUrl: &opts.TimestampURL, - OidcProviderUrl: &opts.OIDCProviderURL, - }, - }, - }, - }, - } - - // Sign the record using the OIDC provider - _, err := c.SignWithOIDC(ctx, req) - if err != nil { - return fmt.Errorf("failed to sign record: %w", err) - } - default: - // Retrieve the token from the OIDC provider - token, err := oauthflow.OIDConnect(opts.OIDCProviderURL, opts.OIDCClientID, "", "", oauthflow.DefaultIDTokenGetter) - if err != nil { - return fmt.Errorf("failed to get OIDC token: %w", err) - } - - req := &signv1.SignRequest{ - RecordRef: &corev1.RecordRef{Cid: recordCID}, - Provider: &signv1.SignRequestProvider{ - Request: &signv1.SignRequestProvider_Oidc{ - Oidc: &signv1.SignWithOIDC{ - IdToken: token.RawString, - Options: &signv1.SignWithOIDC_SignOpts{ - FulcioUrl: &opts.FulcioURL, - RekorUrl: &opts.RekorURL, - TimestampUrl: &opts.TimestampURL, - OidcProviderUrl: &opts.OIDCProviderURL, - }, - }, - }, - }, - } - - // Sign the record using the OIDC provider - _, err = c.SignWithOIDC(ctx, req) - if err != nil { - return fmt.Errorf("failed to sign record: %w", err) - } - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package sign + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/utils/cosign" + "github.com/sigstore/sigstore/pkg/oauthflow" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "sign", + Short: "Sign record using identity-based OIDC or key-based signing", + Long: `This command signs the record using identity-based signing. +It uses a short-lived signing certificate issued by Sigstore Fulcio +along with a local ephemeral signing key and OIDC identity. + +Verification data is attached to the signed record, +and the transparency log is pushed to Sigstore Rekor. + +This command opens a browser window to authenticate the user +with the default OIDC provider. + +Usage examples: + +1. Sign a record using OIDC: + + dirctl sign + +2. Sign a record using key: + + dirctl sign --key + +3. 
Output formats: + + # Get signing result as JSON + dirctl sign --output json + + # Sign with key and JSON output + dirctl sign --key --output json +`, + RunE: func(cmd *cobra.Command, args []string) error { + var recordCID string + if len(args) > 1 { + return errors.New("only one record CID is allowed") + } else if len(args) == 1 { + recordCID = args[0] + } else { + return errors.New("record CID is required") + } + + return runCommand(cmd, recordCID) + }, +} + +func runCommand(cmd *cobra.Command, recordCID string) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + err := Sign(cmd.Context(), c, recordCID) + if err != nil { + return fmt.Errorf("failed to sign record: %w", err) + } + + // Output in the appropriate format + return presenter.PrintMessage(cmd, "signature", "Record is", "signed") +} + +func Sign(ctx context.Context, c *client.Client, recordCID string) error { + switch { + case opts.Key != "": + // Load the key from file + rawKey, err := os.ReadFile(filepath.Clean(opts.Key)) + if err != nil { + return fmt.Errorf("failed to read key file: %w", err) + } + + // Read password from environment variable + pw, err := cosign.ReadPrivateKeyPassword()() + if err != nil { + return fmt.Errorf("failed to read password: %w", err) + } + + req := &signv1.SignRequest{ + RecordRef: &corev1.RecordRef{Cid: recordCID}, + Provider: &signv1.SignRequestProvider{ + Request: &signv1.SignRequestProvider_Key{ + Key: &signv1.SignWithKey{ + PrivateKey: rawKey, + Password: pw, + }, + }, + }, + } + + // Sign the record using the provided key + _, err = c.SignWithKey(ctx, req) + if err != nil { + return fmt.Errorf("failed to sign record with key: %w", err) + } + case opts.OIDCToken != "": + req := &signv1.SignRequest{ + RecordRef: &corev1.RecordRef{Cid: recordCID}, + Provider: &signv1.SignRequestProvider{ + Request: &signv1.SignRequestProvider_Oidc{ + Oidc: &signv1.SignWithOIDC{ + IdToken: opts.OIDCToken, + Options: &signv1.SignWithOIDC_SignOpts{ + FulcioUrl: &opts.FulcioURL, + RekorUrl: &opts.RekorURL, + TimestampUrl: &opts.TimestampURL, + OidcProviderUrl: &opts.OIDCProviderURL, + }, + }, + }, + }, + } + + // Sign the record using the OIDC provider + _, err := c.SignWithOIDC(ctx, req) + if err != nil { + return fmt.Errorf("failed to sign record: %w", err) + } + default: + // Retrieve the token from the OIDC provider + token, err := oauthflow.OIDConnect(opts.OIDCProviderURL, opts.OIDCClientID, "", "", oauthflow.DefaultIDTokenGetter) + if err != nil { + return fmt.Errorf("failed to get OIDC token: %w", err) + } + + req := &signv1.SignRequest{ + RecordRef: &corev1.RecordRef{Cid: recordCID}, + Provider: &signv1.SignRequestProvider{ + Request: &signv1.SignRequestProvider_Oidc{ + Oidc: &signv1.SignWithOIDC{ + IdToken: token.RawString, + Options: &signv1.SignWithOIDC_SignOpts{ + FulcioUrl: &opts.FulcioURL, + RekorUrl: &opts.RekorURL, + TimestampUrl: &opts.TimestampURL, + OidcProviderUrl: &opts.OIDCProviderURL, + }, + }, + }, + }, + } + + // Sign the record using the OIDC provider + _, err = c.SignWithOIDC(ctx, req) + if err != nil { + return fmt.Errorf("failed to sign record: %w", err) + } + } + + return nil +} diff --git a/cli/cmd/sync/options.go b/cli/cmd/sync/options.go index 60044ff91..ad3cb9370 100644 --- a/cli/cmd/sync/options.go +++ b/cli/cmd/sync/options.go @@ -1,34 +1,34 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package 
sync - -import "github.com/agntcy/dir/cli/presenter" - -var opts = &options{} - -type options struct { - Limit uint32 - Offset uint32 - CIDs []string - Stdin bool -} - -//nolint:mnd -func init() { - // Add flags for list command - listFlags := listCmd.Flags() - listFlags.Uint32Var(&opts.Limit, "limit", 100, "Maximum number of sync operations to return (default: 100)") - listFlags.Uint32Var(&opts.Offset, "offset", 0, "Number of sync operations to skip (for pagination)") - - // Add flags for create command - createFlags := createCmd.Flags() - createFlags.StringSliceVar(&opts.CIDs, "cids", []string{}, "List of CIDs to synchronize from the remote Directory. If empty, all objects will be synchronized.") - createFlags.BoolVar(&opts.Stdin, "stdin", false, "Parse routing search output from stdin to create sync operations for each provider") - - // Add output format flags to all sync subcommands - presenter.AddOutputFlags(createCmd) - presenter.AddOutputFlags(listCmd) - presenter.AddOutputFlags(statusCmd) - presenter.AddOutputFlags(deleteCmd) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import "github.com/agntcy/dir/cli/presenter" + +var opts = &options{} + +type options struct { + Limit uint32 + Offset uint32 + CIDs []string + Stdin bool +} + +//nolint:mnd +func init() { + // Add flags for list command + listFlags := listCmd.Flags() + listFlags.Uint32Var(&opts.Limit, "limit", 100, "Maximum number of sync operations to return (default: 100)") + listFlags.Uint32Var(&opts.Offset, "offset", 0, "Number of sync operations to skip (for pagination)") + + // Add flags for create command + createFlags := createCmd.Flags() + createFlags.StringSliceVar(&opts.CIDs, "cids", []string{}, "List of CIDs to synchronize from the remote Directory. If empty, all objects will be synchronized.") + createFlags.BoolVar(&opts.Stdin, "stdin", false, "Parse routing search output from stdin to create sync operations for each provider") + + // Add output format flags to all sync subcommands + presenter.AddOutputFlags(createCmd) + presenter.AddOutputFlags(listCmd) + presenter.AddOutputFlags(statusCmd) + presenter.AddOutputFlags(deleteCmd) +} diff --git a/cli/cmd/sync/sync.go b/cli/cmd/sync/sync.go index 93a79ea97..9c5888465 100644 --- a/cli/cmd/sync/sync.go +++ b/cli/cmd/sync/sync.go @@ -1,350 +1,350 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package sync - -import ( - "encoding/json" - "errors" - "fmt" - "io" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "sync", - Short: "Manage synchronization operations with remote Directory nodes", - Long: `Sync command allows you to manage synchronization operations between Directory nodes. -It provides subcommands to create, list, monitor, and delete sync operations.`, -} - -// Create sync subcommand. -var createCmd = &cobra.Command{ - Use: "create ", - Short: "Create a new synchronization operation", - Long: `Create initiates a new synchronization operation from a remote Directory node. -The operation is asynchronous and returns a sync ID for tracking progress. 
- -When --stdin flag is used, the command parses JSON routing search output from stdin -and creates sync operations for each provider found in the search results. - -Usage examples: - -1. Create sync with remote peer: - dir sync create https://directory.example.com - -2. Create sync with specific CIDs: - dir sync create http://localhost:8080 --cids cid1,cid2,cid3 - -3. Create sync from routing search output: - dirctl routing search --skill "AI" --output json | dirctl sync create --stdin`, - Args: func(cmd *cobra.Command, args []string) error { - if opts.Stdin { - return cobra.MaximumNArgs(0)(cmd, args) - } - - return cobra.ExactArgs(1)(cmd, args) - }, - RunE: func(cmd *cobra.Command, args []string) error { - if opts.Stdin { - return runCreateSyncFromStdin(cmd) - } - - return runCreateSync(cmd, args[0], opts.CIDs) - }, -} - -// List syncs subcommand. -var listCmd = &cobra.Command{ - Use: "list", - Short: "List all synchronization operations", - Long: `List displays all sync operations known to the system, including active, -completed, and failed synchronizations. - -Usage examples: - -1. List all syncs: - dirctl sync list - -2. Pagination: - dirctl sync list --limit 10 --offset 20 - -3. Output formats: - # Get syncs as JSON - dirctl sync list --output json - - # Get syncs as JSONL for streaming - dirctl sync list --output jsonl - - # Get raw sync data - dirctl sync list --output raw`, - RunE: func(cmd *cobra.Command, _ []string) error { - return runListSyncs(cmd) - }, -} - -// Get sync status subcommand. -var statusCmd = &cobra.Command{ - Use: "status ", - Short: "Get detailed status of a synchronization operation", - Long: `Status retrieves comprehensive information about a specific sync operation, -including progress, timing, and error details if applicable. - -Usage examples: - -1. Get sync status: - dirctl sync status - -2. Output formats: - # Get sync status as JSON - dirctl sync status --output json - - # Get raw status data for scripting - dirctl sync status --output raw`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runGetSyncStatus(cmd, args[0]) - }, -} - -// Delete sync subcommand. -var deleteCmd = &cobra.Command{ - Use: "delete ", - Short: "Delete a synchronization operation", - Long: `Delete removes a sync operation from the system. For active syncs, -this will attempt to cancel the operation gracefully. - -Usage examples: - -1. Delete a sync: - dirctl sync delete - -2. 
Output formats: - # Delete sync with JSON confirmation - dirctl sync delete --output json - - # Delete sync with raw output for scripting - dirctl sync delete --output raw`, - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDeleteSync(cmd, args[0]) - }, -} - -func init() { - // Add subcommands - Command.AddCommand(createCmd) - Command.AddCommand(listCmd) - Command.AddCommand(statusCmd) - Command.AddCommand(deleteCmd) -} - -func runCreateSync(cmd *cobra.Command, remoteURL string, cids []string) error { - // Validate remote URL - if remoteURL == "" { - return errors.New("remote URL is required") - } - - client, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - syncID, err := client.CreateSync(cmd.Context(), remoteURL, cids) - if err != nil { - return fmt.Errorf("failed to create sync: %w", err) - } - - // Output in the appropriate format - return presenter.PrintMessage(cmd, "sync", "Sync created with ID", syncID) -} - -func runListSyncs(cmd *cobra.Command) error { - client, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - itemCh, err := client.ListSyncs(cmd.Context(), &storev1.ListSyncsRequest{ - Limit: &opts.Limit, - Offset: &opts.Offset, - }) - if err != nil { - return fmt.Errorf("failed to list syncs: %w", err) - } - - // Collect results - var results []interface{} - - for { - select { - case sync, ok := <-itemCh: - if !ok { - // Channel closed, all items received - goto done - } - - results = append(results, sync) - case <-cmd.Context().Done(): - return fmt.Errorf("context cancelled while listing syncs: %w", cmd.Context().Err()) - } - } - -done: - - return presenter.PrintMessage(cmd, "syncs", "Sync results", results) -} - -func runGetSyncStatus(cmd *cobra.Command, syncID string) error { - // Validate sync ID - if syncID == "" { - return errors.New("sync ID is required") - } - - client, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - sync, err := client.GetSync(cmd.Context(), syncID) - if err != nil { - return fmt.Errorf("failed to get sync status: %w", err) - } - - return presenter.PrintMessage(cmd, "sync", "Sync status", sync.GetStatus()) -} - -func runDeleteSync(cmd *cobra.Command, syncID string) error { - // Validate sync ID - if syncID == "" { - return errors.New("sync ID is required") - } - - client, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - err := client.DeleteSync(cmd.Context(), syncID) - if err != nil { - return fmt.Errorf("failed to delete sync: %w", err) - } - - return presenter.PrintMessage(cmd, "sync", "Sync deleted with ID", syncID) -} - -func runCreateSyncFromStdin(cmd *cobra.Command) error { - // Parse the search output from stdin - results, err := parseSearchOutput(cmd.InOrStdin()) - if err != nil { - return fmt.Errorf("failed to parse search output: %w", err) - } - - if len(results) == 0 { - presenter.PrintSmartf(cmd, "No search results found in stdin\n") - - return nil - } - - // Group results by API address (one sync per peer) - peerResults := groupResultsByAPIAddress(results) - - // Create sync operations for each peer - return createSyncOperations(cmd, peerResults) -} - -func parseSearchOutput(input io.Reader) ([]*routingv1.SearchResponse, error) { - // Read JSON input from routing search --output json - 
inputBytes, err := io.ReadAll(input)
-	if err != nil {
-		return nil, fmt.Errorf("error reading input: %w", err)
-	}
-
-	var searchResponses []*routingv1.SearchResponse
-
-	err = json.Unmarshal(inputBytes, &searchResponses)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse JSON: %w", err)
-	}
-
-	return searchResponses, nil
-}
-
-// PeerSyncInfo holds sync information for a peer (grouped by API address).
-type PeerSyncInfo struct {
-	APIAddress string
-	CIDs       []string
-}
-
-func groupResultsByAPIAddress(results []*routingv1.SearchResponse) map[string]PeerSyncInfo {
-	peerResults := make(map[string]PeerSyncInfo)
-
-	for _, result := range results {
-		// Get the first API address if available
-		var apiAddress string
-		if result.GetPeer() != nil && len(result.GetPeer().GetAddrs()) > 0 {
-			apiAddress = result.GetPeer().GetAddrs()[0]
-		}
-
-		// Skip results without API address
-		if apiAddress == "" {
-			continue
-		}
-
-		cid := result.GetRecordRef().GetCid()
-
-		if existing, exists := peerResults[apiAddress]; exists {
-			existing.CIDs = append(existing.CIDs, cid)
-			peerResults[apiAddress] = existing
-		} else {
-			peerResults[apiAddress] = PeerSyncInfo{
-				APIAddress: apiAddress,
-				CIDs:       []string{cid},
-			}
-		}
-	}
-
-	return peerResults
-}
-
-func createSyncOperations(cmd *cobra.Command, peerResults map[string]PeerSyncInfo) error {
-	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
-	if !ok {
-		return errors.New("failed to get client from context")
-	}
-
-	totalSyncs := 0
-	totalCIDs := 0
-
-	syncIDs := make([]interface{}, 0, len(peerResults))
-
-	for apiAddress, syncInfo := range peerResults {
-		if syncInfo.APIAddress == "" {
-			presenter.PrintSmartf(cmd, "WARNING: No API address found for peer\n")
-			presenter.PrintSmartf(cmd, "Skipping sync for this peer\n")
-
-			continue
-		}
-
-		// Create sync operation
-		syncID, err := client.CreateSync(cmd.Context(), syncInfo.APIAddress, syncInfo.CIDs)
-		if err != nil {
-			presenter.PrintSmartf(cmd, "ERROR: Failed to create sync for peer %s: %v\n", apiAddress, err)
-
-			continue
-		}
-
-		syncIDs = append(syncIDs, syncID)
-
-		totalSyncs++
-		totalCIDs += len(syncInfo.CIDs)
-	}
-
-	return presenter.PrintMessage(cmd, "sync IDs", "Sync IDs created", syncIDs)
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+//nolint:wrapcheck
+package sync
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+
+	routingv1 "github.com/agntcy/dir/api/routing/v1"
+	storev1 "github.com/agntcy/dir/api/store/v1"
+	"github.com/agntcy/dir/cli/presenter"
+	ctxUtils "github.com/agntcy/dir/cli/util/context"
+	"github.com/spf13/cobra"
+)
+
+var Command = &cobra.Command{
+	Use:   "sync",
+	Short: "Manage synchronization operations with remote Directory nodes",
+	Long: `Sync command allows you to manage synchronization operations between Directory nodes.
+It provides subcommands to create, list, monitor, and delete sync operations.`,
+}
+
+// Create sync subcommand.
+var createCmd = &cobra.Command{
+	Use:   "create ",
+	Short: "Create a new synchronization operation",
+	Long: `Create initiates a new synchronization operation from a remote Directory node.
+The operation is asynchronous and returns a sync ID for tracking progress.
+
+When --stdin flag is used, the command parses JSON routing search output from stdin
+and creates sync operations for each provider found in the search results.
+
+Usage examples:
+
+1. Create sync with remote peer:
+	dir sync create https://directory.example.com
+
+2. Create sync with specific CIDs:
+	dir sync create http://localhost:8080 --cids cid1,cid2,cid3
+
+3. Create sync from routing search output:
+	dirctl routing search --skill "AI" --output json | dirctl sync create --stdin`,
+	Args: func(cmd *cobra.Command, args []string) error {
+		if opts.Stdin {
+			return cobra.MaximumNArgs(0)(cmd, args)
+		}
+
+		return cobra.ExactArgs(1)(cmd, args)
+	},
+	RunE: func(cmd *cobra.Command, args []string) error {
+		if opts.Stdin {
+			return runCreateSyncFromStdin(cmd)
+		}
+
+		return runCreateSync(cmd, args[0], opts.CIDs)
+	},
+}
+
+// List syncs subcommand.
+var listCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all synchronization operations",
+	Long: `List displays all sync operations known to the system, including active,
+completed, and failed synchronizations.
+
+Usage examples:
+
+1. List all syncs:
+	dirctl sync list
+
+2. Pagination:
+	dirctl sync list --limit 10 --offset 20
+
+3. Output formats:
+	# Get syncs as JSON
+	dirctl sync list --output json
+
+	# Get syncs as JSONL for streaming
+	dirctl sync list --output jsonl
+
+	# Get raw sync data
+	dirctl sync list --output raw`,
+	RunE: func(cmd *cobra.Command, _ []string) error {
+		return runListSyncs(cmd)
+	},
+}
+
+// Get sync status subcommand.
+var statusCmd = &cobra.Command{
+	Use:   "status ",
+	Short: "Get detailed status of a synchronization operation",
+	Long: `Status retrieves comprehensive information about a specific sync operation,
+including progress, timing, and error details if applicable.
+
+Usage examples:
+
+1. Get sync status:
+	dirctl sync status
+
+2. Output formats:
+	# Get sync status as JSON
+	dirctl sync status --output json
+
+	# Get raw status data for scripting
+	dirctl sync status --output raw`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runGetSyncStatus(cmd, args[0])
+	},
+}
+
+// Delete sync subcommand.
+var deleteCmd = &cobra.Command{
+	Use:   "delete ",
+	Short: "Delete a synchronization operation",
+	Long: `Delete removes a sync operation from the system. For active syncs,
+this will attempt to cancel the operation gracefully.
+
+Usage examples:
+
+1. Delete a sync:
+	dirctl sync delete
+
+2. Output formats:
+	# Delete sync with JSON confirmation
+	dirctl sync delete --output json
+
+	# Delete sync with raw output for scripting
+	dirctl sync delete --output raw`,
+	Args: cobra.ExactArgs(1),
+	RunE: func(cmd *cobra.Command, args []string) error {
+		return runDeleteSync(cmd, args[0])
+	},
+}
+
+func init() {
+	// Add subcommands
+	Command.AddCommand(createCmd)
+	Command.AddCommand(listCmd)
+	Command.AddCommand(statusCmd)
+	Command.AddCommand(deleteCmd)
+}
+
+func runCreateSync(cmd *cobra.Command, remoteURL string, cids []string) error {
+	// Validate remote URL
+	if remoteURL == "" {
+		return errors.New("remote URL is required")
+	}
+
+	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
+	if !ok {
+		return errors.New("failed to get client from context")
+	}
+
+	syncID, err := client.CreateSync(cmd.Context(), remoteURL, cids)
+	if err != nil {
+		return fmt.Errorf("failed to create sync: %w", err)
+	}
+
+	// Output in the appropriate format
+	return presenter.PrintMessage(cmd, "sync", "Sync created with ID", syncID)
+}
+
+func runListSyncs(cmd *cobra.Command) error {
+	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
+	if !ok {
+		return errors.New("failed to get client from context")
+	}
+
+	itemCh, err := client.ListSyncs(cmd.Context(), &storev1.ListSyncsRequest{
+		Limit:  &opts.Limit,
+		Offset: &opts.Offset,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to list syncs: %w", err)
+	}
+
+	// Collect results
+	var results []interface{}
+
+	for {
+		select {
+		case sync, ok := <-itemCh:
+			if !ok {
+				// Channel closed, all items received
+				goto done
+			}
+
+			results = append(results, sync)
+		case <-cmd.Context().Done():
+			return fmt.Errorf("context cancelled while listing syncs: %w", cmd.Context().Err())
+		}
+	}
+
+done:
+
+	return presenter.PrintMessage(cmd, "syncs", "Sync results", results)
+}
+
+func runGetSyncStatus(cmd *cobra.Command, syncID string) error {
+	// Validate sync ID
+	if syncID == "" {
+		return errors.New("sync ID is required")
+	}
+
+	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
+	if !ok {
+		return errors.New("failed to get client from context")
+	}
+
+	sync, err := client.GetSync(cmd.Context(), syncID)
+	if err != nil {
+		return fmt.Errorf("failed to get sync status: %w", err)
+	}
+
+	return presenter.PrintMessage(cmd, "sync", "Sync status", sync.GetStatus())
+}
+
+func runDeleteSync(cmd *cobra.Command, syncID string) error {
+	// Validate sync ID
+	if syncID == "" {
+		return errors.New("sync ID is required")
+	}
+
+	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
+	if !ok {
+		return errors.New("failed to get client from context")
+	}
+
+	err := client.DeleteSync(cmd.Context(), syncID)
+	if err != nil {
+		return fmt.Errorf("failed to delete sync: %w", err)
+	}
+
+	return presenter.PrintMessage(cmd, "sync", "Sync deleted with ID", syncID)
+}
+
+func runCreateSyncFromStdin(cmd *cobra.Command) error {
+	// Parse the search output from stdin
+	results, err := parseSearchOutput(cmd.InOrStdin())
+	if err != nil {
+		return fmt.Errorf("failed to parse search output: %w", err)
+	}
+
+	if len(results) == 0 {
+		presenter.PrintSmartf(cmd, "No search results found in stdin\n")
+
+		return nil
+	}
+
+	// Group results by API address (one sync per peer)
+	peerResults := groupResultsByAPIAddress(results)
+
+	// Create sync operations for each peer
+	return createSyncOperations(cmd, peerResults)
+}
+
+func parseSearchOutput(input io.Reader) ([]*routingv1.SearchResponse, error) {
+	// Read JSON input from routing search --output json
+	inputBytes, err := io.ReadAll(input)
+	if err != nil {
+		return nil, fmt.Errorf("error reading input: %w", err)
+	}
+
+	var searchResponses []*routingv1.SearchResponse
+
+	err = json.Unmarshal(inputBytes, &searchResponses)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse JSON: %w", err)
+	}
+
+	return searchResponses, nil
+}
+
+// PeerSyncInfo holds sync information for a peer (grouped by API address).
+type PeerSyncInfo struct {
+	APIAddress string
+	CIDs       []string
+}
+
+func groupResultsByAPIAddress(results []*routingv1.SearchResponse) map[string]PeerSyncInfo {
+	peerResults := make(map[string]PeerSyncInfo)
+
+	for _, result := range results {
+		// Get the first API address if available
+		var apiAddress string
+		if result.GetPeer() != nil && len(result.GetPeer().GetAddrs()) > 0 {
+			apiAddress = result.GetPeer().GetAddrs()[0]
+		}
+
+		// Skip results without API address
+		if apiAddress == "" {
+			continue
+		}
+
+		cid := result.GetRecordRef().GetCid()
+
+		if existing, exists := peerResults[apiAddress]; exists {
+			existing.CIDs = append(existing.CIDs, cid)
+			peerResults[apiAddress] = existing
+		} else {
+			peerResults[apiAddress] = PeerSyncInfo{
+				APIAddress: apiAddress,
+				CIDs:       []string{cid},
+			}
+		}
+	}
+
+	return peerResults
+}
+
+func createSyncOperations(cmd *cobra.Command, peerResults map[string]PeerSyncInfo) error {
+	client, ok := ctxUtils.GetClientFromContext(cmd.Context())
+	if !ok {
+		return errors.New("failed to get client from context")
+	}
+
+	totalSyncs := 0
+	totalCIDs := 0
+
+	syncIDs := make([]interface{}, 0, len(peerResults))
+
+	for apiAddress, syncInfo := range peerResults {
+		if syncInfo.APIAddress == "" {
+			presenter.PrintSmartf(cmd, "WARNING: No API address found for peer\n")
+			presenter.PrintSmartf(cmd, "Skipping sync for this peer\n")
+
+			continue
+		}
+
+		// Create sync operation
+		syncID, err := client.CreateSync(cmd.Context(), syncInfo.APIAddress, syncInfo.CIDs)
+		if err != nil {
+			presenter.PrintSmartf(cmd, "ERROR: Failed to create sync for peer %s: %v\n", apiAddress, err)
+
+			continue
+		}
+
+		syncIDs = append(syncIDs, syncID)
+
+		totalSyncs++
+		totalCIDs += len(syncInfo.CIDs)
+	}
+
+	return presenter.PrintMessage(cmd, "sync IDs", "Sync IDs created", syncIDs)
+}
diff --git a/cli/cmd/sync/sync_test.go b/cli/cmd/sync/sync_test.go
index a2ca3a7eb..b8a0e30d5 100644
--- a/cli/cmd/sync/sync_test.go
+++ b/cli/cmd/sync/sync_test.go
@@ -1,622 +1,622 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package sync
-
-import (
-	"bytes"
-	"strings"
-	"testing"
-
-	corev1 "github.com/agntcy/dir/api/core/v1"
-	routingv1 "github.com/agntcy/dir/api/routing/v1"
-	"github.com/spf13/cobra"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-// TestParseSearchOutput_EmptyInput tests parsing empty input.
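// [Editor's note: illustrative sketch, not part of the diffed sources.]
// The --stdin path added in sync.go above chains parseSearchOutput ->
// groupResultsByAPIAddress -> one CreateSync per peer. A minimal end-to-end
// illustration of the two helpers, assuming "fmt" and "strings" are imported;
// the JSON shape matches the fixtures used in the tests that follow.
func exampleStdinFlow() {
	in := strings.NewReader(`[
		{"record_ref": {"cid": "cid1"}, "peer": {"addrs": ["http://peer1.example.com"]}},
		{"record_ref": {"cid": "cid2"}, "peer": {"addrs": ["http://peer1.example.com"]}}
	]`)

	results, err := parseSearchOutput(in)
	if err != nil {
		panic(err)
	}

	// Both records share the same first address, so they collapse into a single
	// group and would result in exactly one CreateSync call.
	for addr, info := range groupResultsByAPIAddress(results) {
		fmt.Printf("would sync %d record(s) from %s\n", len(info.CIDs), addr)
	}
}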
-func TestParseSearchOutput_EmptyInput(t *testing.T) { - tests := []struct { - name string - input string - expectError bool - }{ - { - name: "empty string", - input: "", - expectError: true, - }, - { - name: "empty array", - input: "[]", - expectError: false, - }, - { - name: "null", - input: "null", - expectError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reader := strings.NewReader(tt.input) - result, err := parseSearchOutput(reader) - - if tt.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.input == "[]" { - assert.Empty(t, result) - } - } - }) - } -} - -// TestParseSearchOutput_ValidJSON tests parsing valid JSON input. -func TestParseSearchOutput_ValidJSON(t *testing.T) { - input := `[ - { - "record_ref": {"cid": "cid1"}, - "peer": { - "addrs": ["http://peer1.example.com"] - } - }, - { - "record_ref": {"cid": "cid2"}, - "peer": { - "addrs": ["http://peer2.example.com"] - } - } - ]` - - reader := strings.NewReader(input) - result, err := parseSearchOutput(reader) - - require.NoError(t, err) - assert.Len(t, result, 2) - assert.Equal(t, "cid1", result[0].GetRecordRef().GetCid()) - assert.Equal(t, "cid2", result[1].GetRecordRef().GetCid()) -} - -// TestParseSearchOutput_InvalidJSON tests error handling for invalid JSON. -func TestParseSearchOutput_InvalidJSON(t *testing.T) { - tests := []struct { - name string - input string - }{ - { - name: "malformed JSON", - input: `{"invalid": "json"`, - }, - { - name: "not an array", - input: `{"key": "value"}`, - }, - { - name: "random text", - input: "not json at all", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reader := strings.NewReader(tt.input) - result, err := parseSearchOutput(reader) - - require.Error(t, err) - assert.Nil(t, result) - assert.Contains(t, err.Error(), "failed to parse JSON") - }) - } -} - -// TestParseSearchOutput_ComplexJSON tests parsing complex search results. -func TestParseSearchOutput_ComplexJSON(t *testing.T) { - input := `[ - { - "record_ref": {"cid": "bafyabc123"}, - "peer": { - "id": "peer1", - "addrs": ["http://api1.example.com", "http://api2.example.com"] - }, - "queries": ["skill:AI"], - "score": 2 - } - ]` - - reader := strings.NewReader(input) - result, err := parseSearchOutput(reader) - - require.NoError(t, err) - assert.Len(t, result, 1) - assert.Equal(t, "bafyabc123", result[0].GetRecordRef().GetCid()) - assert.Equal(t, "peer1", result[0].GetPeer().GetId()) - assert.Len(t, result[0].GetPeer().GetAddrs(), 2) -} - -// TestGroupResultsByAPIAddress_EmptyInput tests grouping empty results. -func TestGroupResultsByAPIAddress_EmptyInput(t *testing.T) { - result := groupResultsByAPIAddress(nil) - assert.Empty(t, result) - - result = groupResultsByAPIAddress([]*routingv1.SearchResponse{}) - assert.Empty(t, result) -} - -// TestGroupResultsByAPIAddress_SingleResult tests grouping single result. 
-func TestGroupResultsByAPIAddress_SingleResult(t *testing.T) { - results := []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - assert.Len(t, grouped, 1) - assert.Contains(t, grouped, "http://api1.example.com") - assert.Equal(t, "http://api1.example.com", grouped["http://api1.example.com"].APIAddress) - assert.Equal(t, []string{"cid1"}, grouped["http://api1.example.com"].CIDs) -} - -// TestGroupResultsByAPIAddress_MultipleSamePeer tests grouping multiple records from same peer. -func TestGroupResultsByAPIAddress_MultipleSamePeer(t *testing.T) { - results := []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid3"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - assert.Len(t, grouped, 1) - peerInfo := grouped["http://api1.example.com"] - assert.Equal(t, "http://api1.example.com", peerInfo.APIAddress) - assert.Equal(t, []string{"cid1", "cid2", "cid3"}, peerInfo.CIDs) -} - -// TestGroupResultsByAPIAddress_MultipleDifferentPeers tests grouping records from different peers. -func TestGroupResultsByAPIAddress_MultipleDifferentPeers(t *testing.T) { - results := []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Peer: &routingv1.Peer{ - Id: "peer2", - Addrs: []string{"http://api2.example.com"}, - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid3"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - assert.Len(t, grouped, 2) - - peer1Info := grouped["http://api1.example.com"] - assert.Equal(t, "http://api1.example.com", peer1Info.APIAddress) - assert.Equal(t, []string{"cid1", "cid3"}, peer1Info.CIDs) - - peer2Info := grouped["http://api2.example.com"] - assert.Equal(t, "http://api2.example.com", peer2Info.APIAddress) - assert.Equal(t, []string{"cid2"}, peer2Info.CIDs) -} - -// TestGroupResultsByAPIAddress_NoPeerInfo tests skipping results without peer info. -func TestGroupResultsByAPIAddress_NoPeerInfo(t *testing.T) { - results := []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: nil, // No peer info - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{}, // No addresses - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid3"}, - Peer: &routingv1.Peer{ - Id: "peer2", - Addrs: []string{"http://api1.example.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - // Only the third result should be included - assert.Len(t, grouped, 1) - assert.Contains(t, grouped, "http://api1.example.com") - assert.Equal(t, []string{"cid3"}, grouped["http://api1.example.com"].CIDs) -} - -// TestGroupResultsByAPIAddress_MultipleAddresses tests using first address only. 
-func TestGroupResultsByAPIAddress_MultipleAddresses(t *testing.T) { - results := []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://api1.example.com", "http://api2.example.com", "http://api3.example.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - // Should use the first address - assert.Len(t, grouped, 1) - assert.Contains(t, grouped, "http://api1.example.com") - assert.NotContains(t, grouped, "http://api2.example.com") - assert.NotContains(t, grouped, "http://api3.example.com") -} - -// TestGroupResultsByAPIAddress_MixedScenario tests complex real-world scenario. -func TestGroupResultsByAPIAddress_MixedScenario(t *testing.T) { - results := []*routingv1.SearchResponse{ - // Peer 1 - multiple CIDs - { - RecordRef: &corev1.RecordRef{Cid: "cid1"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://peer1.com"}, - }, - }, - { - RecordRef: &corev1.RecordRef{Cid: "cid2"}, - Peer: &routingv1.Peer{ - Id: "peer1", - Addrs: []string{"http://peer1.com"}, - }, - }, - // Peer 2 - single CID - { - RecordRef: &corev1.RecordRef{Cid: "cid3"}, - Peer: &routingv1.Peer{ - Id: "peer2", - Addrs: []string{"http://peer2.com"}, - }, - }, - // No peer info - should be skipped - { - RecordRef: &corev1.RecordRef{Cid: "cid4"}, - Peer: nil, - }, - // Peer 3 - single CID - { - RecordRef: &corev1.RecordRef{Cid: "cid5"}, - Peer: &routingv1.Peer{ - Id: "peer3", - Addrs: []string{"http://peer3.com"}, - }, - }, - } - - grouped := groupResultsByAPIAddress(results) - - assert.Len(t, grouped, 3) - assert.Equal(t, []string{"cid1", "cid2"}, grouped["http://peer1.com"].CIDs) - assert.Equal(t, []string{"cid3"}, grouped["http://peer2.com"].CIDs) - assert.Equal(t, []string{"cid5"}, grouped["http://peer3.com"].CIDs) -} - -// TestPeerSyncInfo_Structure tests the PeerSyncInfo structure. -func TestPeerSyncInfo_Structure(t *testing.T) { - info := PeerSyncInfo{ - APIAddress: "http://example.com", - CIDs: []string{"cid1", "cid2", "cid3"}, - } - - assert.Equal(t, "http://example.com", info.APIAddress) - assert.Len(t, info.CIDs, 3) - assert.Contains(t, info.CIDs, "cid1") - assert.Contains(t, info.CIDs, "cid2") - assert.Contains(t, info.CIDs, "cid3") -} - -// TestParseSearchOutput_LargeDataset tests parsing large number of results. -func TestParseSearchOutput_LargeDataset(t *testing.T) { - // Build a large JSON array - var builder strings.Builder - builder.WriteString("[") - - for i := range 100 { - if i > 0 { - builder.WriteString(",") - } - - builder.WriteString(`{ - "recordRef": {"cid": "cid`) - builder.WriteString(strings.Repeat("a", i)) - builder.WriteString(`"}, - "peer": {"addrs": ["http://peer`) - builder.WriteString(strings.Repeat("a", i%10)) - builder.WriteString(`.com"]} - }`) - } - - builder.WriteString("]") - - reader := strings.NewReader(builder.String()) - result, err := parseSearchOutput(reader) - - require.NoError(t, err) - assert.Len(t, result, 100) -} - -// TestGroupResultsByAPIAddress_LargeDataset tests grouping large number of results. 
-func TestGroupResultsByAPIAddress_LargeDataset(t *testing.T) { - results := make([]*routingv1.SearchResponse, 100) - - // Create 100 results distributed across 10 peers - for i := range 100 { - peerNum := i % 10 - results[i] = &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{Cid: "cid" + strings.Repeat("a", i)}, - Peer: &routingv1.Peer{ - Id: "peer" + strings.Repeat("a", peerNum), - Addrs: []string{"http://peer" + strings.Repeat("a", peerNum) + ".com"}, - }, - } - } - - grouped := groupResultsByAPIAddress(results) - - // Should have 10 unique API addresses - assert.Len(t, grouped, 10) - - // Each peer should have 10 CIDs - for _, info := range grouped { - assert.Len(t, info.CIDs, 10) - } -} - -// TestParseSearchOutput_SpecialCharacters tests handling special characters in JSON. -func TestParseSearchOutput_SpecialCharacters(t *testing.T) { - input := `[ - { - "record_ref": {"cid": "cid-with-special-chars-!@#$%^&*()"}, - "peer": { - "id": "peer/with\\slashes\"and'quotes", - "addrs": ["http://api.example.com/path?query=value&foo=bar"] - } - } - ]` - - reader := strings.NewReader(input) - result, err := parseSearchOutput(reader) - - require.NoError(t, err) - assert.Len(t, result, 1) - assert.Contains(t, result[0].GetRecordRef().GetCid(), "special-chars") -} - -// TestCommand_Initialization tests that sync command is properly initialized. -func TestCommand_Initialization(t *testing.T) { - assert.NotNil(t, Command) - assert.Equal(t, "sync", Command.Use) - assert.NotEmpty(t, Command.Short) - assert.NotEmpty(t, Command.Long) -} - -// TestCreateCmd_Initialization tests create subcommand initialization. -func TestCreateCmd_Initialization(t *testing.T) { - assert.NotNil(t, createCmd) - assert.Equal(t, "create ", createCmd.Use) - assert.NotEmpty(t, createCmd.Short) - assert.NotEmpty(t, createCmd.Long) - assert.NotNil(t, createCmd.Args) - assert.NotNil(t, createCmd.RunE) - - // Check that examples are in the Long description - assert.Contains(t, createCmd.Long, "dirctl routing search") - assert.Contains(t, createCmd.Long, "--output json") -} - -// TestListCmd_Initialization tests list subcommand initialization. -func TestListCmd_Initialization(t *testing.T) { - assert.NotNil(t, listCmd) - assert.Equal(t, "list", listCmd.Use) - assert.NotEmpty(t, listCmd.Short) - assert.NotNil(t, listCmd.RunE) -} - -// TestStatusCmd_Initialization tests status subcommand initialization. -func TestStatusCmd_Initialization(t *testing.T) { - assert.NotNil(t, statusCmd) - assert.Equal(t, "status ", statusCmd.Use) - assert.NotEmpty(t, statusCmd.Short) - assert.NotNil(t, statusCmd.Args) - assert.NotNil(t, statusCmd.RunE) -} - -// TestDeleteCmd_Initialization tests delete subcommand initialization. -func TestDeleteCmd_Initialization(t *testing.T) { - assert.NotNil(t, deleteCmd) - assert.Equal(t, "delete ", deleteCmd.Use) - assert.NotEmpty(t, deleteCmd.Short) - assert.NotNil(t, deleteCmd.Args) - assert.NotNil(t, deleteCmd.RunE) -} - -// TestCreateCmd_ArgsValidation tests argument validation for create command. 
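// [Editor's note: illustrative sketch, not part of the diffed sources.]
// The validator exercised below simply switches between two stock cobra
// validators on the --stdin flag, as defined in createCmd above. The same
// pattern in isolation ("useStdin" is a hypothetical flag variable):
var useStdin bool

func createArgsValidator(cmd *cobra.Command, args []string) error {
	if useStdin {
		// Peers come from stdin, so positional arguments are rejected.
		return cobra.MaximumNArgs(0)(cmd, args)
	}

	// Otherwise exactly one remote URL is required.
	return cobra.ExactArgs(1)(cmd, args)
}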
-func TestCreateCmd_ArgsValidation(t *testing.T) { - tests := []struct { - name string - args []string - stdin bool - expectError bool - }{ - { - name: "no args with stdin", - args: []string{}, - stdin: true, - expectError: false, - }, - { - name: "one arg without stdin", - args: []string{"http://example.com"}, - stdin: false, - expectError: false, - }, - { - name: "no args without stdin", - args: []string{}, - stdin: false, - expectError: true, - }, - { - name: "multiple args without stdin", - args: []string{"http://example.com", "extra"}, - stdin: false, - expectError: true, - }, - { - name: "args with stdin flag", - args: []string{"http://example.com"}, - stdin: true, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Save and restore opts - oldStdin := opts.Stdin - - defer func() { opts.Stdin = oldStdin }() - - opts.Stdin = tt.stdin - - cmd := &cobra.Command{} - err := createCmd.Args(cmd, tt.args) - - if tt.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -// TestParseSearchOutput_ReadError tests handling of read errors. -func TestParseSearchOutput_ReadError(t *testing.T) { - // Create a reader that always errors - reader := &errorReader{} - result, err := parseSearchOutput(reader) - - require.Error(t, err) - assert.Nil(t, result) - assert.Contains(t, err.Error(), "error reading input") -} - -// errorReader is a test helper that always returns an error. -type errorReader struct{} - -func (e *errorReader) Read(_ []byte) (int, error) { - return 0, assert.AnError -} - -// TestGroupResultsByAPIAddress_EdgeCases tests edge cases in grouping. -func TestGroupResultsByAPIAddress_EdgeCases(t *testing.T) { - tests := []struct { - name string - results []*routingv1.SearchResponse - expected int // expected number of groups - }{ - { - name: "empty CID", - results: []*routingv1.SearchResponse{ - { - RecordRef: &corev1.RecordRef{Cid: ""}, - Peer: &routingv1.Peer{ - Addrs: []string{"http://api.example.com"}, - }, - }, - }, - expected: 1, - }, - { - name: "nil recordRef", - results: []*routingv1.SearchResponse{ - { - RecordRef: nil, - Peer: &routingv1.Peer{ - Addrs: []string{"http://api.example.com"}, - }, - }, - }, - expected: 1, // Creates group with empty CID - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Should not panic - require.NotPanics(t, func() { - grouped := groupResultsByAPIAddress(tt.results) - if tt.expected >= 0 { - assert.Len(t, grouped, tt.expected) - } - }) - }) - } -} - -// TestParseSearchOutput_WithBytes tests using bytes.Buffer. -func TestParseSearchOutput_WithBytes(t *testing.T) { - // Use snake_case JSON field names (record_ref, not recordRef) - input := []byte(`[{"record_ref": {"cid": "test-cid-123"}, "peer": {"addrs": ["http://test.com"]}}]`) - buffer := bytes.NewBuffer(input) - - result, err := parseSearchOutput(buffer) - - require.NoError(t, err) - assert.Len(t, result, 1) - // The CID should be set - assert.NotNil(t, result[0].GetRecordRef()) - assert.Equal(t, "test-cid-123", result[0].GetRecordRef().GetCid()) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import ( + "bytes" + "strings" + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestParseSearchOutput_EmptyInput tests parsing empty input. 
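// [Editor's note: illustrative sketch, not part of the diffed sources.]
// Why the "null" case in the test below passes: encoding/json sets a slice
// target to nil for a JSON null and reports no error, so parseSearchOutput
// returns an empty (nil) result rather than failing.
func exampleNullDecode() {
	var responses []*routingv1.SearchResponse

	err := json.Unmarshal([]byte(`null`), &responses) // assumes encoding/json is imported
	_ = err                                           // err == nil; responses == nil
}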
+func TestParseSearchOutput_EmptyInput(t *testing.T) { + tests := []struct { + name string + input string + expectError bool + }{ + { + name: "empty string", + input: "", + expectError: true, + }, + { + name: "empty array", + input: "[]", + expectError: false, + }, + { + name: "null", + input: "null", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := strings.NewReader(tt.input) + result, err := parseSearchOutput(reader) + + if tt.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + + if tt.input == "[]" { + assert.Empty(t, result) + } + } + }) + } +} + +// TestParseSearchOutput_ValidJSON tests parsing valid JSON input. +func TestParseSearchOutput_ValidJSON(t *testing.T) { + input := `[ + { + "record_ref": {"cid": "cid1"}, + "peer": { + "addrs": ["http://peer1.example.com"] + } + }, + { + "record_ref": {"cid": "cid2"}, + "peer": { + "addrs": ["http://peer2.example.com"] + } + } + ]` + + reader := strings.NewReader(input) + result, err := parseSearchOutput(reader) + + require.NoError(t, err) + assert.Len(t, result, 2) + assert.Equal(t, "cid1", result[0].GetRecordRef().GetCid()) + assert.Equal(t, "cid2", result[1].GetRecordRef().GetCid()) +} + +// TestParseSearchOutput_InvalidJSON tests error handling for invalid JSON. +func TestParseSearchOutput_InvalidJSON(t *testing.T) { + tests := []struct { + name string + input string + }{ + { + name: "malformed JSON", + input: `{"invalid": "json"`, + }, + { + name: "not an array", + input: `{"key": "value"}`, + }, + { + name: "random text", + input: "not json at all", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := strings.NewReader(tt.input) + result, err := parseSearchOutput(reader) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "failed to parse JSON") + }) + } +} + +// TestParseSearchOutput_ComplexJSON tests parsing complex search results. +func TestParseSearchOutput_ComplexJSON(t *testing.T) { + input := `[ + { + "record_ref": {"cid": "bafyabc123"}, + "peer": { + "id": "peer1", + "addrs": ["http://api1.example.com", "http://api2.example.com"] + }, + "queries": ["skill:AI"], + "score": 2 + } + ]` + + reader := strings.NewReader(input) + result, err := parseSearchOutput(reader) + + require.NoError(t, err) + assert.Len(t, result, 1) + assert.Equal(t, "bafyabc123", result[0].GetRecordRef().GetCid()) + assert.Equal(t, "peer1", result[0].GetPeer().GetId()) + assert.Len(t, result[0].GetPeer().GetAddrs(), 2) +} + +// TestGroupResultsByAPIAddress_EmptyInput tests grouping empty results. +func TestGroupResultsByAPIAddress_EmptyInput(t *testing.T) { + result := groupResultsByAPIAddress(nil) + assert.Empty(t, result) + + result = groupResultsByAPIAddress([]*routingv1.SearchResponse{}) + assert.Empty(t, result) +} + +// TestGroupResultsByAPIAddress_SingleResult tests grouping single result. 
+func TestGroupResultsByAPIAddress_SingleResult(t *testing.T) { + results := []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + assert.Len(t, grouped, 1) + assert.Contains(t, grouped, "http://api1.example.com") + assert.Equal(t, "http://api1.example.com", grouped["http://api1.example.com"].APIAddress) + assert.Equal(t, []string{"cid1"}, grouped["http://api1.example.com"].CIDs) +} + +// TestGroupResultsByAPIAddress_MultipleSamePeer tests grouping multiple records from same peer. +func TestGroupResultsByAPIAddress_MultipleSamePeer(t *testing.T) { + results := []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid3"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + assert.Len(t, grouped, 1) + peerInfo := grouped["http://api1.example.com"] + assert.Equal(t, "http://api1.example.com", peerInfo.APIAddress) + assert.Equal(t, []string{"cid1", "cid2", "cid3"}, peerInfo.CIDs) +} + +// TestGroupResultsByAPIAddress_MultipleDifferentPeers tests grouping records from different peers. +func TestGroupResultsByAPIAddress_MultipleDifferentPeers(t *testing.T) { + results := []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Peer: &routingv1.Peer{ + Id: "peer2", + Addrs: []string{"http://api2.example.com"}, + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid3"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + assert.Len(t, grouped, 2) + + peer1Info := grouped["http://api1.example.com"] + assert.Equal(t, "http://api1.example.com", peer1Info.APIAddress) + assert.Equal(t, []string{"cid1", "cid3"}, peer1Info.CIDs) + + peer2Info := grouped["http://api2.example.com"] + assert.Equal(t, "http://api2.example.com", peer2Info.APIAddress) + assert.Equal(t, []string{"cid2"}, peer2Info.CIDs) +} + +// TestGroupResultsByAPIAddress_NoPeerInfo tests skipping results without peer info. +func TestGroupResultsByAPIAddress_NoPeerInfo(t *testing.T) { + results := []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: nil, // No peer info + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{}, // No addresses + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid3"}, + Peer: &routingv1.Peer{ + Id: "peer2", + Addrs: []string{"http://api1.example.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + // Only the third result should be included + assert.Len(t, grouped, 1) + assert.Contains(t, grouped, "http://api1.example.com") + assert.Equal(t, []string{"cid3"}, grouped["http://api1.example.com"].CIDs) +} + +// TestGroupResultsByAPIAddress_MultipleAddresses tests using first address only. 
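// [Editor's note: illustrative sketch, not part of the diffed sources.]
// Grouping keys strictly on Addrs[0], so two peers that share a secondary
// address still land in separate groups; a shared non-first address never
// becomes a map key:
func exampleFirstAddressOnly() {
	grouped := groupResultsByAPIAddress([]*routingv1.SearchResponse{
		{
			RecordRef: &corev1.RecordRef{Cid: "cid1"},
			Peer:      &routingv1.Peer{Addrs: []string{"http://a.example.com", "http://shared.example.com"}},
		},
		{
			RecordRef: &corev1.RecordRef{Cid: "cid2"},
			Peer:      &routingv1.Peer{Addrs: []string{"http://b.example.com", "http://shared.example.com"}},
		},
	})

	_ = grouped // len(grouped) == 2; "http://shared.example.com" is never a key
}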
+func TestGroupResultsByAPIAddress_MultipleAddresses(t *testing.T) { + results := []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://api1.example.com", "http://api2.example.com", "http://api3.example.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + // Should use the first address + assert.Len(t, grouped, 1) + assert.Contains(t, grouped, "http://api1.example.com") + assert.NotContains(t, grouped, "http://api2.example.com") + assert.NotContains(t, grouped, "http://api3.example.com") +} + +// TestGroupResultsByAPIAddress_MixedScenario tests complex real-world scenario. +func TestGroupResultsByAPIAddress_MixedScenario(t *testing.T) { + results := []*routingv1.SearchResponse{ + // Peer 1 - multiple CIDs + { + RecordRef: &corev1.RecordRef{Cid: "cid1"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://peer1.com"}, + }, + }, + { + RecordRef: &corev1.RecordRef{Cid: "cid2"}, + Peer: &routingv1.Peer{ + Id: "peer1", + Addrs: []string{"http://peer1.com"}, + }, + }, + // Peer 2 - single CID + { + RecordRef: &corev1.RecordRef{Cid: "cid3"}, + Peer: &routingv1.Peer{ + Id: "peer2", + Addrs: []string{"http://peer2.com"}, + }, + }, + // No peer info - should be skipped + { + RecordRef: &corev1.RecordRef{Cid: "cid4"}, + Peer: nil, + }, + // Peer 3 - single CID + { + RecordRef: &corev1.RecordRef{Cid: "cid5"}, + Peer: &routingv1.Peer{ + Id: "peer3", + Addrs: []string{"http://peer3.com"}, + }, + }, + } + + grouped := groupResultsByAPIAddress(results) + + assert.Len(t, grouped, 3) + assert.Equal(t, []string{"cid1", "cid2"}, grouped["http://peer1.com"].CIDs) + assert.Equal(t, []string{"cid3"}, grouped["http://peer2.com"].CIDs) + assert.Equal(t, []string{"cid5"}, grouped["http://peer3.com"].CIDs) +} + +// TestPeerSyncInfo_Structure tests the PeerSyncInfo structure. +func TestPeerSyncInfo_Structure(t *testing.T) { + info := PeerSyncInfo{ + APIAddress: "http://example.com", + CIDs: []string{"cid1", "cid2", "cid3"}, + } + + assert.Equal(t, "http://example.com", info.APIAddress) + assert.Len(t, info.CIDs, 3) + assert.Contains(t, info.CIDs, "cid1") + assert.Contains(t, info.CIDs, "cid2") + assert.Contains(t, info.CIDs, "cid3") +} + +// TestParseSearchOutput_LargeDataset tests parsing large number of results. +func TestParseSearchOutput_LargeDataset(t *testing.T) { + // Build a large JSON array + var builder strings.Builder + builder.WriteString("[") + + for i := range 100 { + if i > 0 { + builder.WriteString(",") + } + + builder.WriteString(`{ + "recordRef": {"cid": "cid`) + builder.WriteString(strings.Repeat("a", i)) + builder.WriteString(`"}, + "peer": {"addrs": ["http://peer`) + builder.WriteString(strings.Repeat("a", i%10)) + builder.WriteString(`.com"]} + }`) + } + + builder.WriteString("]") + + reader := strings.NewReader(builder.String()) + result, err := parseSearchOutput(reader) + + require.NoError(t, err) + assert.Len(t, result, 100) +} + +// TestGroupResultsByAPIAddress_LargeDataset tests grouping large number of results. 
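// [Editor's note: illustrative sketch, not part of the diffed sources.]
// groupResultsByAPIAddress updates the map with a read-modify-write because Go
// map values are not addressable: peerResults[k].CIDs = append(...) would not
// compile for a struct value. The pattern in isolation:
func exampleMapReadModifyWrite() {
	m := make(map[string]PeerSyncInfo)

	info := m["http://a.example.com"] // copy out (zero value if absent)
	info.CIDs = append(info.CIDs, "cid1")
	m["http://a.example.com"] = info // write the updated copy back
}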
+func TestGroupResultsByAPIAddress_LargeDataset(t *testing.T) { + results := make([]*routingv1.SearchResponse, 100) + + // Create 100 results distributed across 10 peers + for i := range 100 { + peerNum := i % 10 + results[i] = &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{Cid: "cid" + strings.Repeat("a", i)}, + Peer: &routingv1.Peer{ + Id: "peer" + strings.Repeat("a", peerNum), + Addrs: []string{"http://peer" + strings.Repeat("a", peerNum) + ".com"}, + }, + } + } + + grouped := groupResultsByAPIAddress(results) + + // Should have 10 unique API addresses + assert.Len(t, grouped, 10) + + // Each peer should have 10 CIDs + for _, info := range grouped { + assert.Len(t, info.CIDs, 10) + } +} + +// TestParseSearchOutput_SpecialCharacters tests handling special characters in JSON. +func TestParseSearchOutput_SpecialCharacters(t *testing.T) { + input := `[ + { + "record_ref": {"cid": "cid-with-special-chars-!@#$%^&*()"}, + "peer": { + "id": "peer/with\\slashes\"and'quotes", + "addrs": ["http://api.example.com/path?query=value&foo=bar"] + } + } + ]` + + reader := strings.NewReader(input) + result, err := parseSearchOutput(reader) + + require.NoError(t, err) + assert.Len(t, result, 1) + assert.Contains(t, result[0].GetRecordRef().GetCid(), "special-chars") +} + +// TestCommand_Initialization tests that sync command is properly initialized. +func TestCommand_Initialization(t *testing.T) { + assert.NotNil(t, Command) + assert.Equal(t, "sync", Command.Use) + assert.NotEmpty(t, Command.Short) + assert.NotEmpty(t, Command.Long) +} + +// TestCreateCmd_Initialization tests create subcommand initialization. +func TestCreateCmd_Initialization(t *testing.T) { + assert.NotNil(t, createCmd) + assert.Equal(t, "create ", createCmd.Use) + assert.NotEmpty(t, createCmd.Short) + assert.NotEmpty(t, createCmd.Long) + assert.NotNil(t, createCmd.Args) + assert.NotNil(t, createCmd.RunE) + + // Check that examples are in the Long description + assert.Contains(t, createCmd.Long, "dirctl routing search") + assert.Contains(t, createCmd.Long, "--output json") +} + +// TestListCmd_Initialization tests list subcommand initialization. +func TestListCmd_Initialization(t *testing.T) { + assert.NotNil(t, listCmd) + assert.Equal(t, "list", listCmd.Use) + assert.NotEmpty(t, listCmd.Short) + assert.NotNil(t, listCmd.RunE) +} + +// TestStatusCmd_Initialization tests status subcommand initialization. +func TestStatusCmd_Initialization(t *testing.T) { + assert.NotNil(t, statusCmd) + assert.Equal(t, "status ", statusCmd.Use) + assert.NotEmpty(t, statusCmd.Short) + assert.NotNil(t, statusCmd.Args) + assert.NotNil(t, statusCmd.RunE) +} + +// TestDeleteCmd_Initialization tests delete subcommand initialization. +func TestDeleteCmd_Initialization(t *testing.T) { + assert.NotNil(t, deleteCmd) + assert.Equal(t, "delete ", deleteCmd.Use) + assert.NotEmpty(t, deleteCmd.Short) + assert.NotNil(t, deleteCmd.Args) + assert.NotNil(t, deleteCmd.RunE) +} + +// TestCreateCmd_ArgsValidation tests argument validation for create command. 
+func TestCreateCmd_ArgsValidation(t *testing.T) { + tests := []struct { + name string + args []string + stdin bool + expectError bool + }{ + { + name: "no args with stdin", + args: []string{}, + stdin: true, + expectError: false, + }, + { + name: "one arg without stdin", + args: []string{"http://example.com"}, + stdin: false, + expectError: false, + }, + { + name: "no args without stdin", + args: []string{}, + stdin: false, + expectError: true, + }, + { + name: "multiple args without stdin", + args: []string{"http://example.com", "extra"}, + stdin: false, + expectError: true, + }, + { + name: "args with stdin flag", + args: []string{"http://example.com"}, + stdin: true, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save and restore opts + oldStdin := opts.Stdin + + defer func() { opts.Stdin = oldStdin }() + + opts.Stdin = tt.stdin + + cmd := &cobra.Command{} + err := createCmd.Args(cmd, tt.args) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +// TestParseSearchOutput_ReadError tests handling of read errors. +func TestParseSearchOutput_ReadError(t *testing.T) { + // Create a reader that always errors + reader := &errorReader{} + result, err := parseSearchOutput(reader) + + require.Error(t, err) + assert.Nil(t, result) + assert.Contains(t, err.Error(), "error reading input") +} + +// errorReader is a test helper that always returns an error. +type errorReader struct{} + +func (e *errorReader) Read(_ []byte) (int, error) { + return 0, assert.AnError +} + +// TestGroupResultsByAPIAddress_EdgeCases tests edge cases in grouping. +func TestGroupResultsByAPIAddress_EdgeCases(t *testing.T) { + tests := []struct { + name string + results []*routingv1.SearchResponse + expected int // expected number of groups + }{ + { + name: "empty CID", + results: []*routingv1.SearchResponse{ + { + RecordRef: &corev1.RecordRef{Cid: ""}, + Peer: &routingv1.Peer{ + Addrs: []string{"http://api.example.com"}, + }, + }, + }, + expected: 1, + }, + { + name: "nil recordRef", + results: []*routingv1.SearchResponse{ + { + RecordRef: nil, + Peer: &routingv1.Peer{ + Addrs: []string{"http://api.example.com"}, + }, + }, + }, + expected: 1, // Creates group with empty CID + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Should not panic + require.NotPanics(t, func() { + grouped := groupResultsByAPIAddress(tt.results) + if tt.expected >= 0 { + assert.Len(t, grouped, tt.expected) + } + }) + }) + } +} + +// TestParseSearchOutput_WithBytes tests using bytes.Buffer. 
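// [Editor's note: illustrative sketch, not part of the diffed sources.]
// The generated message types carry snake_case json tags (record_ref), so with
// encoding/json a camelCase key does not bind; this is also why the LargeDataset
// test above, which uses "recordRef", only asserts the slice length. The same
// behaviour with plain structs (hypothetical types, assumes encoding/json is imported):
func exampleFieldNameBinding() {
	type ref struct {
		Cid string `json:"cid"`
	}

	type resp struct {
		RecordRef *ref `json:"record_ref"`
	}

	var r resp
	_ = json.Unmarshal([]byte(`{"recordRef": {"cid": "x"}}`), &r)  // r.RecordRef stays nil
	_ = json.Unmarshal([]byte(`{"record_ref": {"cid": "x"}}`), &r) // r.RecordRef.Cid == "x"
}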
+func TestParseSearchOutput_WithBytes(t *testing.T) { + // Use snake_case JSON field names (record_ref, not recordRef) + input := []byte(`[{"record_ref": {"cid": "test-cid-123"}, "peer": {"addrs": ["http://test.com"]}}]`) + buffer := bytes.NewBuffer(input) + + result, err := parseSearchOutput(buffer) + + require.NoError(t, err) + assert.Len(t, result, 1) + // The CID should be set + assert.NotNil(t, result[0].GetRecordRef()) + assert.Equal(t, "test-cid-123", result[0].GetRecordRef().GetCid()) +} diff --git a/cli/cmd/verify/verify.go b/cli/cmd/verify/verify.go index a5126cbcd..a6be37fbd 100644 --- a/cli/cmd/verify/verify.go +++ b/cli/cmd/verify/verify.go @@ -1,80 +1,80 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package verify - -import ( - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - "github.com/agntcy/dir/cli/presenter" - ctxUtils "github.com/agntcy/dir/cli/util/context" - "github.com/spf13/cobra" -) - -func init() { - // Add output format flags - presenter.AddOutputFlags(Command) -} - -//nolint:mnd -var Command = &cobra.Command{ - Use: "verify", - Short: "Verify record signature against identity-based OIDC or key-based signing", - Long: `This command verifies the record signature against -identity-based OIDC or key-based signing process. - -Usage examples: - -1. Verify a record signature: - - dirctl verify - -2. Output formats: - - # Get verification result as JSON - dirctl verify --output json - - # Get raw verification status for scripting - dirctl verify --output raw -`, - RunE: func(cmd *cobra.Command, args []string) error { - var recordRef string - if len(args) > 1 { - return errors.New("one argument is allowed") - } else if len(args) == 1 { - recordRef = args[0] - } - - return runCommand(cmd, recordRef) - }, -} - -// nolint:mnd -func runCommand(cmd *cobra.Command, recordRef string) error { - // Get the client from the context - c, ok := ctxUtils.GetClientFromContext(cmd.Context()) - if !ok { - return errors.New("failed to get client from context") - } - - response, err := c.Verify(cmd.Context(), &signv1.VerifyRequest{ - RecordRef: &corev1.RecordRef{ - Cid: recordRef, - }, - }) - if err != nil { - return fmt.Errorf("failed to verify record with Zot: %w", err) - } - - // Output in the appropriate format - status := "trusted" - if !response.GetSuccess() { - status = "not trusted" - } - - return presenter.PrintMessage(cmd, "signature", "Record signature is", status) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package verify + +import ( + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + "github.com/agntcy/dir/cli/presenter" + ctxUtils "github.com/agntcy/dir/cli/util/context" + "github.com/spf13/cobra" +) + +func init() { + // Add output format flags + presenter.AddOutputFlags(Command) +} + +//nolint:mnd +var Command = &cobra.Command{ + Use: "verify", + Short: "Verify record signature against identity-based OIDC or key-based signing", + Long: `This command verifies the record signature against +identity-based OIDC or key-based signing process. + +Usage examples: + +1. Verify a record signature: + + dirctl verify + +2. 
Output formats: + + # Get verification result as JSON + dirctl verify --output json + + # Get raw verification status for scripting + dirctl verify --output raw +`, + RunE: func(cmd *cobra.Command, args []string) error { + var recordRef string + if len(args) > 1 { + return errors.New("one argument is allowed") + } else if len(args) == 1 { + recordRef = args[0] + } + + return runCommand(cmd, recordRef) + }, +} + +// nolint:mnd +func runCommand(cmd *cobra.Command, recordRef string) error { + // Get the client from the context + c, ok := ctxUtils.GetClientFromContext(cmd.Context()) + if !ok { + return errors.New("failed to get client from context") + } + + response, err := c.Verify(cmd.Context(), &signv1.VerifyRequest{ + RecordRef: &corev1.RecordRef{ + Cid: recordRef, + }, + }) + if err != nil { + return fmt.Errorf("failed to verify record with Zot: %w", err) + } + + // Output in the appropriate format + status := "trusted" + if !response.GetSuccess() { + status = "not trusted" + } + + return presenter.PrintMessage(cmd, "signature", "Record signature is", status) +} diff --git a/cli/cmd/version/version.go b/cli/cmd/version/version.go index 7ca251f5d..ffc4c775b 100644 --- a/cli/cmd/version/version.go +++ b/cli/cmd/version/version.go @@ -1,18 +1,18 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package version - -import ( - "github.com/agntcy/dir/api/version" - "github.com/agntcy/dir/cli/presenter" - "github.com/spf13/cobra" -) - -var Command = &cobra.Command{ - Use: "version", - Short: "Print the version of the application", - Run: func(cmd *cobra.Command, _ []string) { - presenter.Print(cmd, "Application Version: ", version.String()) - }, -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package version + +import ( + "github.com/agntcy/dir/api/version" + "github.com/agntcy/dir/cli/presenter" + "github.com/spf13/cobra" +) + +var Command = &cobra.Command{ + Use: "version", + Short: "Print the version of the application", + Run: func(cmd *cobra.Command, _ []string) { + presenter.Print(cmd, "Application Version: ", version.String()) + }, +} diff --git a/cli/go.mod b/cli/go.mod index 2cdc0f874..16bbb0f81 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -1,292 +1,292 @@ -module github.com/agntcy/dir/cli - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api => ../api - github.com/agntcy/dir/client => ../client - github.com/agntcy/dir/importer => ../importer - github.com/agntcy/dir/mcp => ../mcp - github.com/agntcy/dir/utils => ../utils -) - -require ( - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/client v0.6.0 - github.com/agntcy/dir/importer v0.6.0 - github.com/agntcy/dir/mcp v0.6.0 - github.com/agntcy/dir/utils v0.6.0 - github.com/libp2p/go-libp2p v0.44.0 - github.com/sigstore/sigstore v1.10.0 - github.com/spf13/cobra v1.10.2 - github.com/spf13/pflag v1.0.10 - github.com/stretchr/testify v1.11.1 - golang.org/x/crypto v0.45.0 - google.golang.org/protobuf v1.36.10 -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect - cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth v0.17.0 // indirect - cloud.google.com/go/compute/metadata v0.9.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect - 
github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect - github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect - github.com/alecthomas/chroma/v2 v2.20.0 // indirect - github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect - github.com/aws/smithy-go v1.24.0 // indirect - github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect - github.com/aymerick/douceur v0.2.0 // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/buger/jsonparser v1.1.1 // indirect - github.com/bytedance/gopkg v0.1.3 // indirect - github.com/bytedance/sonic v1.14.1 // indirect - github.com/bytedance/sonic/loader v0.3.0 // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/charmbracelet/bubbles v0.21.0 // indirect - github.com/charmbracelet/bubbletea v1.3.10 // indirect - github.com/charmbracelet/colorprofile v0.3.2 // indirect - github.com/charmbracelet/glamour v0.10.0 // indirect - github.com/charmbracelet/harmonica v0.2.0 // indirect - github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect - github.com/charmbracelet/x/ansi v0.10.2 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13 // indirect - github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect - github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/cloudwego/base64x v0.1.6 // indirect - github.com/cloudwego/eino v0.5.0-alpha.11 // indirect - github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect - github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect - github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect - github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // 
indirect - github.com/djherbis/times v1.6.0 // indirect - github.com/dlclark/regexp2 v1.11.5 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/eino-contrib/jsonschema v1.0.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect - github.com/evanphx/json-patch v0.5.2 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.10 // indirect - github.com/getkin/kin-openapi v0.120.0 // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/go-github/v73 v73.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/jsonschema-go v0.3.0 // indirect - github.com/google/s2a-go v0.1.9 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect - github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/goph/emperror v0.17.2 // indirect - github.com/gorilla/css v1.0.1 // indirect - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - 
github.com/invopop/jsonschema v0.13.0 // indirect - github.com/invopop/yaml v0.2.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.3.0 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/lucasb-eyer/go-colorful v1.3.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect - github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect - github.com/mark3labs/mcp-go v0.41.1 // indirect - github.com/mark3labs/mcphost v0.31.3 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.17 // indirect - github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect - github.com/microcosm-cc/bluemonday v1.0.27 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/moby/term v0.5.2 // indirect - github.com/modelcontextprotocol/go-sdk v0.8.0 // indirect - github.com/modelcontextprotocol/registry v1.2.3 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect - github.com/muesli/cancelreader v0.2.2 // indirect - github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.16.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.16.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.1 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nikolalohinski/gonja v1.5.3 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/ollama/ollama v0.12.9 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/cosign/v3 v3.0.3 // indirect - github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/rekor v1.4.3 // 
indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/viper v1.21.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/thales-e-security/pool v0.0.2 // indirect - github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect - github.com/tidwall/gjson v1.18.0 // indirect - github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.1 // indirect - github.com/tidwall/sjson v1.2.5 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect - github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yargevad/filepathx v1.0.0 // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - github.com/yuin/goldmark v1.7.13 // indirect - github.com/yuin/goldmark-emoji v1.0.6 // indirect - gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/arch v0.20.0 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.14.0 // indirect - google.golang.org/genai v1.22.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect - google.golang.org/grpc v1.77.0 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.34.2 // indirect - k8s.io/apimachinery v0.34.2 // indirect 
-	k8s.io/client-go v0.34.2 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
-	k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect
-	lukechampine.com/blake3 v1.4.1 // indirect
-	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
-	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
-	sigs.k8s.io/yaml v1.6.0 // indirect
-)
+module github.com/agntcy/dir/cli
+
+go 1.25.2
+
+replace (
+	github.com/agntcy/dir/api => ../api
+	github.com/agntcy/dir/client => ../client
+	github.com/agntcy/dir/importer => ../importer
+	github.com/agntcy/dir/mcp => ../mcp
+	github.com/agntcy/dir/utils => ../utils
+)
+
+require (
+	github.com/agntcy/dir/api v0.6.0
+	github.com/agntcy/dir/client v0.6.0
+	github.com/agntcy/dir/importer v0.6.0
+	github.com/agntcy/dir/mcp v0.6.0
+	github.com/agntcy/dir/utils v0.6.0
+	github.com/libp2p/go-libp2p v0.44.0
+	github.com/sigstore/sigstore v1.10.0
+	github.com/spf13/cobra v1.10.2
+	github.com/spf13/pflag v1.0.10
+	github.com/stretchr/testify v1.11.1
+	golang.org/x/crypto v0.45.0
+	google.golang.org/protobuf v1.36.10
+)
+
+require (
+	buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect
+	buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect
+	cloud.google.com/go v0.121.6 // indirect
+	cloud.google.com/go/auth v0.17.0 // indirect
+	cloud.google.com/go/compute/metadata v0.9.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/PuerkitoBio/goquery v1.10.3 // indirect
+	github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
+	github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect
+	github.com/alecthomas/chroma/v2 v2.20.0 // indirect
+	github.com/andybalholm/cascadia v1.3.3 // indirect
+	github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/atotto/clipboard v0.1.4 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect
+	github.com/aws/smithy-go v1.24.0 // indirect
+	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+	github.com/aymerick/douceur v0.2.0 // indirect
+	github.com/bahlo/generic-list-go v0.2.0 // indirect
+	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/buger/jsonparser v1.1.1 // indirect
+	github.com/bytedance/gopkg v0.1.3 // indirect
+	github.com/bytedance/sonic v1.14.1 // indirect
+	github.com/bytedance/sonic/loader v0.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+	github.com/charmbracelet/bubbles v0.21.0 // indirect
+	github.com/charmbracelet/bubbletea v1.3.10 // indirect
+	github.com/charmbracelet/colorprofile v0.3.2 // indirect
+	github.com/charmbracelet/glamour v0.10.0 // indirect
+	github.com/charmbracelet/harmonica v0.2.0 // indirect
+	github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect
+	github.com/charmbracelet/x/ansi v0.10.2 // indirect
+	github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
+	github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect
+	github.com/charmbracelet/x/term v0.2.1 // indirect
+	github.com/cloudwego/base64x v0.1.6 // indirect
+	github.com/cloudwego/eino v0.5.0-alpha.11 // indirect
+	github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect
+	github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect
+	github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect
+	github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
+	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
+	github.com/djherbis/times v1.6.0 // indirect
+	github.com/dlclark/regexp2 v1.11.5 // indirect
+	github.com/docker/cli v29.0.3+incompatible // indirect
+	github.com/docker/distribution v2.8.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.9.4 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/eino-contrib/jsonschema v1.0.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/evanphx/json-patch v0.5.2 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
+	github.com/getkin/kin-openapi v0.120.0 // indirect
+	github.com/go-chi/chi/v5 v5.2.3 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/analysis v0.24.1 // indirect
+	github.com/go-openapi/errors v0.22.4 // indirect
+	github.com/go-openapi/jsonpointer v0.22.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.3 // indirect
+	github.com/go-openapi/loads v0.23.2 // indirect
+	github.com/go-openapi/runtime v0.29.2 // indirect
+	github.com/go-openapi/spec v0.22.1 // indirect
+	github.com/go-openapi/strfmt v0.25.0 // indirect
+	github.com/go-openapi/swag v0.25.4 // indirect
+	github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
+	github.com/go-openapi/swag/conv v0.25.4 // indirect
+	github.com/go-openapi/swag/fileutils v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
+	github.com/go-openapi/swag/loading v0.25.4 // indirect
+	github.com/go-openapi/swag/mangling v0.25.4 // indirect
+	github.com/go-openapi/swag/netutils v0.25.4 // indirect
+	github.com/go-openapi/swag/stringutils v0.25.4 // indirect
+	github.com/go-openapi/swag/typeutils v0.25.4 // indirect
+	github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
+	github.com/go-openapi/validate v0.25.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/google/certificate-transparency-go v1.3.2 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-containerregistry v0.20.7 // indirect
+	github.com/google/go-github/v73 v73.0.0 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
+	github.com/google/jsonschema-go v0.3.0 // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
+	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+	github.com/goph/emperror v0.17.2 // indirect
+	github.com/gorilla/css v1.0.1 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
+	github.com/in-toto/attestation v1.1.2 // indirect
+	github.com/in-toto/in-toto-golang v0.9.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/invopop/jsonschema v0.13.0 // indirect
+	github.com/invopop/yaml v0.2.0 // indirect
+	github.com/ipfs/go-cid v0.5.0 // indirect
+	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.18.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
+	github.com/letsencrypt/boulder v0.20251110.0 // indirect
+	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+	github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
+	github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect
+	github.com/mark3labs/mcp-go v0.41.1 // indirect
+	github.com/mark3labs/mcphost v0.31.3 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-localereader v0.0.1 // indirect
+	github.com/mattn/go-runewidth v0.0.17 // indirect
+	github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect
+	github.com/microcosm-cc/bluemonday v1.0.27 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/modelcontextprotocol/go-sdk v0.8.0 // indirect
+	github.com/modelcontextprotocol/registry v1.2.3 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
+	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
+	github.com/muesli/cancelreader v0.2.2 // indirect
+	github.com/muesli/reflow v0.3.0 // indirect
+	github.com/muesli/termenv v0.16.0 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multiaddr v0.16.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multicodec v0.9.1 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nikolalohinski/gonja v1.5.3 // indirect
+	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/ollama/ollama v0.12.9 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+	github.com/perimeterx/marshmallow v1.1.5 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/sagikazarmark/locafero v0.11.0 // indirect
+	github.com/sassoftware/relic v7.2.1+incompatible // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
+	github.com/sigstore/cosign/v3 v3.0.3 // indirect
+	github.com/sigstore/protobuf-specs v0.5.0 // indirect
+	github.com/sigstore/rekor v1.4.3 // indirect
+	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
+	github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
+	github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
+	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
+	github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect
+	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
+	github.com/spf13/cast v1.10.0 // indirect
+	github.com/spf13/viper v1.21.0 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+	github.com/thales-e-security/pool v0.0.2 // indirect
+	github.com/theupdateframework/go-tuf v0.7.0 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
+	github.com/tidwall/gjson v1.18.0 // indirect
+	github.com/tidwall/match v1.1.1 // indirect
+	github.com/tidwall/pretty v1.2.1 // indirect
+	github.com/tidwall/sjson v1.2.5 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/vbatts/tar-split v0.12.2 // indirect
+	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+	github.com/yargevad/filepathx v1.0.0 // indirect
+	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
+	github.com/yuin/goldmark v1.7.13 // indirect
+	github.com/yuin/goldmark-emoji v1.0.6 // indirect
+	gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect
+	go.mongodb.org/mongo-driver v1.17.6 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
+	go.opentelemetry.io/otel/trace v1.38.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.27.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.3 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
+	golang.org/x/arch v0.20.0 // indirect
+	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
+	golang.org/x/mod v0.30.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
+	golang.org/x/oauth2 v0.33.0 // indirect
+	golang.org/x/sync v0.18.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.37.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
+	golang.org/x/time v0.14.0 // indirect
+	google.golang.org/genai v1.22.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
+	google.golang.org/grpc v1.77.0 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.34.2 // indirect
+	k8s.io/apimachinery v0.34.2 // indirect
+	k8s.io/client-go v0.34.2 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
+	k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect
+	lukechampine.com/blake3 v1.4.1 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+	sigs.k8s.io/yaml v1.6.0 // indirect
+)
diff --git a/cli/go.sum b/cli/go.sum
index b6eabf9e9..5bf79c306 100644
--- a/cli/go.sum
+++ b/cli/go.sum
@@ -1,1008 +1,1008 @@
-al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA=
-al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE=
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE=
-cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
-cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
-cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
-cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
-cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
-cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
-cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
-cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k=
-cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g=
-cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
-cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
-filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
-filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
-github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
-github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k=
-github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ=
-github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
-github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
-github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
-github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
-github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
-github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
-github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
-github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I=
-github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM=
-github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
-github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
-github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
-github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw=
-github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA=
-github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg=
-github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
-github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
-github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
-github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
-github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI=
-github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
-github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
-github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
-github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
-github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko=
-github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
-github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
-github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
-github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
-github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
-github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
-github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
-github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
-github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
-github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8=
-github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY=
-github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
-github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
-github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
-github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
-github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
-github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
-github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
-github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
-github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
-github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI=
-github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI=
-github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
-github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
-github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
-github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
-github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
-github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
-github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw=
-github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8=
-github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
-github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
-github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
-github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
-github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4=
-github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms=
-github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
-github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
-github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
-github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc=
-github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc=
-github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0=
-github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ=
-github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw=
-github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo=
-github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM=
-github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8=
-github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c=
-github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
-github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
-github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
-github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
-github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
-github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
-github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
-github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
-github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
-github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
-github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
-github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
-github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww=
-github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4=
-github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
-github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
-github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
-github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
-github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
-github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
-github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
-github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
-github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
-github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
-github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
-github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg=
-github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
-github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
-github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
-github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
-github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
-github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
-github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
-github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
-github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
-github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
-github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
-github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
-github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
-github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
-github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
-github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
-github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
-github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
-github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
-github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
-github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
-github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
-github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
-github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
-github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
-github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
-github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
-github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
-github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
-github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
-github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
-github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
-github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
-github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
-github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
-github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
-github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
-github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
-github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
-github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
-github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
-github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
-github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
-github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
-github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
-github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
-github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
-github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
-github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
-github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
-github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
-github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
-github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
-github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
-github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
-github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
-github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
-github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
-github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24=
-github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q=
-github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE=
-github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
-github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
-github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
-github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
-github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
-github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
-github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
-github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
-github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
-github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
-github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
-github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
-github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
-github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
-github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
-github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
-github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
-github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
-github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
-github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
-github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
-github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
-github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
-github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
-github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
-github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
-github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
-github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
-github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
-github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
-github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
-github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
-github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
-github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
-github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
-github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
-github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
-github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs=
-github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
-github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
-github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
-github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
-github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
-github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88=
-github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ=
-github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA=
-github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
-github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM=
-github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato=
-github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
-github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
-github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
-github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
-github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
-github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
-github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8=
-github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
-github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
-github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
-github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
-github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
-github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
-github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c=
-github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs=
-github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88=
-github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
-github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
-github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
-github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
-github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
-github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
-github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
-github.com/muesli/reflow
v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= -github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= -github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= -github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= -github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54= -github.com/ollama/ollama v0.12.9/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod 
h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= -github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 
-github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= 
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg= -github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= -github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= -github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= -github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= -github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 
h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= -github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= -github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= -github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/ysmood/fetchup v0.2.3 
h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= -github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= -github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= -github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= -github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= -github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= -github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= -gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 
h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= -go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api 
v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= -google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= -lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= 
+buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= 
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k= +github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= +github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= +github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= +github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= +github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= +github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= +github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI= +github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= 
+github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
+github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
+github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
+github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
+github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8=
+github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY=
+github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
+github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
+github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
+github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
+github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
+github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
+github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
+github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI=
+github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI=
+github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
+github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
+github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
+github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
+github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
+github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
+github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw=
+github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8=
+github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
+github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
+github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4=
+github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
+github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
+github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc=
+github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc=
+github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0=
+github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ=
+github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw=
+github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo=
+github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM=
+github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8=
+github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c=
+github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
+github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
+github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
+github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
+github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww=
+github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
+github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
+github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg=
+github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
+github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
+github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
+github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
+github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
+github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
+github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
+github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
+github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
+github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
+github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
+github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
+github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
+github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24=
+github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q=
+github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
+github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
+github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
+github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
+github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
+github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
+github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
+github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
+github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
+github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
+github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs=
+github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
+github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
+github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88=
+github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ=
+github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA=
+github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
+github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM=
+github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
+github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
+github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8=
+github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
+github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c=
+github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs=
+github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88=
+github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
+github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
+github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
+github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
+github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
+github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
+github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
+github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c=
+github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54=
+github.com/ollama/ollama v0.12.9/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
+github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
+github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g=
+github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
+github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA=
+github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4=
+github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
+github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0=
+github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o=
+github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo=
+github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU=
+github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI=
+github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y=
+github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg=
+github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4=
+github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y=
+github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI=
+github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg=
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
+github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
+github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
+github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
+github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
+github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
+github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
+github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
+github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
+github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
+github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA=
+github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw=
+github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw=
+github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw=
+github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
+github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
+github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
+github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=
+github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
+github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
+github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
+github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
+github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
+github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
+github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
+github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s=
+github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI=
+gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw=
+gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk=
+go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
+go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= 
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= +google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 
h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/cli/presenter/output.go b/cli/presenter/output.go index 0196b27cc..4344570f7 100644 --- a/cli/presenter/output.go +++ b/cli/presenter/output.go @@ -1,164 +1,164 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package presenter - -import ( - "encoding/json" - "fmt" - "reflect" - - "github.com/spf13/cobra" -) - -// OutputFormat represents the different output formats available. -type OutputFormat string - -const ( - // FormatHuman is the default human-readable output format. - FormatHuman OutputFormat = "human" - // FormatJSON is pretty-printed JSON format with indentation. - FormatJSON OutputFormat = "json" - // FormatJSONL is newline-delimited JSON format (one object per line, no indentation). - FormatJSONL OutputFormat = "jsonl" - // FormatRaw outputs only raw values (CIDs, IDs, etc.) without formatting. - FormatRaw OutputFormat = "raw" -) - -// OutputOptions holds the output formatting options. -type OutputOptions struct { - Format OutputFormat -} - -// IsStructuredOutput returns true if the output format is structured (json, jsonl, or raw). -// Structured outputs route metadata to stderr instead of stdout. -func (o OutputOptions) IsStructuredOutput() bool { - return o.Format == FormatJSON || o.Format == FormatJSONL || o.Format == FormatRaw -} - -// GetOutputOptions extracts output format options from command flags. -func GetOutputOptions(cmd *cobra.Command) OutputOptions { - opts := OutputOptions{ - Format: FormatHuman, // Default to human-readable - } - - // Check for --output flag - if outputFlag, err := cmd.Flags().GetString("output"); err == nil && outputFlag != "" { - opts.Format = OutputFormat(outputFlag) - } - - return opts -} - -// AddOutputFlags adds the standard --output flag to a command. -func AddOutputFlags(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "human", "Output format: human|json|jsonl|raw") -} - -// PrintMessage outputs data in the appropriate format based on command flags. -func PrintMessage(cmd *cobra.Command, title, message string, value any) error { - opts := GetOutputOptions(cmd) - - // Handle empty case for multiple values - if value == nil || isEmptySlice(value) { - if opts.IsStructuredOutput() { - // For structured output, print empty array to stdout - Print(cmd, "[]\n") - } else { - // For human format, print descriptive message - Println(cmd, fmt.Sprintf("No %s found", title)) - } - - return nil - } - - switch opts.Format { - case FormatRaw: - // For raw format, output just the value - Print(cmd, fmt.Sprintf("%v", value)) - - return nil - - case FormatJSON: - // For JSON format, output the value as JSON with indentation - output, err := json.MarshalIndent(value, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal JSON: %w", err) - } - - Print(cmd, string(output)) - Print(cmd, "\n") - - return nil - - case FormatJSONL: - // For JSONL format, output newline-delimited JSON - return printJSONL(cmd, value) - - case FormatHuman: - // For human-readable format, output with descriptive message - Println(cmd, fmt.Sprintf("%s: %s", message, fmt.Sprintf("%v", value))) - - return nil - } - - return nil -} - -// printJSONL outputs data in newline-delimited JSON format (one object per line, no indentation). 
-func printJSONL(cmd *cobra.Command, value any) error { - // Handle single object - if !isSliceOrArray(value) { - output, err := json.Marshal(value) - if err != nil { - return fmt.Errorf("failed to marshal JSON: %w", err) - } - - Printf(cmd, "%s\n", string(output)) - - return nil - } - - // Handle array/slice - print each element on a separate line - v := reflect.ValueOf(value) - for i := range v.Len() { - output, err := json.Marshal(v.Index(i).Interface()) - if err != nil { - return fmt.Errorf("failed to marshal JSON: %w", err) - } - - Printf(cmd, "%s\n", string(output)) - } - - return nil -} - -// isSliceOrArray returns true if the value is a slice or array. -func isSliceOrArray(value any) bool { - if value == nil { - return false - } - - v := reflect.ValueOf(value) - - return v.Kind() == reflect.Slice || v.Kind() == reflect.Array -} - -// isEmptySlice returns true if the value is an empty slice. -func isEmptySlice(value any) bool { - if value == nil { - return false - } - - if slice, ok := value.([]interface{}); ok && len(slice) == 0 { - return true - } - - // Check using reflection for other slice types - v := reflect.ValueOf(value) - if v.Kind() == reflect.Slice || v.Kind() == reflect.Array { - return v.Len() == 0 - } - - return false -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package presenter + +import ( + "encoding/json" + "fmt" + "reflect" + + "github.com/spf13/cobra" +) + +// OutputFormat represents the different output formats available. +type OutputFormat string + +const ( + // FormatHuman is the default human-readable output format. + FormatHuman OutputFormat = "human" + // FormatJSON is pretty-printed JSON format with indentation. + FormatJSON OutputFormat = "json" + // FormatJSONL is newline-delimited JSON format (one object per line, no indentation). + FormatJSONL OutputFormat = "jsonl" + // FormatRaw outputs only raw values (CIDs, IDs, etc.) without formatting. + FormatRaw OutputFormat = "raw" +) + +// OutputOptions holds the output formatting options. +type OutputOptions struct { + Format OutputFormat +} + +// IsStructuredOutput returns true if the output format is structured (json, jsonl, or raw). +// Structured outputs route metadata to stderr instead of stdout. +func (o OutputOptions) IsStructuredOutput() bool { + return o.Format == FormatJSON || o.Format == FormatJSONL || o.Format == FormatRaw +} + +// GetOutputOptions extracts output format options from command flags. +func GetOutputOptions(cmd *cobra.Command) OutputOptions { + opts := OutputOptions{ + Format: FormatHuman, // Default to human-readable + } + + // Check for --output flag + if outputFlag, err := cmd.Flags().GetString("output"); err == nil && outputFlag != "" { + opts.Format = OutputFormat(outputFlag) + } + + return opts +} + +// AddOutputFlags adds the standard --output flag to a command. +func AddOutputFlags(cmd *cobra.Command) { + cmd.Flags().StringP("output", "o", "human", "Output format: human|json|jsonl|raw") +} + +// PrintMessage outputs data in the appropriate format based on command flags. 
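+//
+// Illustrative usage (an editorial sketch, not part of this change; the
+// command wiring and sample records shown here are assumed):
+//
+//	cmd := &cobra.Command{Use: "list"}
+//	AddOutputFlags(cmd)
+//	records := []map[string]string{{"cid": "example-cid-1"}}
+//	// Default --output=human prints "Found records: ..."; --output=jsonl
+//	// prints one JSON object per line; --output=json pretty-prints.
+//	if err := PrintMessage(cmd, "records", "Found records", records); err != nil {
+//		return err
+//	}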
+func PrintMessage(cmd *cobra.Command, title, message string, value any) error { + opts := GetOutputOptions(cmd) + + // Handle empty case for multiple values + if value == nil || isEmptySlice(value) { + if opts.IsStructuredOutput() { + // For structured output, print empty array to stdout + Print(cmd, "[]\n") + } else { + // For human format, print descriptive message + Println(cmd, fmt.Sprintf("No %s found", title)) + } + + return nil + } + + switch opts.Format { + case FormatRaw: + // For raw format, output just the value + Print(cmd, fmt.Sprintf("%v", value)) + + return nil + + case FormatJSON: + // For JSON format, output the value as JSON with indentation + output, err := json.MarshalIndent(value, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + + Print(cmd, string(output)) + Print(cmd, "\n") + + return nil + + case FormatJSONL: + // For JSONL format, output newline-delimited JSON + return printJSONL(cmd, value) + + case FormatHuman: + // For human-readable format, output with descriptive message + Println(cmd, fmt.Sprintf("%s: %s", message, fmt.Sprintf("%v", value))) + + return nil + } + + return nil +} + +// printJSONL outputs data in newline-delimited JSON format (one object per line, no indentation). +func printJSONL(cmd *cobra.Command, value any) error { + // Handle single object + if !isSliceOrArray(value) { + output, err := json.Marshal(value) + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + + Printf(cmd, "%s\n", string(output)) + + return nil + } + + // Handle array/slice - print each element on a separate line + v := reflect.ValueOf(value) + for i := range v.Len() { + output, err := json.Marshal(v.Index(i).Interface()) + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + + Printf(cmd, "%s\n", string(output)) + } + + return nil +} + +// isSliceOrArray returns true if the value is a slice or array. +func isSliceOrArray(value any) bool { + if value == nil { + return false + } + + v := reflect.ValueOf(value) + + return v.Kind() == reflect.Slice || v.Kind() == reflect.Array +} + +// isEmptySlice returns true if the value is an empty slice. +func isEmptySlice(value any) bool { + if value == nil { + return false + } + + if slice, ok := value.([]interface{}); ok && len(slice) == 0 { + return true + } + + // Check using reflection for other slice types + v := reflect.ValueOf(value) + if v.Kind() == reflect.Slice || v.Kind() == reflect.Array { + return v.Len() == 0 + } + + return false +} diff --git a/cli/presenter/output_test.go b/cli/presenter/output_test.go index 1a41f752f..5776afd03 100644 --- a/cli/presenter/output_test.go +++ b/cli/presenter/output_test.go @@ -1,641 +1,641 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package presenter - -import ( - "bytes" - "encoding/json" - "testing" - - "github.com/spf13/cobra" -) - -// Test constants for frequently used values with semantic meaning. 
-const ( - outputFlagName = "output" - outputShortFlagName = "o" - emptyJSONArray = "[]\n" - defaultFormatValue = "human" - flagNotFoundMsg = "--output flag not found" - shortFlagNotFoundMsg = "short flag -o not found" -) - -func TestGetOutputOptions(t *testing.T) { - tests := []struct { - name string - flagValue string - expectedFormat OutputFormat - }{ - { - name: "default format", - flagValue: "", - expectedFormat: FormatHuman, - }, - { - name: "human format", - flagValue: string(FormatHuman), - expectedFormat: FormatHuman, - }, - { - name: "json format", - flagValue: string(FormatJSON), - expectedFormat: FormatJSON, - }, - { - name: "jsonl format", - flagValue: string(FormatJSONL), - expectedFormat: FormatJSONL, - }, - { - name: "raw format", - flagValue: string(FormatRaw), - expectedFormat: FormatRaw, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cmd := &cobra.Command{} - AddOutputFlags(cmd) - - if tt.flagValue != "" { - if err := cmd.Flags().Set(outputFlagName, tt.flagValue); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - } - - opts := GetOutputOptions(cmd) - if opts.Format != tt.expectedFormat { - t.Errorf("expected format %q, got %q", tt.expectedFormat, opts.Format) - } - }) - } -} - -func TestIsStructuredOutput(t *testing.T) { - tests := []struct { - name string - format OutputFormat - isStructed bool - }{ - { - name: "human is not structured", - format: FormatHuman, - isStructed: false, - }, - { - name: "json is structured", - format: FormatJSON, - isStructed: true, - }, - { - name: "jsonl is structured", - format: FormatJSONL, - isStructed: true, - }, - { - name: "raw is structured", - format: FormatRaw, - isStructed: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - opts := OutputOptions{Format: tt.format} - if got := opts.IsStructuredOutput(); got != tt.isStructed { - t.Errorf("IsStructuredOutput() = %v, want %v", got, tt.isStructed) - } - }) - } -} - -func TestPrintMessageHumanFormat(t *testing.T) { - tests := []struct { - name string - title string - message string - value any - expected string - }{ - { - name: "simple string value", - title: "result", - message: "Found result", - value: "test-value", - expected: "Found result: test-value\n", - }, - { - name: "nil value", - title: "results", - message: "Found results", - value: nil, - expected: "No results found\n", - }, - { - name: "empty slice", - title: "items", - message: "Found items", - value: []interface{}{}, - expected: "No items found\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - err := PrintMessage(cmd, tt.title, tt.message, tt.value) - if err != nil { - t.Fatalf("PrintMessage() error = %v", err) - } - - if got := buf.String(); got != tt.expected { - t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) - } - }) - } -} - -func TestPrintMessageJSONFormat(t *testing.T) { - tests := []struct { - name string - value any - expected string - }{ - { - name: "simple object", - value: map[string]string{"key": "value"}, - expected: `{ - "key": "value" -} -`, - }, - { - name: "array of objects", - value: []map[string]string{{"id": "1"}, {"id": "2"}}, - expected: `[ - { - "id": "1" - }, - { - "id": "2" - } -] -`, - }, - { - name: "nil value", - value: nil, - expected: emptyJSONArray, - }, - { - name: "empty slice", - value: []interface{}{}, - expected: emptyJSONArray, - }, - } - - for _, tt := range 
tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatJSON)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", tt.value) - if err != nil { - t.Fatalf("PrintMessage() error = %v", err) - } - - if got := buf.String(); got != tt.expected { - t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) - } - }) - } -} - -func TestPrintMessageJSONLFormat(t *testing.T) { - tests := []struct { - name string - value any - expected string - }{ - { - name: "single object", - value: map[string]string{"key": "value"}, - expected: "{\"key\":\"value\"}\n", - }, - { - name: "array of objects", - value: []map[string]string{{"id": "1"}, {"id": "2"}}, - expected: "{\"id\":\"1\"}\n{\"id\":\"2\"}\n", - }, - { - name: "array of strings", - value: []string{"a", "b", "c"}, - expected: "\"a\"\n\"b\"\n\"c\"\n", - }, - { - name: "nil value", - value: nil, - expected: emptyJSONArray, - }, - { - name: "empty slice", - value: []interface{}{}, - expected: emptyJSONArray, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", tt.value) - if err != nil { - t.Fatalf("PrintMessage() error = %v", err) - } - - if got := buf.String(); got != tt.expected { - t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) - } - }) - } -} - -func TestPrintMessageRawFormat(t *testing.T) { - tests := []struct { - name string - value any - expected string - }{ - { - name: "string value", - value: "test-cid-123", - expected: "test-cid-123", - }, - { - name: "slice of strings", - value: []string{"cid1", "cid2"}, - expected: "[cid1 cid2]", - }, - { - name: "nil value", - value: nil, - expected: emptyJSONArray, - }, - { - name: "empty slice", - value: []interface{}{}, - expected: emptyJSONArray, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatRaw)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", tt.value) - if err != nil { - t.Fatalf("PrintMessage() error = %v", err) - } - - if got := buf.String(); got != tt.expected { - t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) - } - }) - } -} - -func TestPrintJSONL(t *testing.T) { - tests := []struct { - name string - value any - expected string - }{ - { - name: "single object", - value: map[string]string{"key": "value"}, - expected: "{\"key\":\"value\"}\n", - }, - { - name: "array of objects", - value: []map[string]int{{"count": 1}, {"count": 2}}, - expected: "{\"count\":1}\n{\"count\":2}\n", - }, - { - name: "complex nested object", - value: map[string]interface{}{ - "id": "123", - "tags": []string{"a", "b"}, - }, - expected: "{\"id\":\"123\",\"tags\":[\"a\",\"b\"]}\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - - err := printJSONL(cmd, tt.value) - if err != nil { - t.Fatalf("printJSONL() error = %v", err) - } - - if got := buf.String(); got != 
tt.expected { - t.Errorf("printJSONL() output = %q, want %q", got, tt.expected) - } - - // Verify each line is valid JSON - lines := bytes.Split(bytes.TrimSpace(buf.Bytes()), []byte("\n")) - for i, line := range lines { - var v interface{} - if err := json.Unmarshal(line, &v); err != nil { - t.Errorf("line %d is not valid JSON: %v", i, err) - } - } - }) - } -} - -func TestIsSliceOrArray(t *testing.T) { - tests := []struct { - name string - value any - expected bool - }{ - { - name: "nil value", - value: nil, - expected: false, - }, - { - name: "string value", - value: "test", - expected: false, - }, - { - name: "int value", - value: 42, - expected: false, - }, - { - name: "map value", - value: map[string]string{"key": "value"}, - expected: false, - }, - { - name: "slice of strings", - value: []string{"a", "b"}, - expected: true, - }, - { - name: "slice of interfaces", - value: []interface{}{"a", 1}, - expected: true, - }, - { - name: "array of ints", - value: [3]int{1, 2, 3}, - expected: true, - }, - { - name: "empty slice", - value: []string{}, - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := isSliceOrArray(tt.value); got != tt.expected { - t.Errorf("isSliceOrArray() = %v, want %v", got, tt.expected) - } - }) - } -} - -func TestIsEmptySlice(t *testing.T) { - tests := []struct { - name string - value any - expected bool - }{ - { - name: "nil value", - value: nil, - expected: false, - }, - { - name: "non-slice value", - value: "test", - expected: false, - }, - { - name: "empty interface slice", - value: []interface{}{}, - expected: true, - }, - { - name: "non-empty interface slice", - value: []interface{}{"item"}, - expected: false, - }, - { - name: "empty string slice", - value: []string{}, - expected: true, - }, - { - name: "non-empty string slice", - value: []string{"item"}, - expected: false, - }, - { - name: "empty array", - value: [0]int{}, - expected: true, - }, - { - name: "non-empty array", - value: [1]int{42}, - expected: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := isEmptySlice(tt.value); got != tt.expected { - t.Errorf("isEmptySlice() = %v, want %v", got, tt.expected) - } - }) - } -} - -func TestAddOutputFlags(t *testing.T) { - cmd := &cobra.Command{} - AddOutputFlags(cmd) - - // Check flag exists - flag := cmd.Flags().Lookup(outputFlagName) - if flag == nil { - t.Fatal(flagNotFoundMsg) - } - - // Check short flag exists - if shortFlag := cmd.Flags().ShorthandLookup(outputShortFlagName); shortFlag == nil { - t.Error(shortFlagNotFoundMsg) - } - - // Check default value - if flag.DefValue != defaultFormatValue { - t.Errorf("default value = %q, want %q", flag.DefValue, defaultFormatValue) - } - - // Check usage message contains all formats - usage := flag.Usage - - formats := []string{string(FormatHuman), string(FormatJSON), string(FormatJSONL), string(FormatRaw)} - for _, format := range formats { - if !containsSubstring(usage, format) { - t.Errorf("usage message missing format %q", format) - } - } -} - -func TestPrintMessageMarshalError(t *testing.T) { - // Create a value that can't be marshaled to JSON - invalidValue := make(chan int) - - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatJSON)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", invalidValue) - if err == nil { - t.Error("PrintMessage() expected error for 
invalid JSON, got nil") - } -} - -func TestPrintMessageJSONLMarshalError_SingleObject(t *testing.T) { - // Create a value that can't be marshaled to JSON (channel) - invalidValue := make(chan int) - - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", invalidValue) - if err == nil { - t.Error("PrintMessage() expected error for invalid JSONL single object, got nil") - } - - if err != nil && !containsSubstring(err.Error(), "failed to marshal JSON") { - t.Errorf("PrintMessage() error = %v, should contain 'failed to marshal JSON'", err) - } -} - -func TestPrintMessageJSONLMarshalError_ArrayElement(t *testing.T) { - // Create an array with an element that can't be marshaled to JSON - invalidValue := []interface{}{ - map[string]interface{}{"id": 1, "name": "valid"}, - make(chan int), // This will cause marshal error - } - - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - err := PrintMessage(cmd, "test", "Test", invalidValue) - if err == nil { - t.Error("PrintMessage() expected error for invalid JSONL array element, got nil") - } - - if err != nil && !containsSubstring(err.Error(), "failed to marshal JSON") { - t.Errorf("PrintMessage() error = %v, should contain 'failed to marshal JSON'", err) - } -} - -func TestPrintMessageInvalidFormat(t *testing.T) { - // Test with an invalid format (not one of the defined constants) - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - AddOutputFlags(cmd) - - // Set an invalid format value directly - if err := cmd.Flags().Set(outputFlagName, "invalid-format"); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - // Should handle gracefully (return nil without error) - err := PrintMessage(cmd, "test", "Test", "value") - if err != nil { - t.Errorf("PrintMessage() with invalid format should not error, got: %v", err) - } -} - -// Helper function. -func containsSubstring(s, substr string) bool { - return len(s) > 0 && len(substr) > 0 && len(s) >= len(substr) && - (s == substr || len(s) > len(substr) && findInString(s, substr)) -} - -func findInString(s, substr string) bool { - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return true - } - } - - return false -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package presenter + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/spf13/cobra" +) + +// Test constants for frequently used values with semantic meaning. 
+const ( + outputFlagName = "output" + outputShortFlagName = "o" + emptyJSONArray = "[]\n" + defaultFormatValue = "human" + flagNotFoundMsg = "--output flag not found" + shortFlagNotFoundMsg = "short flag -o not found" +) + +func TestGetOutputOptions(t *testing.T) { + tests := []struct { + name string + flagValue string + expectedFormat OutputFormat + }{ + { + name: "default format", + flagValue: "", + expectedFormat: FormatHuman, + }, + { + name: "human format", + flagValue: string(FormatHuman), + expectedFormat: FormatHuman, + }, + { + name: "json format", + flagValue: string(FormatJSON), + expectedFormat: FormatJSON, + }, + { + name: "jsonl format", + flagValue: string(FormatJSONL), + expectedFormat: FormatJSONL, + }, + { + name: "raw format", + flagValue: string(FormatRaw), + expectedFormat: FormatRaw, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := &cobra.Command{} + AddOutputFlags(cmd) + + if tt.flagValue != "" { + if err := cmd.Flags().Set(outputFlagName, tt.flagValue); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + } + + opts := GetOutputOptions(cmd) + if opts.Format != tt.expectedFormat { + t.Errorf("expected format %q, got %q", tt.expectedFormat, opts.Format) + } + }) + } +} + +func TestIsStructuredOutput(t *testing.T) { + tests := []struct { + name string + format OutputFormat + isStructed bool + }{ + { + name: "human is not structured", + format: FormatHuman, + isStructed: false, + }, + { + name: "json is structured", + format: FormatJSON, + isStructed: true, + }, + { + name: "jsonl is structured", + format: FormatJSONL, + isStructed: true, + }, + { + name: "raw is structured", + format: FormatRaw, + isStructed: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + opts := OutputOptions{Format: tt.format} + if got := opts.IsStructuredOutput(); got != tt.isStructed { + t.Errorf("IsStructuredOutput() = %v, want %v", got, tt.isStructed) + } + }) + } +} + +func TestPrintMessageHumanFormat(t *testing.T) { + tests := []struct { + name string + title string + message string + value any + expected string + }{ + { + name: "simple string value", + title: "result", + message: "Found result", + value: "test-value", + expected: "Found result: test-value\n", + }, + { + name: "nil value", + title: "results", + message: "Found results", + value: nil, + expected: "No results found\n", + }, + { + name: "empty slice", + title: "items", + message: "Found items", + value: []interface{}{}, + expected: "No items found\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + err := PrintMessage(cmd, tt.title, tt.message, tt.value) + if err != nil { + t.Fatalf("PrintMessage() error = %v", err) + } + + if got := buf.String(); got != tt.expected { + t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) + } + }) + } +} + +func TestPrintMessageJSONFormat(t *testing.T) { + tests := []struct { + name string + value any + expected string + }{ + { + name: "simple object", + value: map[string]string{"key": "value"}, + expected: `{ + "key": "value" +} +`, + }, + { + name: "array of objects", + value: []map[string]string{{"id": "1"}, {"id": "2"}}, + expected: `[ + { + "id": "1" + }, + { + "id": "2" + } +] +`, + }, + { + name: "nil value", + value: nil, + expected: emptyJSONArray, + }, + { + name: "empty slice", + value: []interface{}{}, + expected: emptyJSONArray, + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatJSON)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", tt.value) + if err != nil { + t.Fatalf("PrintMessage() error = %v", err) + } + + if got := buf.String(); got != tt.expected { + t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) + } + }) + } +} + +func TestPrintMessageJSONLFormat(t *testing.T) { + tests := []struct { + name string + value any + expected string + }{ + { + name: "single object", + value: map[string]string{"key": "value"}, + expected: "{\"key\":\"value\"}\n", + }, + { + name: "array of objects", + value: []map[string]string{{"id": "1"}, {"id": "2"}}, + expected: "{\"id\":\"1\"}\n{\"id\":\"2\"}\n", + }, + { + name: "array of strings", + value: []string{"a", "b", "c"}, + expected: "\"a\"\n\"b\"\n\"c\"\n", + }, + { + name: "nil value", + value: nil, + expected: emptyJSONArray, + }, + { + name: "empty slice", + value: []interface{}{}, + expected: emptyJSONArray, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", tt.value) + if err != nil { + t.Fatalf("PrintMessage() error = %v", err) + } + + if got := buf.String(); got != tt.expected { + t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) + } + }) + } +} + +func TestPrintMessageRawFormat(t *testing.T) { + tests := []struct { + name string + value any + expected string + }{ + { + name: "string value", + value: "test-cid-123", + expected: "test-cid-123", + }, + { + name: "slice of strings", + value: []string{"cid1", "cid2"}, + expected: "[cid1 cid2]", + }, + { + name: "nil value", + value: nil, + expected: emptyJSONArray, + }, + { + name: "empty slice", + value: []interface{}{}, + expected: emptyJSONArray, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatRaw)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", tt.value) + if err != nil { + t.Fatalf("PrintMessage() error = %v", err) + } + + if got := buf.String(); got != tt.expected { + t.Errorf("PrintMessage() output = %q, want %q", got, tt.expected) + } + }) + } +} + +func TestPrintJSONL(t *testing.T) { + tests := []struct { + name string + value any + expected string + }{ + { + name: "single object", + value: map[string]string{"key": "value"}, + expected: "{\"key\":\"value\"}\n", + }, + { + name: "array of objects", + value: []map[string]int{{"count": 1}, {"count": 2}}, + expected: "{\"count\":1}\n{\"count\":2}\n", + }, + { + name: "complex nested object", + value: map[string]interface{}{ + "id": "123", + "tags": []string{"a", "b"}, + }, + expected: "{\"id\":\"123\",\"tags\":[\"a\",\"b\"]}\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + + err := printJSONL(cmd, tt.value) + if err != nil { + t.Fatalf("printJSONL() error = %v", err) + } + + if got := buf.String(); got != 
tt.expected { + t.Errorf("printJSONL() output = %q, want %q", got, tt.expected) + } + + // Verify each line is valid JSON + lines := bytes.Split(bytes.TrimSpace(buf.Bytes()), []byte("\n")) + for i, line := range lines { + var v interface{} + if err := json.Unmarshal(line, &v); err != nil { + t.Errorf("line %d is not valid JSON: %v", i, err) + } + } + }) + } +} + +func TestIsSliceOrArray(t *testing.T) { + tests := []struct { + name string + value any + expected bool + }{ + { + name: "nil value", + value: nil, + expected: false, + }, + { + name: "string value", + value: "test", + expected: false, + }, + { + name: "int value", + value: 42, + expected: false, + }, + { + name: "map value", + value: map[string]string{"key": "value"}, + expected: false, + }, + { + name: "slice of strings", + value: []string{"a", "b"}, + expected: true, + }, + { + name: "slice of interfaces", + value: []interface{}{"a", 1}, + expected: true, + }, + { + name: "array of ints", + value: [3]int{1, 2, 3}, + expected: true, + }, + { + name: "empty slice", + value: []string{}, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isSliceOrArray(tt.value); got != tt.expected { + t.Errorf("isSliceOrArray() = %v, want %v", got, tt.expected) + } + }) + } +} + +func TestIsEmptySlice(t *testing.T) { + tests := []struct { + name string + value any + expected bool + }{ + { + name: "nil value", + value: nil, + expected: false, + }, + { + name: "non-slice value", + value: "test", + expected: false, + }, + { + name: "empty interface slice", + value: []interface{}{}, + expected: true, + }, + { + name: "non-empty interface slice", + value: []interface{}{"item"}, + expected: false, + }, + { + name: "empty string slice", + value: []string{}, + expected: true, + }, + { + name: "non-empty string slice", + value: []string{"item"}, + expected: false, + }, + { + name: "empty array", + value: [0]int{}, + expected: true, + }, + { + name: "non-empty array", + value: [1]int{42}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isEmptySlice(tt.value); got != tt.expected { + t.Errorf("isEmptySlice() = %v, want %v", got, tt.expected) + } + }) + } +} + +func TestAddOutputFlags(t *testing.T) { + cmd := &cobra.Command{} + AddOutputFlags(cmd) + + // Check flag exists + flag := cmd.Flags().Lookup(outputFlagName) + if flag == nil { + t.Fatal(flagNotFoundMsg) + } + + // Check short flag exists + if shortFlag := cmd.Flags().ShorthandLookup(outputShortFlagName); shortFlag == nil { + t.Error(shortFlagNotFoundMsg) + } + + // Check default value + if flag.DefValue != defaultFormatValue { + t.Errorf("default value = %q, want %q", flag.DefValue, defaultFormatValue) + } + + // Check usage message contains all formats + usage := flag.Usage + + formats := []string{string(FormatHuman), string(FormatJSON), string(FormatJSONL), string(FormatRaw)} + for _, format := range formats { + if !containsSubstring(usage, format) { + t.Errorf("usage message missing format %q", format) + } + } +} + +func TestPrintMessageMarshalError(t *testing.T) { + // Create a value that can't be marshaled to JSON + invalidValue := make(chan int) + + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatJSON)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", invalidValue) + if err == nil { + t.Error("PrintMessage() expected error for 
invalid JSON, got nil") + } +} + +func TestPrintMessageJSONLMarshalError_SingleObject(t *testing.T) { + // Create a value that can't be marshaled to JSON (channel) + invalidValue := make(chan int) + + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", invalidValue) + if err == nil { + t.Error("PrintMessage() expected error for invalid JSONL single object, got nil") + } + + if err != nil && !containsSubstring(err.Error(), "failed to marshal JSON") { + t.Errorf("PrintMessage() error = %v, should contain 'failed to marshal JSON'", err) + } +} + +func TestPrintMessageJSONLMarshalError_ArrayElement(t *testing.T) { + // Create an array with an element that can't be marshaled to JSON + invalidValue := []interface{}{ + map[string]interface{}{"id": 1, "name": "valid"}, + make(chan int), // This will cause marshal error + } + + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + if err := cmd.Flags().Set(outputFlagName, string(FormatJSONL)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + err := PrintMessage(cmd, "test", "Test", invalidValue) + if err == nil { + t.Error("PrintMessage() expected error for invalid JSONL array element, got nil") + } + + if err != nil && !containsSubstring(err.Error(), "failed to marshal JSON") { + t.Errorf("PrintMessage() error = %v, should contain 'failed to marshal JSON'", err) + } +} + +func TestPrintMessageInvalidFormat(t *testing.T) { + // Test with an invalid format (not one of the defined constants) + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + AddOutputFlags(cmd) + + // Set an invalid format value directly + if err := cmd.Flags().Set(outputFlagName, "invalid-format"); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + // Should handle gracefully (return nil without error) + err := PrintMessage(cmd, "test", "Test", "value") + if err != nil { + t.Errorf("PrintMessage() with invalid format should not error, got: %v", err) + } +} + +// containsSubstring reports whether substr occurs within s. It is a minimal local +// substring check (an empty substr is treated as not found) so this test file +// needs no extra imports. +func containsSubstring(s, substr string) bool { + return len(substr) > 0 && len(s) >= len(substr) && findInString(s, substr) +} + +// findInString scans s for substr with a naive sliding window. +func findInString(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + + return false +} diff --git a/cli/presenter/presenter.go b/cli/presenter/presenter.go index b18156dcc..463b8737b 100644 --- a/cli/presenter/presenter.go +++ b/cli/presenter/presenter.go @@ -1,47 +1,47 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package presenter - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -// Print writes to stdout. -func Print(cmd *cobra.Command, args ...interface{}) { - _, _ = fmt.Fprint(cmd.OutOrStdout(), args...) -} - -// Println writes to stdout with a newline. -func Println(cmd *cobra.Command, args ...interface{}) { - _, _ = fmt.Fprintln(cmd.OutOrStdout(), args...) -} - -// Printf writes formatted output to stdout. -func Printf(cmd *cobra.Command, format string, args ...interface{}) { - _, _ = fmt.Fprintf(cmd.OutOrStdout(), format, args...) -} - -// PrintSmartf writes formatted output to stdout for human format, -// or to stderr for structured formats (json, jsonl, raw). 
-// Use this for metadata messages that should not pollute structured output. -func PrintSmartf(cmd *cobra.Command, format string, args ...interface{}) { - opts := GetOutputOptions(cmd) - if opts.IsStructuredOutput() { - Errorf(cmd, format, args...) - } else { - Printf(cmd, format, args...) - } -} - -// Error writes to stderr. -func Error(cmd *cobra.Command, args ...interface{}) { - _, _ = fmt.Fprint(cmd.ErrOrStderr(), args...) -} - -// Errorf writes formatted output to stderr. -func Errorf(cmd *cobra.Command, format string, args ...interface{}) { - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), format, args...) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package presenter + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// Print writes to stdout. +func Print(cmd *cobra.Command, args ...interface{}) { + _, _ = fmt.Fprint(cmd.OutOrStdout(), args...) +} + +// Println writes to stdout with a newline. +func Println(cmd *cobra.Command, args ...interface{}) { + _, _ = fmt.Fprintln(cmd.OutOrStdout(), args...) +} + +// Printf writes formatted output to stdout. +func Printf(cmd *cobra.Command, format string, args ...interface{}) { + _, _ = fmt.Fprintf(cmd.OutOrStdout(), format, args...) +} + +// PrintSmartf writes formatted output to stdout for human format, +// or to stderr for structured formats (json, jsonl, raw). +// Use this for metadata messages that should not pollute structured output. +func PrintSmartf(cmd *cobra.Command, format string, args ...interface{}) { + opts := GetOutputOptions(cmd) + if opts.IsStructuredOutput() { + Errorf(cmd, format, args...) + } else { + Printf(cmd, format, args...) + } +} + +// Error writes to stderr. +func Error(cmd *cobra.Command, args ...interface{}) { + _, _ = fmt.Fprint(cmd.ErrOrStderr(), args...) +} + +// Errorf writes formatted output to stderr. +func Errorf(cmd *cobra.Command, format string, args ...interface{}) { + _, _ = fmt.Fprintf(cmd.ErrOrStderr(), format, args...) +} diff --git a/cli/presenter/presenter_test.go b/cli/presenter/presenter_test.go index abd70df59..518b461cc 100644 --- a/cli/presenter/presenter_test.go +++ b/cli/presenter/presenter_test.go @@ -1,271 +1,271 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package presenter - -import ( - "bytes" - "testing" - - "github.com/spf13/cobra" -) - -// Test constants for frequently used values with semantic meaning. 
-const ( - emptyString = "" -) - -func TestPrint(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - - Print(cmd, "test message") - - expected := "test message" - if got := buf.String(); got != expected { - t.Errorf("Print() = %q, want %q", got, expected) - } -} - -func TestPrintln(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - - Println(cmd, "test message") - - expected := "test message\n" - if got := buf.String(); got != expected { - t.Errorf("Println() = %q, want %q", got, expected) - } -} - -func TestPrintf(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetOut(&buf) - - Printf(cmd, "hello %s, count: %d", "world", 42) - - expected := "hello world, count: 42" - if got := buf.String(); got != expected { - t.Errorf("Printf() = %q, want %q", got, expected) - } -} - -func TestError(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetErr(&buf) - - Error(cmd, "error message") - - expected := "error message" - if got := buf.String(); got != expected { - t.Errorf("Error() = %q, want %q", got, expected) - } -} - -func TestErrorf(t *testing.T) { - var buf bytes.Buffer - - cmd := &cobra.Command{} - cmd.SetErr(&buf) - - Errorf(cmd, "error: %s (code: %d)", "failed", 500) - - expected := "error: failed (code: 500)" - if got := buf.String(); got != expected { - t.Errorf("Errorf() = %q, want %q", got, expected) - } -} - -func TestPrintSmartfHumanFormat(t *testing.T) { - var ( - stdoutBuf bytes.Buffer - stderrBuf bytes.Buffer - ) - - cmd := &cobra.Command{} - cmd.SetOut(&stdoutBuf) - cmd.SetErr(&stderrBuf) - AddOutputFlags(cmd) - - // Human format (default) - should write to stdout - PrintSmartf(cmd, "metadata message\n") - - expected := "metadata message\n" - if got := stdoutBuf.String(); got != expected { - t.Errorf("PrintSmartf(human) stdout = %q, want %q", got, expected) - } - - if got := stderrBuf.String(); got != emptyString { - t.Errorf("PrintSmartf(human) stderr = %q, want %q", got, emptyString) - } -} - -func TestPrintSmartfJSONFormat(t *testing.T) { - var ( - stdoutBuf bytes.Buffer - stderrBuf bytes.Buffer - ) - - cmd := &cobra.Command{} - cmd.SetOut(&stdoutBuf) - cmd.SetErr(&stderrBuf) - AddOutputFlags(cmd) - - // Set JSON format - should write to stderr - if err := cmd.Flags().Set("output", string(FormatJSON)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - PrintSmartf(cmd, "metadata message\n") - - expected := "metadata message\n" - - if got := stdoutBuf.String(); got != emptyString { - t.Errorf("PrintSmartf(json) stdout = %q, want %q", got, emptyString) - } - - if got := stderrBuf.String(); got != expected { - t.Errorf("PrintSmartf(json) stderr = %q, want %q", got, expected) - } -} - -func TestPrintSmartfJSONLFormat(t *testing.T) { - var ( - stdoutBuf bytes.Buffer - stderrBuf bytes.Buffer - ) - - cmd := &cobra.Command{} - cmd.SetOut(&stdoutBuf) - cmd.SetErr(&stderrBuf) - AddOutputFlags(cmd) - - // Set JSONL format - should write to stderr - if err := cmd.Flags().Set("output", string(FormatJSONL)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - PrintSmartf(cmd, "Listening to events...\n") - - expected := "Listening to events...\n" - - if got := stdoutBuf.String(); got != emptyString { - t.Errorf("PrintSmartf(jsonl) stdout = %q, want %q", got, emptyString) - } - - if got := stderrBuf.String(); got != expected { - t.Errorf("PrintSmartf(jsonl) stderr = %q, want %q", got, expected) - } -} - -func TestPrintSmartfRawFormat(t *testing.T) { - 
var ( - stdoutBuf bytes.Buffer - stderrBuf bytes.Buffer - ) - - cmd := &cobra.Command{} - cmd.SetOut(&stdoutBuf) - cmd.SetErr(&stderrBuf) - AddOutputFlags(cmd) - - // Set raw format - should write to stderr - if err := cmd.Flags().Set("output", string(FormatRaw)); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - - PrintSmartf(cmd, "Processing...\n") - - expected := "Processing...\n" - - if got := stdoutBuf.String(); got != emptyString { - t.Errorf("PrintSmartf(raw) stdout = %q, want %q", got, emptyString) - } - - if got := stderrBuf.String(); got != expected { - t.Errorf("PrintSmartf(raw) stderr = %q, want %q", got, expected) - } -} - -func TestPrintSmartfWithFormatting(t *testing.T) { - tests := []struct { - name string - outputFormat string - format string - args []interface{} - expectedStdout string - expectedStderr string - }{ - { - name: "human with formatting", - outputFormat: string(FormatHuman), - format: "Found %d items in %s\n", - args: []interface{}{42, "database"}, - expectedStdout: "Found 42 items in database\n", - expectedStderr: emptyString, - }, - { - name: "json with formatting", - outputFormat: string(FormatJSON), - format: "Processing item %d of %d\n", - args: []interface{}{5, 10}, - expectedStdout: emptyString, - expectedStderr: "Processing item 5 of 10\n", - }, - { - name: "jsonl with formatting", - outputFormat: string(FormatJSONL), - format: "CID: %s, Status: %s\n", - args: []interface{}{"bafy123", "complete"}, - expectedStdout: emptyString, - expectedStderr: "CID: bafy123, Status: complete\n", - }, - { - name: "raw with formatting", - outputFormat: string(FormatRaw), - format: "Sync ID: %s\n", - args: []interface{}{"sync-abc-123"}, - expectedStdout: emptyString, - expectedStderr: "Sync ID: sync-abc-123\n", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var ( - stdoutBuf bytes.Buffer - stderrBuf bytes.Buffer - ) - - cmd := &cobra.Command{} - cmd.SetOut(&stdoutBuf) - cmd.SetErr(&stderrBuf) - AddOutputFlags(cmd) - - if tt.outputFormat != string(FormatHuman) { - if err := cmd.Flags().Set("output", tt.outputFormat); err != nil { - t.Fatalf("failed to set flag: %v", err) - } - } - - PrintSmartf(cmd, tt.format, tt.args...) - - if got := stdoutBuf.String(); got != tt.expectedStdout { - t.Errorf("PrintSmartf() stdout = %q, want %q", got, tt.expectedStdout) - } - - if got := stderrBuf.String(); got != tt.expectedStderr { - t.Errorf("PrintSmartf() stderr = %q, want %q", got, tt.expectedStderr) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package presenter + +import ( + "bytes" + "testing" + + "github.com/spf13/cobra" +) + +// Test constants for frequently used values with semantic meaning. 
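+// emptyString exists so assertions read as intent ("this stream should have +// received no output") rather than comparing against a bare "" literal.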
+const ( + emptyString = "" +) + +func TestPrint(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + + Print(cmd, "test message") + + expected := "test message" + if got := buf.String(); got != expected { + t.Errorf("Print() = %q, want %q", got, expected) + } +} + +func TestPrintln(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + + Println(cmd, "test message") + + expected := "test message\n" + if got := buf.String(); got != expected { + t.Errorf("Println() = %q, want %q", got, expected) + } +} + +func TestPrintf(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetOut(&buf) + + Printf(cmd, "hello %s, count: %d", "world", 42) + + expected := "hello world, count: 42" + if got := buf.String(); got != expected { + t.Errorf("Printf() = %q, want %q", got, expected) + } +} + +func TestError(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetErr(&buf) + + Error(cmd, "error message") + + expected := "error message" + if got := buf.String(); got != expected { + t.Errorf("Error() = %q, want %q", got, expected) + } +} + +func TestErrorf(t *testing.T) { + var buf bytes.Buffer + + cmd := &cobra.Command{} + cmd.SetErr(&buf) + + Errorf(cmd, "error: %s (code: %d)", "failed", 500) + + expected := "error: failed (code: 500)" + if got := buf.String(); got != expected { + t.Errorf("Errorf() = %q, want %q", got, expected) + } +} + +func TestPrintSmartfHumanFormat(t *testing.T) { + var ( + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + cmd := &cobra.Command{} + cmd.SetOut(&stdoutBuf) + cmd.SetErr(&stderrBuf) + AddOutputFlags(cmd) + + // Human format (default) - should write to stdout + PrintSmartf(cmd, "metadata message\n") + + expected := "metadata message\n" + if got := stdoutBuf.String(); got != expected { + t.Errorf("PrintSmartf(human) stdout = %q, want %q", got, expected) + } + + if got := stderrBuf.String(); got != emptyString { + t.Errorf("PrintSmartf(human) stderr = %q, want %q", got, emptyString) + } +} + +func TestPrintSmartfJSONFormat(t *testing.T) { + var ( + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + cmd := &cobra.Command{} + cmd.SetOut(&stdoutBuf) + cmd.SetErr(&stderrBuf) + AddOutputFlags(cmd) + + // Set JSON format - should write to stderr + if err := cmd.Flags().Set("output", string(FormatJSON)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + PrintSmartf(cmd, "metadata message\n") + + expected := "metadata message\n" + + if got := stdoutBuf.String(); got != emptyString { + t.Errorf("PrintSmartf(json) stdout = %q, want %q", got, emptyString) + } + + if got := stderrBuf.String(); got != expected { + t.Errorf("PrintSmartf(json) stderr = %q, want %q", got, expected) + } +} + +func TestPrintSmartfJSONLFormat(t *testing.T) { + var ( + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + cmd := &cobra.Command{} + cmd.SetOut(&stdoutBuf) + cmd.SetErr(&stderrBuf) + AddOutputFlags(cmd) + + // Set JSONL format - should write to stderr + if err := cmd.Flags().Set("output", string(FormatJSONL)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + PrintSmartf(cmd, "Listening to events...\n") + + expected := "Listening to events...\n" + + if got := stdoutBuf.String(); got != emptyString { + t.Errorf("PrintSmartf(jsonl) stdout = %q, want %q", got, emptyString) + } + + if got := stderrBuf.String(); got != expected { + t.Errorf("PrintSmartf(jsonl) stderr = %q, want %q", got, expected) + } +} + +func TestPrintSmartfRawFormat(t *testing.T) { + 
var ( + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + cmd := &cobra.Command{} + cmd.SetOut(&stdoutBuf) + cmd.SetErr(&stderrBuf) + AddOutputFlags(cmd) + + // Set raw format - should write to stderr + if err := cmd.Flags().Set("output", string(FormatRaw)); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + + PrintSmartf(cmd, "Processing...\n") + + expected := "Processing...\n" + + if got := stdoutBuf.String(); got != emptyString { + t.Errorf("PrintSmartf(raw) stdout = %q, want %q", got, emptyString) + } + + if got := stderrBuf.String(); got != expected { + t.Errorf("PrintSmartf(raw) stderr = %q, want %q", got, expected) + } +} + +func TestPrintSmartfWithFormatting(t *testing.T) { + tests := []struct { + name string + outputFormat string + format string + args []interface{} + expectedStdout string + expectedStderr string + }{ + { + name: "human with formatting", + outputFormat: string(FormatHuman), + format: "Found %d items in %s\n", + args: []interface{}{42, "database"}, + expectedStdout: "Found 42 items in database\n", + expectedStderr: emptyString, + }, + { + name: "json with formatting", + outputFormat: string(FormatJSON), + format: "Processing item %d of %d\n", + args: []interface{}{5, 10}, + expectedStdout: emptyString, + expectedStderr: "Processing item 5 of 10\n", + }, + { + name: "jsonl with formatting", + outputFormat: string(FormatJSONL), + format: "CID: %s, Status: %s\n", + args: []interface{}{"bafy123", "complete"}, + expectedStdout: emptyString, + expectedStderr: "CID: bafy123, Status: complete\n", + }, + { + name: "raw with formatting", + outputFormat: string(FormatRaw), + format: "Sync ID: %s\n", + args: []interface{}{"sync-abc-123"}, + expectedStdout: emptyString, + expectedStderr: "Sync ID: sync-abc-123\n", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + stdoutBuf bytes.Buffer + stderrBuf bytes.Buffer + ) + + cmd := &cobra.Command{} + cmd.SetOut(&stdoutBuf) + cmd.SetErr(&stderrBuf) + AddOutputFlags(cmd) + + if tt.outputFormat != string(FormatHuman) { + if err := cmd.Flags().Set("output", tt.outputFormat); err != nil { + t.Fatalf("failed to set flag: %v", err) + } + } + + PrintSmartf(cmd, tt.format, tt.args...) 
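+ // PrintSmartf routes through the --output flag registered by AddOutputFlags: the + // human format writes to stdout, while structured formats (json, jsonl, raw) go to + // stderr so that piped structured output stays clean. Exactly one of the two + // buffers should therefore receive each message.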
+ + if got := stdoutBuf.String(); got != tt.expectedStdout { + t.Errorf("PrintSmartf() stdout = %q, want %q", got, tt.expectedStdout) + } + + if got := stderrBuf.String(); got != tt.expectedStderr { + t.Errorf("PrintSmartf() stderr = %q, want %q", got, tt.expectedStderr) + } + }) + } +} diff --git a/cli/util/context/context.go b/cli/util/context/context.go index b40479d39..1d6806672 100644 --- a/cli/util/context/context.go +++ b/cli/util/context/context.go @@ -1,24 +1,24 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package util - -import ( - "context" - - "github.com/agntcy/dir/client" -) - -type ClientContextKeyType string - -const ClientContextKey ClientContextKeyType = "ContextDirClient" - -func SetClientForContext(ctx context.Context, c *client.Client) context.Context { - return context.WithValue(ctx, ClientContextKey, c) -} - -func GetClientFromContext(ctx context.Context) (*client.Client, bool) { - cli, ok := ctx.Value(ClientContextKey).(*client.Client) - - return cli, ok -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "context" + + "github.com/agntcy/dir/client" +) + +type ClientContextKeyType string + +const ClientContextKey ClientContextKeyType = "ContextDirClient" + +func SetClientForContext(ctx context.Context, c *client.Client) context.Context { + return context.WithValue(ctx, ClientContextKey, c) +} + +func GetClientFromContext(ctx context.Context) (*client.Client, bool) { + cli, ok := ctx.Value(ClientContextKey).(*client.Client) + + return cli, ok +} diff --git a/client/LICENSE.md b/client/LICENSE.md index d9a10c0d8..7cd40e552 100644 --- a/client/LICENSE.md +++ b/client/LICENSE.md @@ -1,176 +1,176 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/client/README.md b/client/README.md index 69843c399..6ec5948dc 100644 --- a/client/README.md +++ b/client/README.md @@ -1,191 +1,191 @@ -# Directory Golang SDK - -## Overview - -Dir Golang SDK provides a simple way to interact with the Directory API. -It allows developers to integrate and use Directory functionality from their applications with ease. - -## Features - -The Directory SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface: - -### **Store API** -- **Record Management**: Push records to the store and pull them by reference -- **Metadata Operations**: Look up record metadata without downloading full content -- **Data Lifecycle**: Delete records permanently from the store -- **Referrer Support**: Push and pull artifacts for existing records -- **Sync Management**: Manage storage synchronization policies between Directory servers - -### **Search API** -- **Flexible Search**: Search stored records using text, semantic, and structured queries -- **Advanced Filtering**: Filter results by metadata, content type, and other criteria - -### **Routing API** -- **Network Publishing**: Publish records to make them discoverable across the network -- **Content Discovery**: List and query published records across the network -- **Network Management**: Unpublish records to remove them from network discovery - -### **Signing and Verification** -- **Local Signing**: Sign records locally using private keys or OIDC-based authentication. -- **Remote Verification**: Verify record signatures using the Directory gRPC API - -### **Developer Experience** -- **Async Support**: Non-blocking operations with streaming responses for large datasets -- **Error Handling**: Comprehensive gRPC error handling with detailed error messages -- **Configuration**: Flexible configuration via environment variables or direct instantiation - -## Installation - -1. Initialize the project: -```bash -go mod init example.com/myapp -``` - -2. 
Add the SDK to your project: -```bash -go get github.com/agntcy/dir/client -``` - -## Configuration - -The SDK can be configured via environment variables or direct instantiation. - -### Environment Variables - -| Variable | Description | Default | -|----------|-------------|---------| -| `DIRECTORY_CLIENT_SERVER_ADDRESS` | Directory server address | `0.0.0.0:8888` | -| `DIRECTORY_CLIENT_AUTH_MODE` | Authentication mode: `x509`, `jwt`, or empty for insecure | `""` (insecure) | -| `DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH` | SPIFFE Workload API socket path | `""` | -| `DIRECTORY_CLIENT_JWT_AUDIENCE` | JWT audience for JWT authentication | `""` | - -### Authentication - -The SDK supports three authentication modes: - -#### 1. Insecure (No Authentication) - -For local development only. Not recommended for production. - -**Environment Variables:** -```bash -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -# AUTH_MODE is empty or not set -``` - -**Code Example:** -```go -import ( - "context" - "github.com/agntcy/dir/client" -) - -ctx := context.Background() -config := &client.Config{ - ServerAddress: "localhost:8888", - // AuthMode is empty - insecure connection -} -c, err := client.New(ctx, client.WithConfig(config)) -if err != nil { - // handle error -} -defer c.Close() // Always close to cleanup resources -``` - -#### 2. X509 (X.509-SVID) - -Recommended for production. Requires SPIRE agent. - -**Environment Variables:** -```bash -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -export DIRECTORY_CLIENT_AUTH_MODE="x509" -export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="unix:///run/spire/agent-sockets/api.sock" -``` - -**Code Example:** -```go -import ( - "context" - "github.com/agntcy/dir/client" -) - -ctx := context.Background() -config := &client.Config{ - ServerAddress: "localhost:8888", - AuthMode: "x509", - SpiffeSocketPath: "unix:///run/spire/agent-sockets/api.sock", -} -c, err := client.New(ctx, client.WithConfig(config)) -if err != nil { - // handle error -} -defer c.Close() // Always close to cleanup resources -``` - -#### 3. JWT (JWT-SVID) - -Alternative to X.509 for client authentication. Requires SPIRE agent. - -> **Note**: In JWT mode, the server presents its X.509-SVID via TLS for server -> authentication and encryption, while the client authenticates using a JWT-SVID. -> This provides both transport security and client authentication, following the -> [official SPIFFE JWT pattern](https://github.com/spiffe/go-spiffe/tree/main/examples/spiffe-jwt). - -**Environment Variables:** -```bash -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -export DIRECTORY_CLIENT_AUTH_MODE="jwt" -export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="unix:///run/spire/agent-sockets/api.sock" -export DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://example.org/dir-server" -``` - -**Code Example:** -```go -import ( - "context" - "github.com/agntcy/dir/client" -) - -ctx := context.Background() -config := &client.Config{ - ServerAddress: "localhost:8888", - AuthMode: "jwt", - SpiffeSocketPath: "unix:///run/spire/agent-sockets/api.sock", - JWTAudience: "spiffe://example.org/dir-server", -} -c, err := client.New(ctx, client.WithConfig(config)) -if err != nil { - // handle error -} -defer c.Close() // Always close to cleanup resources -``` - -## Getting Started - -### Prerequisites - -- [Golang](https://golang.org/dl/) - Go programming language - -### 1. 
Server Setup - -**Option A: Local Development Server** - -```bash -# Clone the repository and start the server using Taskfile -task server:start -``` - -**Option B: Custom Server** - -```bash -# Set your Directory server address -export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888" -``` - -### 2. SDK Installation - -```bash -# Add the Directory SDK -go get github.com/agntcy/dir/client -``` +# Directory Golang SDK + +## Overview + +Dir Golang SDK provides a simple way to interact with the Directory API. +It allows developers to integrate and use Directory functionality from their applications with ease. + +## Features + +The Directory SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface: + +### **Store API** +- **Record Management**: Push records to the store and pull them by reference +- **Metadata Operations**: Look up record metadata without downloading full content +- **Data Lifecycle**: Delete records permanently from the store +- **Referrer Support**: Push and pull artifacts for existing records +- **Sync Management**: Manage storage synchronization policies between Directory servers + +### **Search API** +- **Flexible Search**: Search stored records using text, semantic, and structured queries +- **Advanced Filtering**: Filter results by metadata, content type, and other criteria + +### **Routing API** +- **Network Publishing**: Publish records to make them discoverable across the network +- **Content Discovery**: List and query published records across the network +- **Network Management**: Unpublish records to remove them from network discovery + +### **Signing and Verification** +- **Local Signing**: Sign records locally using private keys or OIDC-based authentication. +- **Remote Verification**: Verify record signatures using the Directory gRPC API + +### **Developer Experience** +- **Async Support**: Non-blocking operations with streaming responses for large datasets +- **Error Handling**: Comprehensive gRPC error handling with detailed error messages +- **Configuration**: Flexible configuration via environment variables or direct instantiation + +## Installation + +1. Initialize the project: +```bash +go mod init example.com/myapp +``` + +2. Add the SDK to your project: +```bash +go get github.com/agntcy/dir/client +``` + +## Configuration + +The SDK can be configured via environment variables or direct instantiation. + +### Environment Variables + +| Variable | Description | Default | +|----------|-------------|---------| +| `DIRECTORY_CLIENT_SERVER_ADDRESS` | Directory server address | `0.0.0.0:8888` | +| `DIRECTORY_CLIENT_AUTH_MODE` | Authentication mode: `x509`, `jwt`, or empty for insecure | `""` (insecure) | +| `DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH` | SPIFFE Workload API socket path | `""` | +| `DIRECTORY_CLIENT_JWT_AUDIENCE` | JWT audience for JWT authentication | `""` | + +### Authentication + +The SDK supports three authentication modes: + +#### 1. Insecure (No Authentication) + +For local development only. Not recommended for production. 
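Regardless of the mode you choose, the variables from the table above can also be mapped onto `client.Config` by hand. The following is a minimal sketch of that wiring, assuming only the documented `Config` fields and the `client.New`/`client.WithConfig` calls shown in this README; the `getenvDefault` helper is illustrative and not part of the SDK:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/agntcy/dir/client"
)

// getenvDefault is an illustrative helper, not an SDK function.
func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}

	return def
}

func main() {
	ctx := context.Background()

	// Map the documented DIRECTORY_CLIENT_* variables onto client.Config.
	config := &client.Config{
		ServerAddress:    getenvDefault("DIRECTORY_CLIENT_SERVER_ADDRESS", "0.0.0.0:8888"),
		AuthMode:         os.Getenv("DIRECTORY_CLIENT_AUTH_MODE"), // empty means insecure
		SpiffeSocketPath: os.Getenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH"),
		JWTAudience:      os.Getenv("DIRECTORY_CLIENT_JWT_AUDIENCE"),
	}

	c, err := client.New(ctx, client.WithConfig(config))
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer c.Close() // Always close to clean up resources
}
```

With explicit construction like this, the mode-specific examples below reduce to choosing which `Config` fields to populate.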
+ +**Environment Variables:** +```bash +export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" +# AUTH_MODE is empty or not set +``` + +**Code Example:** +```go +import ( + "context" + "github.com/agntcy/dir/client" +) + +ctx := context.Background() +config := &client.Config{ + ServerAddress: "localhost:8888", + // AuthMode is empty - insecure connection +} +c, err := client.New(ctx, client.WithConfig(config)) +if err != nil { + // handle error +} +defer c.Close() // Always close to cleanup resources +``` + +#### 2. X509 (X.509-SVID) + +Recommended for production. Requires SPIRE agent. + +**Environment Variables:** +```bash +export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" +export DIRECTORY_CLIENT_AUTH_MODE="x509" +export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="unix:///run/spire/agent-sockets/api.sock" +``` + +**Code Example:** +```go +import ( + "context" + "github.com/agntcy/dir/client" +) + +ctx := context.Background() +config := &client.Config{ + ServerAddress: "localhost:8888", + AuthMode: "x509", + SpiffeSocketPath: "unix:///run/spire/agent-sockets/api.sock", +} +c, err := client.New(ctx, client.WithConfig(config)) +if err != nil { + // handle error +} +defer c.Close() // Always close to cleanup resources +``` + +#### 3. JWT (JWT-SVID) + +Alternative to X.509 for client authentication. Requires SPIRE agent. + +> **Note**: In JWT mode, the server presents its X.509-SVID via TLS for server +> authentication and encryption, while the client authenticates using a JWT-SVID. +> This provides both transport security and client authentication, following the +> [official SPIFFE JWT pattern](https://github.com/spiffe/go-spiffe/tree/main/examples/spiffe-jwt). + +**Environment Variables:** +```bash +export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" +export DIRECTORY_CLIENT_AUTH_MODE="jwt" +export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="unix:///run/spire/agent-sockets/api.sock" +export DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://example.org/dir-server" +``` + +**Code Example:** +```go +import ( + "context" + "github.com/agntcy/dir/client" +) + +ctx := context.Background() +config := &client.Config{ + ServerAddress: "localhost:8888", + AuthMode: "jwt", + SpiffeSocketPath: "unix:///run/spire/agent-sockets/api.sock", + JWTAudience: "spiffe://example.org/dir-server", +} +c, err := client.New(ctx, client.WithConfig(config)) +if err != nil { + // handle error +} +defer c.Close() // Always close to cleanup resources +``` + +## Getting Started + +### Prerequisites + +- [Golang](https://golang.org/dl/) - Go programming language + +### 1. Server Setup + +**Option A: Local Development Server** + +```bash +# Clone the repository and start the server using Taskfile +task server:start +``` + +**Option B: Custom Server** + +```bash +# Set your Directory server address +export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888" +``` + +### 2. 
SDK Installation + +```bash +# Add the Directory SDK +go get github.com/agntcy/dir/client +``` diff --git a/client/client.go b/client/client.go index 9459c0341..cb8b8276f 100644 --- a/client/client.go +++ b/client/client.go @@ -1,113 +1,113 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "fmt" - "io" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/spiffe/go-spiffe/v2/workloadapi" - "google.golang.org/grpc" -) - -type Client struct { - storev1.StoreServiceClient - routingv1.RoutingServiceClient - searchv1.SearchServiceClient - storev1.SyncServiceClient - signv1.SignServiceClient - eventsv1.EventServiceClient - - config *Config - authClient *workloadapi.Client - conn *grpc.ClientConn - - // SPIFFE sources for cleanup - bundleSrc io.Closer - x509Src io.Closer - jwtSource io.Closer -} - -func New(ctx context.Context, opts ...Option) (*Client, error) { - // Add auth options with provided context - opts = append(opts, withAuth(ctx)) - - // Load options - options := &options{} - for _, opt := range opts { - if err := opt(options); err != nil { - return nil, fmt.Errorf("failed to load options: %w", err) - } - } - - // Create gRPC client connection - conn, err := grpc.NewClient(options.config.ServerAddress, options.authOpts...) - if err != nil { - return nil, fmt.Errorf("failed to create gRPC client: %w", err) - } - - return &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: options.config, - authClient: options.authClient, - conn: conn, - bundleSrc: options.bundleSrc, - x509Src: options.x509Src, - jwtSource: options.jwtSource, - }, nil -} - -func (c *Client) Close() error { - var errs []error - - // Close SPIFFE sources first (they may be using authClient) - if c.jwtSource != nil { - if err := c.jwtSource.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close JWT source: %w", err)) - } - } - - if c.x509Src != nil { - if err := c.x509Src.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close X.509 source: %w", err)) - } - } - - if c.bundleSrc != nil { - if err := c.bundleSrc.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close bundle source: %w", err)) - } - } - - // Close auth client - if c.authClient != nil { - if err := c.authClient.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close auth client: %w", err)) - } - } - - // Close gRPC connection last - if c.conn != nil { - if err := c.conn.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close gRPC connection: %w", err)) - } - } - - if len(errs) > 0 { - return fmt.Errorf("client close errors: %v", errs) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "fmt" + "io" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + 
signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/spiffe/go-spiffe/v2/workloadapi" + "google.golang.org/grpc" +) + +type Client struct { + storev1.StoreServiceClient + routingv1.RoutingServiceClient + searchv1.SearchServiceClient + storev1.SyncServiceClient + signv1.SignServiceClient + eventsv1.EventServiceClient + + config *Config + authClient *workloadapi.Client + conn *grpc.ClientConn + + // SPIFFE sources for cleanup + bundleSrc io.Closer + x509Src io.Closer + jwtSource io.Closer +} + +func New(ctx context.Context, opts ...Option) (*Client, error) { + // Add auth options with provided context + opts = append(opts, withAuth(ctx)) + + // Load options + options := &options{} + for _, opt := range opts { + if err := opt(options); err != nil { + return nil, fmt.Errorf("failed to load options: %w", err) + } + } + + // Create gRPC client connection + conn, err := grpc.NewClient(options.config.ServerAddress, options.authOpts...) + if err != nil { + return nil, fmt.Errorf("failed to create gRPC client: %w", err) + } + + return &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: options.config, + authClient: options.authClient, + conn: conn, + bundleSrc: options.bundleSrc, + x509Src: options.x509Src, + jwtSource: options.jwtSource, + }, nil +} + +func (c *Client) Close() error { + var errs []error + + // Close SPIFFE sources first (they may be using authClient) + if c.jwtSource != nil { + if err := c.jwtSource.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close JWT source: %w", err)) + } + } + + if c.x509Src != nil { + if err := c.x509Src.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close X.509 source: %w", err)) + } + } + + if c.bundleSrc != nil { + if err := c.bundleSrc.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close bundle source: %w", err)) + } + } + + // Close auth client + if c.authClient != nil { + if err := c.authClient.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close auth client: %w", err)) + } + } + + // Close gRPC connection last + if c.conn != nil { + if err := c.conn.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close gRPC connection: %w", err)) + } + } + + if len(errs) > 0 { + return fmt.Errorf("client close errors: %v", errs) + } + + return nil +} diff --git a/client/client_test.go b/client/client_test.go index 458eae890..2c057a20f 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -1,783 +1,783 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "errors" - "net" - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/test/bufconn" -) - -const ( - bufSize = 1024 * 1024 - - // Test server constants. 
- testServerBufnet = "bufnet" - testServerLocalhost = "127.0.0.1:0" - testServerUnreachable = "localhost:9999" - testServerInsecureMode = "" // Empty string means insecure - - // Timeout constants. - testContextTimeout = 5 * time.Second - testContextShortTimeout = 1 * time.Second - testContextVeryShort = 10 * time.Millisecond - testConnectionCloseWait = 50 * time.Millisecond - testCleanupWait = 10 * time.Millisecond - testConnectionStateCheck = 100 * time.Millisecond -) - -// createTestServer creates a test gRPC server with all required services. -func createTestServer(t *testing.T) (*grpc.Server, *bufconn.Listener) { - t.Helper() - - lis := bufconn.Listen(bufSize) - s := grpc.NewServer() - - // Register minimal service implementations (just to satisfy the interface) - storev1.RegisterStoreServiceServer(s, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(s, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(s, &mockSearchService{}) - storev1.RegisterSyncServiceServer(s, &mockSyncService{}) - signv1.RegisterSignServiceServer(s, &mockSignService{}) - eventsv1.RegisterEventServiceServer(s, &mockEventService{}) - - go func() { - if err := s.Serve(lis); err != nil { - t.Logf("Server exited with error: %v", err) - } - }() - - return s, lis -} - -// bufDialer creates a dialer for bufconn listener. -func bufDialer(lis *bufconn.Listener) func(context.Context, string) (net.Conn, error) { - return func(ctx context.Context, _ string) (net.Conn, error) { - return lis.DialContext(ctx) - } -} - -// Mock service implementations (minimal). -type mockStoreService struct { - storev1.UnimplementedStoreServiceServer -} - -type mockRoutingService struct { - routingv1.UnimplementedRoutingServiceServer -} - -type mockSearchService struct { - searchv1.UnimplementedSearchServiceServer -} - -type mockSyncService struct { - storev1.UnimplementedSyncServiceServer -} - -type mockSignService struct { - signv1.UnimplementedSignServiceServer -} - -type mockEventService struct { - eventsv1.UnimplementedEventServiceServer -} - -// TestNew_StoresGRPCConnection tests that New() properly stores the gRPC connection. 
-func TestNew_StoresGRPCConnection(t *testing.T) { - ctx := context.Background() - - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create client with bufconn dialer - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: &Config{ - ServerAddress: testServerBufnet, - }, - conn: conn, // This is what Issue 1 fixed - } - - // Verify connection is stored - if client.conn == nil { - t.Error("Expected conn to be stored in client, but it was nil") - } - - // Verify connection is the same instance - if client.conn != conn { - t.Error("Expected conn to match the created connection") - } - - // Clean up - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - - // Wait for connection to be fully closed - _ = ctx - - time.Sleep(testCleanupWait) -} - -// TestClientClose_ClosesGRPCConnection tests that Close() properly closes the gRPC connection. -func TestClientClose_ClosesGRPCConnection(t *testing.T) { - ctx := context.Background() - - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create client with bufconn dialer - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: &Config{ - ServerAddress: testServerBufnet, - }, - conn: conn, - } - - // Verify connection is open (check state) - initialState := conn.GetState() - t.Logf("Initial connection state: %v", initialState) - - // Close the client - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - // Give the connection time to close - time.Sleep(testConnectionCloseWait) - - // Verify connection state changed (it should be shutting down or shut down) - finalState := conn.GetState() - t.Logf("Final connection state: %v", finalState) - - // The connection should no longer be in a ready or connecting state after close - // Note: This is a best-effort check as gRPC connection state transitions are async - _ = ctx - _ = finalState -} - -// TestClientClose_WithNilConnection tests that Close() handles nil connection gracefully. 
-func TestClientClose_WithNilConnection(t *testing.T) { - client := &Client{ - conn: nil, // No connection - } - - // Close should not panic or error with nil connection - if err := client.Close(); err != nil { - t.Errorf("Close() with nil connection returned error: %v", err) - } -} - -// TestClientClose_MultipleCalls tests that calling Close() multiple times doesn't panic. -func TestClientClose_MultipleCalls(t *testing.T) { - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create client with bufconn dialer - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: &Config{ - ServerAddress: testServerBufnet, - }, - conn: conn, - } - - // First close - if err := client.Close(); err != nil { - t.Errorf("First Close() returned error: %v", err) - } - - // Second close - should not panic, but may return error (closing already closed connection) - err = client.Close() - t.Logf("Second Close() returned: %v", err) - // We don't fail on error here because closing an already-closed connection may error -} - -// TestClientClose_AggregatesErrors tests that Close() properly aggregates multiple errors. -func TestClientClose_AggregatesErrors(t *testing.T) { - // Create a client with a connection that's already been closed externally - server, lis := createTestServer(t) - defer server.Stop() - - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - // Close connection before client.Close() - if err := conn.Close(); err != nil { - t.Fatalf("Failed to close connection: %v", err) - } - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: &Config{ - ServerAddress: testServerBufnet, - }, - conn: conn, - } - - // Close should handle the already-closed connection - err = client.Close() - // We may or may not get an error depending on gRPC's handling of double-close - t.Logf("Close() on already-closed connection returned: %v", err) -} - -// TestNew_WithInsecureConfig tests creating a client with insecure configuration. 
-func TestNew_WithInsecureConfig(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) - defer cancel() - - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Start a real TCP listener to test address resolution - lc := net.ListenConfig{} - - realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - defer realLis.Close() - - addr := realLis.Addr().String() - - // Start gRPC server on real listener - realServer := grpc.NewServer() - storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) - storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) - signv1.RegisterSignServiceServer(realServer, &mockSignService{}) - eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) - - go func() { - _ = realServer.Serve(realLis) - }() - - defer realServer.Stop() - - // Create client using New() with insecure config - client, err := New(ctx, WithConfig(&Config{ - ServerAddress: addr, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - if err != nil { - t.Fatalf("Failed to create client: %v", err) - } - - defer func() { - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - }() - - // Verify connection is stored - if client.conn == nil { - t.Error("Expected conn to be stored in client after New(), but it was nil") - } - - // Verify config is stored - if client.config == nil { - t.Error("Expected config to be stored in client") - } - - if client.config.ServerAddress != addr { - t.Errorf("Expected ServerAddress to be %q, got %q", addr, client.config.ServerAddress) - } - - // Use bufconn instead for testing - _ = lis -} - -// TestNew_WithMissingConfig tests that New() returns error when config is missing. -func TestNew_WithMissingConfig(t *testing.T) { - ctx := context.Background() - - // Try to create client without config - _, err := New(ctx) - if err == nil { - t.Error("Expected error when creating client without config, got nil") - } - - // Error should mention config is required - expectedMsg := "config is required" - if err != nil && err.Error() != expectedMsg { - t.Logf("Got error: %v", err) - // Don't fail, just log - the exact error message might vary - } -} - -// ============================================================================ -// Issue 2: Client Context Support Tests -// ============================================================================ - -// TestNew_AcceptsContext tests that New() accepts a context parameter. 
-func TestNew_AcceptsContext(t *testing.T) { - // Create context with timeout - ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) - defer cancel() - - // Start a real TCP listener to test address resolution - lc := net.ListenConfig{} - - realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - defer realLis.Close() - - addr := realLis.Addr().String() - - // Start gRPC server on real listener - realServer := grpc.NewServer() - storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) - storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) - signv1.RegisterSignServiceServer(realServer, &mockSignService{}) - eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) - - go func() { - _ = realServer.Serve(realLis) - }() - - defer realServer.Stop() - - // New() should accept the context - client, err := New(ctx, WithConfig(&Config{ - ServerAddress: addr, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - if err != nil { - t.Fatalf("New() with context failed: %v", err) - } - - defer func() { - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - }() - - // Verify client was created successfully - if client == nil { - t.Error("Expected client to be created, got nil") - } -} - -// TestNew_WithCancelledContext tests that New() handles cancelled context appropriately. -func TestNew_WithCancelledContext(t *testing.T) { - // Create already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - // New() should handle cancelled context - // Note: This may or may not fail depending on how quickly gRPC detects cancellation - _, err := New(ctx, WithConfig(&Config{ - ServerAddress: testServerUnreachable, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - - // We don't strictly require an error here because gRPC client creation is lazy - // But if there is an error, log it - if err != nil { - t.Logf("New() with cancelled context returned error (expected): %v", err) - } else { - t.Logf("New() with cancelled context succeeded (gRPC lazy connection)") - } -} - -// TestNew_WithTimeoutContext tests that New() respects context timeout. -func TestNew_WithTimeoutContext(t *testing.T) { - // Create context with very short timeout - ctx, cancel := context.WithTimeout(context.Background(), testContextVeryShort) - defer cancel() - - // Try to create client - may succeed because gRPC is lazy - client, err := New(ctx, WithConfig(&Config{ - ServerAddress: testServerUnreachable, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - if err != nil { - t.Logf("New() with timeout context returned error: %v", err) - } else if client != nil { - t.Logf("New() with timeout context succeeded (gRPC lazy connection)") - - _ = client.Close() - } -} - -// TestNew_ContextUsedInAuth tests that the context is actually passed to auth setup. 
-func TestNew_ContextUsedInAuth(t *testing.T) { - // This test verifies that the context parameter is actually used - // by checking that withAuth() receives the correct context - - // Create a context with a specific value - type contextKey string - - const ( - testKey contextKey = "test-key" - testValue contextKey = "test-value" - ) - - ctx := context.WithValue(context.Background(), testKey, testValue) - - // Start a real TCP listener - lc := net.ListenConfig{} - - realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - defer realLis.Close() - - addr := realLis.Addr().String() - - // Start gRPC server - realServer := grpc.NewServer() - storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) - storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) - signv1.RegisterSignServiceServer(realServer, &mockSignService{}) - eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) - - go func() { - _ = realServer.Serve(realLis) - }() - - defer realServer.Stop() - - // Create client with context containing value - client, err := New(ctx, WithConfig(&Config{ - ServerAddress: addr, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - if err != nil { - t.Fatalf("New() failed: %v", err) - } - - defer func() { - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - }() - - // If we got here, the context was accepted - // (We can't easily verify it was used internally without modifying the code) - if client == nil { - t.Error("Expected client to be created") - } -} - -// TestNew_MultipleClientsWithDifferentContexts tests creating multiple clients with different contexts. 
-func TestNew_MultipleClientsWithDifferentContexts(t *testing.T) { - // Start a real TCP listener - lc := net.ListenConfig{} - - realLis, err := lc.Listen(context.Background(), "tcp", testServerLocalhost) - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - defer realLis.Close() - - addr := realLis.Addr().String() - - // Start gRPC server - realServer := grpc.NewServer() - storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) - storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) - signv1.RegisterSignServiceServer(realServer, &mockSignService{}) - eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) - - go func() { - _ = realServer.Serve(realLis) - }() - - defer realServer.Stop() - - config := &Config{ - ServerAddress: addr, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - } - - // Create first client with one context - ctx1, cancel1 := context.WithTimeout(context.Background(), testContextTimeout) - defer cancel1() - - client1, err := New(ctx1, WithConfig(config)) - if err != nil { - t.Fatalf("Failed to create first client: %v", err) - } - - defer func() { - if err := client1.Close(); err != nil { - t.Errorf("Failed to close first client: %v", err) - } - }() - - // Create second client with different context - ctx2, cancel2 := context.WithTimeout(context.Background(), testContextTimeout) - defer cancel2() - - client2, err := New(ctx2, WithConfig(config)) - if err != nil { - t.Fatalf("Failed to create second client: %v", err) - } - - defer func() { - if err := client2.Close(); err != nil { - t.Errorf("Failed to close second client: %v", err) - } - }() - - // Both clients should be independent - if client1 == nil || client2 == nil { - t.Error("Expected both clients to be created") - } - - if client1 == client2 { - t.Error("Expected clients to be different instances") - } -} - -// TestNew_BackgroundContext tests that New() works with background context. 
-func TestNew_BackgroundContext(t *testing.T) { - // Start a real TCP listener - lc := net.ListenConfig{} - - realLis, err := lc.Listen(context.Background(), "tcp", testServerLocalhost) - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - defer realLis.Close() - - addr := realLis.Addr().String() - - // Start gRPC server - realServer := grpc.NewServer() - storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) - routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) - searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) - storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) - signv1.RegisterSignServiceServer(realServer, &mockSignService{}) - eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) - - go func() { - _ = realServer.Serve(realLis) - }() - - defer realServer.Stop() - - // Create client with background context - client, err := New(context.Background(), WithConfig(&Config{ - ServerAddress: addr, - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - if err != nil { - t.Fatalf("New() with background context failed: %v", err) - } - - defer func() { - if err := client.Close(); err != nil { - t.Errorf("Failed to close client: %v", err) - } - }() - - if client == nil { - t.Error("Expected client to be created with background context") - } -} - -// TestClientClose_WithAllNilResources tests Close() with no resources to clean up. -func TestClientClose_WithAllNilResources(t *testing.T) { - client := &Client{ - conn: nil, - authClient: nil, - bundleSrc: nil, - x509Src: nil, - jwtSource: nil, - } - - // Should succeed without any errors - err := client.Close() - if err != nil { - t.Errorf("Close() with all nil resources returned error: %v", err) - } -} - -// TestClientClose_ErrorOrdering tests that Close() handles errors in correct order. -func TestClientClose_ErrorOrdering(t *testing.T) { - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create client with connection - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - conn: conn, - // Other resources are nil - } - - // Close should succeed - err = client.Close() - if err != nil { - t.Logf("Close() returned error: %v", err) - } -} - -// TestClientClose_PartialResources tests Close() with some resources present. 
-func TestClientClose_PartialResources(t *testing.T) { - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create client with only connection (no SPIFFE resources) - conn, err := grpc.NewClient( - testServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - client := &Client{ - conn: conn, - authClient: nil, // No auth client - bundleSrc: nil, // No bundle source - x509Src: nil, // No x509 source - jwtSource: nil, // No JWT source - } - - // Close should handle partial resources gracefully - err = client.Close() - if err != nil { - t.Errorf("Close() with partial resources returned error: %v", err) - } -} - -// TestNew_OptionError tests that New() returns error when option fails. -func TestNew_OptionError(t *testing.T) { - ctx := context.Background() - - // Create an option that returns an error - testErr := errors.New("test option error") - errorOpt := func(opts *options) error { - return testErr - } - - // New() should fail with option error - _, err := New(ctx, errorOpt) - if err == nil { - t.Error("Expected error when option fails, got nil") - } -} - -// TestNew_GRPCClientCreationError tests error handling during gRPC client creation. -func TestNew_GRPCClientCreationError(t *testing.T) { - ctx := context.Background() - - // Use invalid address that will cause grpc.NewClient to fail - // Note: grpc.NewClient is lazy, so this might not fail immediately - _, err := New(ctx, WithConfig(&Config{ - ServerAddress: "", // Empty address - AuthMode: testServerInsecureMode, - SpiffeSocketPath: "", - })) - // This may or may not fail depending on gRPC's validation - if err != nil { - t.Logf("New() with empty address returned error (expected): %v", err) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "errors" + "net" + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" +) + +const ( + bufSize = 1024 * 1024 + + // Test server constants. + testServerBufnet = "bufnet" + testServerLocalhost = "127.0.0.1:0" + testServerUnreachable = "localhost:9999" + testServerInsecureMode = "" // Empty string means insecure + + // Timeout constants. + testContextTimeout = 5 * time.Second + testContextShortTimeout = 1 * time.Second + testContextVeryShort = 10 * time.Millisecond + testConnectionCloseWait = 50 * time.Millisecond + testCleanupWait = 10 * time.Millisecond + testConnectionStateCheck = 100 * time.Millisecond +) + +// createTestServer creates a test gRPC server with all required services. 
+func createTestServer(t *testing.T) (*grpc.Server, *bufconn.Listener) { + t.Helper() + + lis := bufconn.Listen(bufSize) + s := grpc.NewServer() + + // Register minimal service implementations (just to satisfy the interface) + storev1.RegisterStoreServiceServer(s, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(s, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(s, &mockSearchService{}) + storev1.RegisterSyncServiceServer(s, &mockSyncService{}) + signv1.RegisterSignServiceServer(s, &mockSignService{}) + eventsv1.RegisterEventServiceServer(s, &mockEventService{}) + + go func() { + if err := s.Serve(lis); err != nil { + t.Logf("Server exited with error: %v", err) + } + }() + + return s, lis +} + +// bufDialer creates a dialer for bufconn listener. +func bufDialer(lis *bufconn.Listener) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, _ string) (net.Conn, error) { + return lis.DialContext(ctx) + } +} + +// Mock service implementations (minimal). +type mockStoreService struct { + storev1.UnimplementedStoreServiceServer +} + +type mockRoutingService struct { + routingv1.UnimplementedRoutingServiceServer +} + +type mockSearchService struct { + searchv1.UnimplementedSearchServiceServer +} + +type mockSyncService struct { + storev1.UnimplementedSyncServiceServer +} + +type mockSignService struct { + signv1.UnimplementedSignServiceServer +} + +type mockEventService struct { + eventsv1.UnimplementedEventServiceServer +} + +// TestNew_StoresGRPCConnection tests that New() properly stores the gRPC connection. +func TestNew_StoresGRPCConnection(t *testing.T) { + ctx := context.Background() + + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create client with bufconn dialer + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: &Config{ + ServerAddress: testServerBufnet, + }, + conn: conn, // This is what Issue 1 fixed + } + + // Verify connection is stored + if client.conn == nil { + t.Error("Expected conn to be stored in client, but it was nil") + } + + // Verify connection is the same instance + if client.conn != conn { + t.Error("Expected conn to match the created connection") + } + + // Clean up + if err := client.Close(); err != nil { + t.Errorf("Failed to close client: %v", err) + } + + // Wait for connection to be fully closed + _ = ctx + + time.Sleep(testCleanupWait) +} + +// TestClientClose_ClosesGRPCConnection tests that Close() properly closes the gRPC connection. 
+func TestClientClose_ClosesGRPCConnection(t *testing.T) { + ctx := context.Background() + + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create client with bufconn dialer + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: &Config{ + ServerAddress: testServerBufnet, + }, + conn: conn, + } + + // Verify connection is open (check state) + initialState := conn.GetState() + t.Logf("Initial connection state: %v", initialState) + + // Close the client + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + // Give the connection time to close + time.Sleep(testConnectionCloseWait) + + // Verify connection state changed (it should be shutting down or shut down) + finalState := conn.GetState() + t.Logf("Final connection state: %v", finalState) + + // The connection should no longer be in a ready or connecting state after close + // Note: This is a best-effort check as gRPC connection state transitions are async + _ = ctx + _ = finalState +} + +// TestClientClose_WithNilConnection tests that Close() handles nil connection gracefully. +func TestClientClose_WithNilConnection(t *testing.T) { + client := &Client{ + conn: nil, // No connection + } + + // Close should not panic or error with nil connection + if err := client.Close(); err != nil { + t.Errorf("Close() with nil connection returned error: %v", err) + } +} + +// TestClientClose_MultipleCalls tests that calling Close() multiple times doesn't panic. +func TestClientClose_MultipleCalls(t *testing.T) { + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create client with bufconn dialer + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: &Config{ + ServerAddress: testServerBufnet, + }, + conn: conn, + } + + // First close + if err := client.Close(); err != nil { + t.Errorf("First Close() returned error: %v", err) + } + + // Second close - should not panic, but may return error (closing already closed connection) + err = client.Close() + t.Logf("Second Close() returned: %v", err) + // We don't fail on error here because closing an already-closed connection may error +} + +// TestClientClose_AggregatesErrors tests that Close() properly aggregates multiple errors. 
+func TestClientClose_AggregatesErrors(t *testing.T) { + // Create a client with a connection that's already been closed externally + server, lis := createTestServer(t) + defer server.Stop() + + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + // Close connection before client.Close() + if err := conn.Close(); err != nil { + t.Fatalf("Failed to close connection: %v", err) + } + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: &Config{ + ServerAddress: testServerBufnet, + }, + conn: conn, + } + + // Close should handle the already-closed connection + err = client.Close() + // We may or may not get an error depending on gRPC's handling of double-close + t.Logf("Close() on already-closed connection returned: %v", err) +} + +// TestNew_WithInsecureConfig tests creating a client with insecure configuration. +func TestNew_WithInsecureConfig(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) + defer cancel() + + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Start a real TCP listener to test address resolution + lc := net.ListenConfig{} + + realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + defer realLis.Close() + + addr := realLis.Addr().String() + + // Start gRPC server on real listener + realServer := grpc.NewServer() + storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) + storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) + signv1.RegisterSignServiceServer(realServer, &mockSignService{}) + eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) + + go func() { + _ = realServer.Serve(realLis) + }() + + defer realServer.Stop() + + // Create client using New() with insecure config + client, err := New(ctx, WithConfig(&Config{ + ServerAddress: addr, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + if err != nil { + t.Fatalf("Failed to create client: %v", err) + } + + defer func() { + if err := client.Close(); err != nil { + t.Errorf("Failed to close client: %v", err) + } + }() + + // Verify connection is stored + if client.conn == nil { + t.Error("Expected conn to be stored in client after New(), but it was nil") + } + + // Verify config is stored + if client.config == nil { + t.Error("Expected config to be stored in client") + } + + if client.config.ServerAddress != addr { + t.Errorf("Expected ServerAddress to be %q, got %q", addr, client.config.ServerAddress) + } + + // Use bufconn instead for testing + _ = lis +} + +// TestNew_WithMissingConfig tests that New() returns error when config is missing. 
+func TestNew_WithMissingConfig(t *testing.T) { + ctx := context.Background() + + // Try to create client without config + _, err := New(ctx) + if err == nil { + t.Error("Expected error when creating client without config, got nil") + } + + // Error should mention config is required + expectedMsg := "config is required" + if err != nil && err.Error() != expectedMsg { + t.Logf("Got error: %v", err) + // Don't fail, just log - the exact error message might vary + } +} + +// ============================================================================ +// Issue 2: Client Context Support Tests +// ============================================================================ + +// TestNew_AcceptsContext tests that New() accepts a context parameter. +func TestNew_AcceptsContext(t *testing.T) { + // Create context with timeout + ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) + defer cancel() + + // Start a real TCP listener to test address resolution + lc := net.ListenConfig{} + + realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + defer realLis.Close() + + addr := realLis.Addr().String() + + // Start gRPC server on real listener + realServer := grpc.NewServer() + storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) + storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) + signv1.RegisterSignServiceServer(realServer, &mockSignService{}) + eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) + + go func() { + _ = realServer.Serve(realLis) + }() + + defer realServer.Stop() + + // New() should accept the context + client, err := New(ctx, WithConfig(&Config{ + ServerAddress: addr, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + if err != nil { + t.Fatalf("New() with context failed: %v", err) + } + + defer func() { + if err := client.Close(); err != nil { + t.Errorf("Failed to close client: %v", err) + } + }() + + // Verify client was created successfully + if client == nil { + t.Error("Expected client to be created, got nil") + } +} + +// TestNew_WithCancelledContext tests that New() handles cancelled context appropriately. +func TestNew_WithCancelledContext(t *testing.T) { + // Create already-cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + // New() should handle cancelled context + // Note: This may or may not fail depending on how quickly gRPC detects cancellation + _, err := New(ctx, WithConfig(&Config{ + ServerAddress: testServerUnreachable, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + + // We don't strictly require an error here because gRPC client creation is lazy + // But if there is an error, log it + if err != nil { + t.Logf("New() with cancelled context returned error (expected): %v", err) + } else { + t.Logf("New() with cancelled context succeeded (gRPC lazy connection)") + } +} + +// TestNew_WithTimeoutContext tests that New() respects context timeout. 
+func TestNew_WithTimeoutContext(t *testing.T) { + // Create context with very short timeout + ctx, cancel := context.WithTimeout(context.Background(), testContextVeryShort) + defer cancel() + + // Try to create client - may succeed because gRPC is lazy + client, err := New(ctx, WithConfig(&Config{ + ServerAddress: testServerUnreachable, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + if err != nil { + t.Logf("New() with timeout context returned error: %v", err) + } else if client != nil { + t.Logf("New() with timeout context succeeded (gRPC lazy connection)") + + _ = client.Close() + } +} + +// TestNew_ContextUsedInAuth tests that the context is actually passed to auth setup. +func TestNew_ContextUsedInAuth(t *testing.T) { + // This test verifies that the context parameter is actually used + // by checking that withAuth() receives the correct context + + // Create a context with a specific value + type contextKey string + + const ( + testKey contextKey = "test-key" + testValue contextKey = "test-value" + ) + + ctx := context.WithValue(context.Background(), testKey, testValue) + + // Start a real TCP listener + lc := net.ListenConfig{} + + realLis, err := lc.Listen(ctx, "tcp", testServerLocalhost) + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + defer realLis.Close() + + addr := realLis.Addr().String() + + // Start gRPC server + realServer := grpc.NewServer() + storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) + storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) + signv1.RegisterSignServiceServer(realServer, &mockSignService{}) + eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) + + go func() { + _ = realServer.Serve(realLis) + }() + + defer realServer.Stop() + + // Create client with context containing value + client, err := New(ctx, WithConfig(&Config{ + ServerAddress: addr, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + if err != nil { + t.Fatalf("New() failed: %v", err) + } + + defer func() { + if err := client.Close(); err != nil { + t.Errorf("Failed to close client: %v", err) + } + }() + + // If we got here, the context was accepted + // (We can't easily verify it was used internally without modifying the code) + if client == nil { + t.Error("Expected client to be created") + } +} + +// TestNew_MultipleClientsWithDifferentContexts tests creating multiple clients with different contexts. 
+func TestNew_MultipleClientsWithDifferentContexts(t *testing.T) { + // Start a real TCP listener + lc := net.ListenConfig{} + + realLis, err := lc.Listen(context.Background(), "tcp", testServerLocalhost) + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + defer realLis.Close() + + addr := realLis.Addr().String() + + // Start gRPC server + realServer := grpc.NewServer() + storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) + storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) + signv1.RegisterSignServiceServer(realServer, &mockSignService{}) + eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) + + go func() { + _ = realServer.Serve(realLis) + }() + + defer realServer.Stop() + + config := &Config{ + ServerAddress: addr, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + } + + // Create first client with one context + ctx1, cancel1 := context.WithTimeout(context.Background(), testContextTimeout) + defer cancel1() + + client1, err := New(ctx1, WithConfig(config)) + if err != nil { + t.Fatalf("Failed to create first client: %v", err) + } + + defer func() { + if err := client1.Close(); err != nil { + t.Errorf("Failed to close first client: %v", err) + } + }() + + // Create second client with different context + ctx2, cancel2 := context.WithTimeout(context.Background(), testContextTimeout) + defer cancel2() + + client2, err := New(ctx2, WithConfig(config)) + if err != nil { + t.Fatalf("Failed to create second client: %v", err) + } + + defer func() { + if err := client2.Close(); err != nil { + t.Errorf("Failed to close second client: %v", err) + } + }() + + // Both clients should be independent + if client1 == nil || client2 == nil { + t.Error("Expected both clients to be created") + } + + if client1 == client2 { + t.Error("Expected clients to be different instances") + } +} + +// TestNew_BackgroundContext tests that New() works with background context. 
+func TestNew_BackgroundContext(t *testing.T) { + // Start a real TCP listener + lc := net.ListenConfig{} + + realLis, err := lc.Listen(context.Background(), "tcp", testServerLocalhost) + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + defer realLis.Close() + + addr := realLis.Addr().String() + + // Start gRPC server + realServer := grpc.NewServer() + storev1.RegisterStoreServiceServer(realServer, &mockStoreService{}) + routingv1.RegisterRoutingServiceServer(realServer, &mockRoutingService{}) + searchv1.RegisterSearchServiceServer(realServer, &mockSearchService{}) + storev1.RegisterSyncServiceServer(realServer, &mockSyncService{}) + signv1.RegisterSignServiceServer(realServer, &mockSignService{}) + eventsv1.RegisterEventServiceServer(realServer, &mockEventService{}) + + go func() { + _ = realServer.Serve(realLis) + }() + + defer realServer.Stop() + + // Create client with background context + client, err := New(context.Background(), WithConfig(&Config{ + ServerAddress: addr, + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + if err != nil { + t.Fatalf("New() with background context failed: %v", err) + } + + defer func() { + if err := client.Close(); err != nil { + t.Errorf("Failed to close client: %v", err) + } + }() + + if client == nil { + t.Error("Expected client to be created with background context") + } +} + +// TestClientClose_WithAllNilResources tests Close() with no resources to clean up. +func TestClientClose_WithAllNilResources(t *testing.T) { + client := &Client{ + conn: nil, + authClient: nil, + bundleSrc: nil, + x509Src: nil, + jwtSource: nil, + } + + // Should succeed without any errors + err := client.Close() + if err != nil { + t.Errorf("Close() with all nil resources returned error: %v", err) + } +} + +// TestClientClose_ErrorOrdering tests that Close() handles errors in correct order. +func TestClientClose_ErrorOrdering(t *testing.T) { + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create client with connection + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + conn: conn, + // Other resources are nil + } + + // Close should succeed + err = client.Close() + if err != nil { + t.Logf("Close() returned error: %v", err) + } +} + +// TestClientClose_PartialResources tests Close() with some resources present. 
+func TestClientClose_PartialResources(t *testing.T) { + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create client with only connection (no SPIFFE resources) + conn, err := grpc.NewClient( + testServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + client := &Client{ + conn: conn, + authClient: nil, // No auth client + bundleSrc: nil, // No bundle source + x509Src: nil, // No x509 source + jwtSource: nil, // No JWT source + } + + // Close should handle partial resources gracefully + err = client.Close() + if err != nil { + t.Errorf("Close() with partial resources returned error: %v", err) + } +} + +// TestNew_OptionError tests that New() returns error when option fails. +func TestNew_OptionError(t *testing.T) { + ctx := context.Background() + + // Create an option that returns an error + testErr := errors.New("test option error") + errorOpt := func(opts *options) error { + return testErr + } + + // New() should fail with option error + _, err := New(ctx, errorOpt) + if err == nil { + t.Error("Expected error when option fails, got nil") + } +} + +// TestNew_GRPCClientCreationError tests error handling during gRPC client creation. +func TestNew_GRPCClientCreationError(t *testing.T) { + ctx := context.Background() + + // Use invalid address that will cause grpc.NewClient to fail + // Note: grpc.NewClient is lazy, so this might not fail immediately + _, err := New(ctx, WithConfig(&Config{ + ServerAddress: "", // Empty address + AuthMode: testServerInsecureMode, + SpiffeSocketPath: "", + })) + // This may or may not fail depending on gRPC's validation + if err != nil { + t.Logf("New() with empty address returned error (expected): %v", err) + } +} diff --git a/client/config.go b/client/config.go index db185e045..47aa47a36 100644 --- a/client/config.go +++ b/client/config.go @@ -1,87 +1,87 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "fmt" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" -) - -const ( - DefaultEnvPrefix = "DIRECTORY_CLIENT" - - DefaultServerAddress = "0.0.0.0:8888" - DefaultTlsSkipVerify = false -) - -var DefaultConfig = Config{ - ServerAddress: DefaultServerAddress, -} - -type Config struct { - ServerAddress string `json:"server_address,omitempty" mapstructure:"server_address"` - TlsSkipVerify bool `json:"tls_skip_verify,omitempty" mapstructure:"tls_skip_verify"` - TlsCertFile string `json:"tls_cert_file,omitempty" mapstructure:"tls_cert_file"` - TlsKeyFile string `json:"tls_key_file,omitempty" mapstructure:"tls_key_file"` - TlsCAFile string `json:"tls_ca_file,omitempty" mapstructure:"tls_ca_file"` - SpiffeSocketPath string `json:"spiffe_socket_path,omitempty" mapstructure:"spiffe_socket_path"` - SpiffeToken string `json:"spiffe_token,omitempty" mapstructure:"spiffe_token"` - AuthMode string `json:"auth_mode,omitempty" mapstructure:"auth_mode"` - JWTAudience string `json:"jwt_audience,omitempty" mapstructure:"jwt_audience"` -} - -func LoadConfig() (*Config, error) { - v := viper.NewWithOptions( - viper.KeyDelimiter("."), - viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), - ) - - v.SetEnvPrefix(DefaultEnvPrefix) - v.AllowEmptyEnv(true) - v.AutomaticEnv() - - _ = v.BindEnv("server_address") - v.SetDefault("server_address", DefaultServerAddress) - - _ = 
v.BindEnv("tls_skip_verify") - v.SetDefault("tls_skip_verify", DefaultTlsSkipVerify) - - _ = v.BindEnv("spiffe_socket_path") - v.SetDefault("spiffe_socket_path", "") - - _ = v.BindEnv("spiffe_token") - v.SetDefault("spiffe_token", "") - - _ = v.BindEnv("auth_mode") - v.SetDefault("auth_mode", "") - - _ = v.BindEnv("jwt_audience") - v.SetDefault("jwt_audience", "") - - _ = v.BindEnv("tls_cert_file") - v.SetDefault("tls_cert_file", "") - - _ = v.BindEnv("tls_key_file") - v.SetDefault("tls_key_file", "") - - _ = v.BindEnv("tls_ca_file") - v.SetDefault("tls_ca_file", "") - - // Load configuration into struct - decodeHooks := mapstructure.ComposeDecodeHookFunc( - mapstructure.TextUnmarshallerHookFunc(), - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ) - - config := &Config{} - if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - return config, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "fmt" + "strings" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" +) + +const ( + DefaultEnvPrefix = "DIRECTORY_CLIENT" + + DefaultServerAddress = "0.0.0.0:8888" + DefaultTlsSkipVerify = false +) + +var DefaultConfig = Config{ + ServerAddress: DefaultServerAddress, +} + +type Config struct { + ServerAddress string `json:"server_address,omitempty" mapstructure:"server_address"` + TlsSkipVerify bool `json:"tls_skip_verify,omitempty" mapstructure:"tls_skip_verify"` + TlsCertFile string `json:"tls_cert_file,omitempty" mapstructure:"tls_cert_file"` + TlsKeyFile string `json:"tls_key_file,omitempty" mapstructure:"tls_key_file"` + TlsCAFile string `json:"tls_ca_file,omitempty" mapstructure:"tls_ca_file"` + SpiffeSocketPath string `json:"spiffe_socket_path,omitempty" mapstructure:"spiffe_socket_path"` + SpiffeToken string `json:"spiffe_token,omitempty" mapstructure:"spiffe_token"` + AuthMode string `json:"auth_mode,omitempty" mapstructure:"auth_mode"` + JWTAudience string `json:"jwt_audience,omitempty" mapstructure:"jwt_audience"` +} + +func LoadConfig() (*Config, error) { + v := viper.NewWithOptions( + viper.KeyDelimiter("."), + viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), + ) + + v.SetEnvPrefix(DefaultEnvPrefix) + v.AllowEmptyEnv(true) + v.AutomaticEnv() + + _ = v.BindEnv("server_address") + v.SetDefault("server_address", DefaultServerAddress) + + _ = v.BindEnv("tls_skip_verify") + v.SetDefault("tls_skip_verify", DefaultTlsSkipVerify) + + _ = v.BindEnv("spiffe_socket_path") + v.SetDefault("spiffe_socket_path", "") + + _ = v.BindEnv("spiffe_token") + v.SetDefault("spiffe_token", "") + + _ = v.BindEnv("auth_mode") + v.SetDefault("auth_mode", "") + + _ = v.BindEnv("jwt_audience") + v.SetDefault("jwt_audience", "") + + _ = v.BindEnv("tls_cert_file") + v.SetDefault("tls_cert_file", "") + + _ = v.BindEnv("tls_key_file") + v.SetDefault("tls_key_file", "") + + _ = v.BindEnv("tls_ca_file") + v.SetDefault("tls_ca_file", "") + + // Load configuration into struct + decodeHooks := mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ) + + config := &Config{} + if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + + return config, nil +} diff --git 
a/client/events.go b/client/events.go index b7e4d12cd..4fe798daf 100644 --- a/client/events.go +++ b/client/events.go @@ -1,66 +1,66 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "fmt" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/client/streaming" -) - -// ListenStream streams events from the server with the specified filters. -// -// Returns a StreamResult that provides structured channels for receiving events, -// errors, and completion signals. -// -// Example - Listen to all events: -// -// result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{}) -// if err != nil { -// return err -// } -// -// for { -// select { -// case resp := <-result.ResCh(): -// event := resp.GetEvent() -// fmt.Printf("Event: %s - %s\n", event.Type, event.ResourceId) -// case err := <-result.ErrCh(): -// return fmt.Errorf("stream error: %w", err) -// case <-result.DoneCh(): -// return nil -// case <-ctx.Done(): -// return ctx.Err() -// } -// } -// -// Example - Filter by event type: -// -// result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{ -// EventTypes: []eventsv1.EventType{ -// eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, -// eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, -// }, -// }) -// -// Example - Filter by labels: -// -// result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{ -// LabelFilters: []string{"/skills/AI"}, -// }) -func (c *Client) ListenStream(ctx context.Context, req *eventsv1.ListenRequest) (streaming.StreamResult[eventsv1.ListenResponse], error) { - stream, err := c.Listen(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create event stream: %w", err) - } - - result, err := streaming.ProcessServerStream(ctx, stream) - if err != nil { - return nil, fmt.Errorf("failed to process event stream: %w", err) - } - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "fmt" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/client/streaming" +) + +// ListenStream streams events from the server with the specified filters. +// +// Returns a StreamResult that provides structured channels for receiving events, +// errors, and completion signals. 
+//
+// Example - Listen to all events:
+//
+//	result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{})
+//	if err != nil {
+//		return err
+//	}
+//
+//	for {
+//		select {
+//		case resp := <-result.ResCh():
+//			event := resp.GetEvent()
+//			fmt.Printf("Event: %s - %s\n", event.Type, event.ResourceId)
+//		case err := <-result.ErrCh():
+//			return fmt.Errorf("stream error: %w", err)
+//		case <-result.DoneCh():
+//			return nil
+//		case <-ctx.Done():
+//			return ctx.Err()
+//		}
+//	}
+//
+// Example - Filter by event type:
+//
+//	result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{
+//		EventTypes: []eventsv1.EventType{
+//			eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED,
+//			eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED,
+//		},
+//	})
+//
+// Example - Filter by labels:
+//
+//	result, err := client.ListenStream(ctx, &eventsv1.ListenRequest{
+//		LabelFilters: []string{"/skills/AI"},
+//	})
+func (c *Client) ListenStream(ctx context.Context, req *eventsv1.ListenRequest) (streaming.StreamResult[eventsv1.ListenResponse], error) {
+	stream, err := c.Listen(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create event stream: %w", err)
+	}
+
+	result, err := streaming.ProcessServerStream(ctx, stream)
+	if err != nil {
+		return nil, fmt.Errorf("failed to process event stream: %w", err)
+	}
+
+	return result, nil
+}
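[Reviewer note, illustrative only, not part of this change] The doc comment above shows the channel semantics in fragments; below is a self-contained consumer sketch built on the same StreamResult channels. Client construction is omitted since it is not part of this hunk; the package name, timeout, and print logic are assumptions:

	package consumer

	import (
		"context"
		"fmt"
		"time"

		eventsv1 "github.com/agntcy/dir/api/events/v1"
		"github.com/agntcy/dir/client"
	)

	// drainEvents listens until the server closes the stream, an error
	// arrives, or the 30s deadline expires, whichever comes first.
	func drainEvents(ctx context.Context, c *client.Client) error {
		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()

		result, err := c.ListenStream(ctx, &eventsv1.ListenRequest{})
		if err != nil {
			return err
		}

		for {
			select {
			case resp := <-result.ResCh():
				fmt.Println("event:", resp.GetEvent())
			case err := <-result.ErrCh():
				return fmt.Errorf("stream error: %w", err)
			case <-result.DoneCh():
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}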
diff --git a/client/go.mod b/client/go.mod index e87ae02a2..727e42e86 100644 --- a/client/go.mod +++ b/client/go.mod @@ -1,181 +1,181 @@ -module github.com/agntcy/dir/client - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api => ../api - github.com/agntcy/dir/utils => ../utils -) - -require ( - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/utils v0.6.0 - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c - github.com/sigstore/cosign/v3 v3.0.3 - github.com/spf13/viper v1.21.0 - github.com/spiffe/go-spiffe/v2 v2.6.0 - github.com/stretchr/testify v1.11.1 - google.golang.org/grpc v1.77.0 - google.golang.org/protobuf v1.36.10 -) - -require ( - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect - github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect -
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/go-github/v73 v73.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/moby/term v0.5.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect -
github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/rekor v1.4.3 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/sigstore v1.10.0 // indirect - github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/cobra v1.10.2 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/thales-e-security/pool v0.0.2 // indirect - github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect - github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.34.2 // indirect - k8s.io/apimachinery v0.34.2 // indirect - k8s.io/client-go v0.34.2 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect - lukechampine.com/blake3 v1.4.0 // 
indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect -) +module github.com/agntcy/dir/client + +go 1.25.2 + +replace ( + github.com/agntcy/dir/api => ../api + github.com/agntcy/dir/utils => ../utils +) + +require ( + github.com/agntcy/dir/api v0.6.0 + github.com/agntcy/dir/utils v0.6.0 + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c + github.com/sigstore/cosign/v3 v3.0.3 + github.com/spf13/viper v1.21.0 + github.com/spiffe/go-spiffe/v2 v2.6.0 + github.com/stretchr/testify v1.11.1 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 +) + +require ( + github.com/go-chi/chi/v5 v5.2.3 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect +) + +require ( + buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect + buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/coreos/go-oidc/v3 v3.17.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.24.1 // indirect + github.com/go-openapi/errors v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/loads v0.23.2 // indirect + github.com/go-openapi/runtime v0.29.2 // 
indirect + github.com/go-openapi/spec v0.22.1 // indirect + github.com/go-openapi/strfmt v0.25.0 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/validate v0.25.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-containerregistry v0.20.7 // indirect + github.com/google/go-github/v73 v73.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/in-toto/attestation v1.1.2 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/sigstore/protobuf-specs v0.5.0 // indirect + github.com/sigstore/rekor v1.4.3 // indirect + github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/sigstore v1.10.0 // indirect + github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/subosito/gotenv 
v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.34.2 // indirect + k8s.io/apimachinery v0.34.2 // indirect + k8s.io/client-go v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + lukechampine.com/blake3 v1.4.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/client/go.sum b/client/go.sum index a3712a9e1..efcd7dfba 100644 --- a/client/go.sum +++ b/client/go.sum @@ -1,699 +1,699 @@ -al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= -al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= -cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.17.0 
h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= -cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= -cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= -cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= -github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= 
-github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= -github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= -github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= -github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= -github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod 
h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= -github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= -github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= -github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= 
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= -github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= -github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= -github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= -github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= -github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= -github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.1 
h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= -github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= -github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= -github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= -github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= -github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= -github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod 
h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= -github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= -github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= -github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= -github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= 
-github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= -github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= -github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= -github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= -github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= -github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= -github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= -github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= -github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= -github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod 
h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= -github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= -github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= 
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
-github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
-github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
-github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
-github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
-github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
-github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
-github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
-github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
-github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
-github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
-github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
-github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
-github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
-github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
-github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
-github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
-github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
-github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
-github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
-github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
-github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
-github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g=
-github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
-github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
-github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
-github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
-github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA=
-github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4=
-github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
-github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
-github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0=
-github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o=
-github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo=
-github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU=
-github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI=
-github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y=
-github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg=
-github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY=
-github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M=
-github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc=
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU=
-github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4=
-github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y=
-github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
-github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
-github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
-github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
-github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
-github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
-github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
-github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
-github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
-github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
-github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
-github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
-github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
-github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
-github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
-github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
-github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
-github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
-github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
-github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
-github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA=
-github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw=
-github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI=
-github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis=
-github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0=
-github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw=
-github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0=
-github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw=
-github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw=
-github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
-github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
-github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
-github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw=
-github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI=
-github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
-github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
-github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
-github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
-github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
-github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
-github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
-github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
-github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
-github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
-github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
-github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
-github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
-github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
-github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
-github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s=
-github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI=
-gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw=
-gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk=
-go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
-go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
-go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
-go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA=
-go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk=
-go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
-go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
-go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
-go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
-go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
-go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
-go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
-golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
-golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
-golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
-golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
-golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
-golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
-golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
-golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
-golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
-gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI=
-google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964=
-google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc=
-google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
-google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
-google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
-google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
-google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds=
-google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
-google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
-gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
-gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
-k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
-k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
-k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
-k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
-k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
-k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
-k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0=
-k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
-lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
-sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
-sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
-sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
-sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
-sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
-software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
-software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
+al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA=
+al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
+buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE=
+buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0=
+buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI=
+buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE=
+cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
+cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
+cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
+cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
+cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
+cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k=
+cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g=
+cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
+cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
+github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
+github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
+github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
+github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
+github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I=
+github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
+github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
+github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
+github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
+github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
+github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
+github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
+github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
+github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
+github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
+github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
+github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
+github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
+github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
+github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
+github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
+github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
+github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
+github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
+github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
+github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24=
+github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
+github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
+github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
+github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
+github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
+github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= +github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= 
+github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= 
+github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= 
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= +github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= +gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= +gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod 
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
+k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY=
+k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw=
+k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4=
+k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M=
+k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
+k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
+lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
+software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
diff --git a/client/jwt.go b/client/jwt.go
index 9704c1eb6..1755379ad 100644
--- a/client/jwt.go
+++ b/client/jwt.go
@@ -1,48 +1,48 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package client
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
-	"github.com/spiffe/go-spiffe/v2/workloadapi"
-	"google.golang.org/grpc/credentials"
-)
-
-// jwtPerRPCCredentials implements credentials.PerRPCCredentials for JWT authentication.
-type jwtPerRPCCredentials struct {
-	jwtSource *workloadapi.JWTSource
-	audience  string
-}
-
-// GetRequestMetadata gets the current JWT token and attaches it to the request metadata.
-func (c *jwtPerRPCCredentials) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
-	// Fetch JWT-SVID for the configured audience
-	jwtSVID, err := c.jwtSource.FetchJWTSVID(ctx, jwtsvid.Params{
-		Audience: c.audience,
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed to fetch JWT-SVID: %w", err)
-	}
-
-	// Return the token as a Bearer token in the authorization header
-	return map[string]string{
-		"authorization": "Bearer " + jwtSVID.Marshal(),
-	}, nil
-}
-
-// Returns true because JWT-SVID authentication should be used over TLS.
-func (c *jwtPerRPCCredentials) RequireTransportSecurity() bool {
-	return true
-}
-
-// newJWTCredentials creates a new PerRPCCredentials that injects JWT-SVIDs.
-func newJWTCredentials(jwtSource *workloadapi.JWTSource, audience string) credentials.PerRPCCredentials {
-	return &jwtPerRPCCredentials{
-		jwtSource: jwtSource,
-		audience:  audience,
-	}
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
+	"github.com/spiffe/go-spiffe/v2/workloadapi"
+	"google.golang.org/grpc/credentials"
+)
+
+// jwtPerRPCCredentials implements credentials.PerRPCCredentials for JWT authentication.
+type jwtPerRPCCredentials struct {
+	jwtSource *workloadapi.JWTSource
+	audience  string
+}
+
+// GetRequestMetadata gets the current JWT token and attaches it to the request metadata.
+func (c *jwtPerRPCCredentials) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
+	// Fetch JWT-SVID for the configured audience
+	jwtSVID, err := c.jwtSource.FetchJWTSVID(ctx, jwtsvid.Params{
+		Audience: c.audience,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch JWT-SVID: %w", err)
+	}
+
+	// Return the token as a Bearer token in the authorization header
+	return map[string]string{
+		"authorization": "Bearer " + jwtSVID.Marshal(),
+	}, nil
+}
+
+// Returns true because JWT-SVID authentication should be used over TLS.
+func (c *jwtPerRPCCredentials) RequireTransportSecurity() bool {
+	return true
+}
+
+// newJWTCredentials creates a new PerRPCCredentials that injects JWT-SVIDs.
+func newJWTCredentials(jwtSource *workloadapi.JWTSource, audience string) credentials.PerRPCCredentials {
+	return &jwtPerRPCCredentials{
+		jwtSource: jwtSource,
+		audience:  audience,
+	}
+}
diff --git a/client/options.go b/client/options.go
index ef72a55a5..c784e188f 100644
--- a/client/options.go
+++ b/client/options.go
@@ -1,393 +1,393 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package client
-
-import (
-	"context"
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"encoding/pem"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-
-	"github.com/agntcy/dir/utils/logging"
-	"github.com/agntcy/dir/utils/spiffe"
-	"github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials"
-	"github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
-	"github.com/spiffe/go-spiffe/v2/workloadapi"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/credentials/insecure"
-)
-
-var authLogger = logging.Logger("client.auth")
-
-type Option func(*options) error
-
-// TODO: options need to be granular per key rather than for full config.
-type options struct {
-	config     *Config
-	authOpts   []grpc.DialOption
-	authClient *workloadapi.Client
-
-	// SPIFFE sources for cleanup
-	bundleSrc io.Closer
-	x509Src   io.Closer
-	jwtSource io.Closer
-}
-
-func WithEnvConfig() Option {
-	return func(opts *options) error {
-		var err error
-
-		opts.config, err = LoadConfig()
-
-		return err
-	}
-}
-
-func WithConfig(config *Config) Option {
-	return func(opts *options) error {
-		opts.config = config
-
-		return nil
-	}
-}
-
-func withAuth(ctx context.Context) Option {
-	return func(o *options) error {
-		// Validate config exists before dereferencing
-		if o.config == nil {
-			return errors.New("config is required: use WithConfig() or WithEnvConfig()")
-		}
-
-		// Setup authentication based on AuthMode
-		switch o.config.AuthMode {
-		case "jwt":
-			// NOTE: jwt source must live for the entire client lifetime, not just the initialization phase
-			return o.setupJWTAuth(ctx) //nolint:contextcheck
-		case "x509":
-			// NOTE: x509 source must live for the entire client lifetime, not just the initialization phase
-			return o.setupX509Auth(ctx) //nolint:contextcheck
-		case "token":
-			return o.setupSpiffeAuth(ctx)
-		case "tls":
-			return o.setupTlsAuth(ctx)
-		case "":
-			// Empty auth mode - use insecure connection (for development/testing only)
-			o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
-
-			return nil
-		default:
-			// Invalid auth mode specified - return error to prevent silent security issues
-			return fmt.Errorf("unsupported auth mode: %s (supported: 'jwt', 'x509', 'token', or empty for insecure)", o.config.AuthMode)
-		}
-	}
-}
-
-func (o *options) setupJWTAuth(ctx context.Context) error {
-	// Validate SPIFFE socket path is set
-	if o.config.SpiffeSocketPath == "" {
-		return errors.New("spiffe socket path is required for JWT authentication")
-	}
-
-	// Validate JWT audience is set
-	if o.config.JWTAudience == "" {
-		return errors.New("JWT audience is required for JWT authentication")
-	}
-
-	// Create SPIFFE client
-	client, err := workloadapi.New(ctx, workloadapi.WithAddr(o.config.SpiffeSocketPath))
-	if err != nil {
-		return fmt.Errorf("failed to create SPIFFE client: %w", err)
-	}
-
-	// Create bundle source for verifying server's TLS certificate (X.509-SVID)
-	// Note: Use context.Background() because this source must live for the entire client lifetime,
-	// not just the initialization phase. It will be properly closed in client.Close().
-	bundleSrc, err := workloadapi.NewBundleSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
-	if err != nil {
-		_ = client.Close()
-
-		return fmt.Errorf("failed to create bundle source: %w", err)
-	}
-
-	// Create JWT source for fetching JWT-SVIDs
-	// Note: Use context.Background() because this source must live for the entire client lifetime,
-	// not just the initialization phase. It will be properly closed in client.Close().
-	jwtSource, err := workloadapi.NewJWTSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
-	if err != nil {
-		_ = client.Close()
-		_ = bundleSrc.Close()
-
-		return fmt.Errorf("failed to create JWT source: %w", err)
-	}
-
-	// Use TLS for transport security (server presents X.509-SVID)
-	// Client authenticates with JWT-SVID via PerRPCCredentials
-	o.authClient = client
-	o.bundleSrc = bundleSrc
-	o.jwtSource = jwtSource
-	o.authOpts = append(o.authOpts,
-		grpc.WithTransportCredentials(
-			grpccredentials.TLSClientCredentials(bundleSrc, tlsconfig.AuthorizeAny()),
-		),
-		grpc.WithPerRPCCredentials(newJWTCredentials(jwtSource, o.config.JWTAudience)),
-	)
-
-	return nil
-}
-
-func (o *options) setupX509Auth(ctx context.Context) error {
-	// Validate SPIFFE socket path is set
-	if o.config.SpiffeSocketPath == "" {
-		return errors.New("spiffe socket path is required for x509 authentication")
-	}
-
-	authLogger.Debug("Setting up X509 authentication", "spiffe_socket_path", o.config.SpiffeSocketPath)
-
-	// Create SPIFFE client
-	client, err := workloadapi.New(ctx, workloadapi.WithAddr(o.config.SpiffeSocketPath))
-	if err != nil {
-		return fmt.Errorf("failed to create SPIFFE client: %w", err)
-	}
-
-	authLogger.Debug("Created SPIFFE workload API client")
-
-	// Create SPIFFE x509 services
-	// Note: Use context.Background() because this source must live for the entire client lifetime,
-	// not just the initialization phase. It will be properly closed in client.Close().
-	x509Src, err := workloadapi.NewX509Source(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
-	if err != nil {
-		_ = client.Close()
-
-		return fmt.Errorf("failed to create x509 source: %w", err)
-	}
-
-	authLogger.Debug("Created X509 source, starting retry logic to get valid SVID")
-
-	// Wait for X509-SVID to be available with retry logic
-	// This handles timing issues where the SPIRE entry hasn't been synced to the agent yet
-	// (common with CronJobs and other short-lived workloads)
-	// The agent may return a certificate without a URI SAN (SPIFFE ID) if the entry hasn't synced,
-	// so we must validate that the certificate actually contains a valid SPIFFE ID.
-	svid, svidErr := spiffe.GetX509SVIDWithRetry(
-		x509Src,
-		spiffe.DefaultMaxRetries,
-		spiffe.DefaultInitialBackoff,
-		spiffe.DefaultMaxBackoff,
-		authLogger,
-	)
-	if svidErr != nil {
-		_ = client.Close()
-		_ = x509Src.Close()
-
-		authLogger.Error("Failed to get valid X509-SVID after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries)
-
-		return fmt.Errorf("failed to get valid X509-SVID after retries (SPIRE entry may not be synced yet): %w", svidErr)
-	}
-
-	authLogger.Info("Successfully obtained valid X509-SVID", "spiffe_id", svid.ID.String())
-
-	// Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry
-	// This is critical because grpccredentials.MTLSClientCredentials calls GetX509SVID()
-	// during the actual TLS handshake, not just during setup. Without this wrapper,
-	// the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment.
-	//
-	// Connection flow: dirctl → Ingress (TLS passthrough) → apiserver pod
-	// The TLS handshake happens between dirctl and apiserver, and during this handshake,
-	// grpccredentials.MTLSClientCredentials calls GetX509SVID() again.
-	//
-	// Note: x509Src is *workloadapi.X509Source (concrete type that implements x509svid.Source).
-	// We use it directly as the Source interface and also as io.Closer.
-	wrappedX509Src := spiffe.NewX509SourceWithRetry(
-		x509Src, // Use pointer directly (implements x509svid.Source)
-		x509Src, // Same pointer (implements io.Closer)
-		authLogger,
-		spiffe.DefaultMaxRetries,
-		spiffe.DefaultInitialBackoff,
-		spiffe.DefaultMaxBackoff,
-	)
-
-	authLogger.Debug("Created X509SourceWithRetry wrapper",
-		"wrapped_type", fmt.Sprintf("%T", wrappedX509Src),
-		"src_type", fmt.Sprintf("%T", x509Src),
-		"implements_source", true)
-
-	// Create SPIFFE bundle services
-	// Note: Use context.Background() because this source must live for the entire client lifetime,
-	// not just the initialization phase. It will be properly closed in client.Close().
-	bundleSrc, err := workloadapi.NewBundleSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
-	if err != nil {
-		_ = client.Close()
-		_ = x509Src.Close() // Fix Issue #4: Close x509Src on error
-
-		return fmt.Errorf("failed to create bundle source: %w", err)
-	}
-
-	// Update options
-	o.authClient = client
-	o.x509Src = wrappedX509Src // Store wrapped source for cleanup
-	o.bundleSrc = bundleSrc
-
-	authLogger.Debug("Creating MTLSClientCredentials with wrapped source",
-		"wrapped_source_type", fmt.Sprintf("%T", wrappedX509Src),
-		"wrapped_implements_source", true)
-
-	creds := grpccredentials.MTLSClientCredentials(wrappedX509Src, bundleSrc, tlsconfig.AuthorizeAny())
-	o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(creds))
-
-	authLogger.Debug("MTLSClientCredentials created successfully, wrapper will be used for TLS handshake")
-
-	return nil
-}
-
-func (o *options) setupSpiffeAuth(_ context.Context) error {
-	// Validate token file is set
-	if o.config.SpiffeToken == "" {
-		return errors.New("spiffe token file path is required for token authentication")
-	}
-
-	// Read token file
-	tokenData, err := os.ReadFile(o.config.SpiffeToken)
-	if err != nil {
-		return fmt.Errorf("failed to read SPIFFE token file: %w", err)
-	}
-
-	// SpiffeTokenData represents the structure of SPIFFE token JSON
-	type SpiffeTokenData struct {
-		X509SVID   []string `json:"x509_svid"`   // DER-encoded certificates in base64
-		PrivateKey string   `json:"private_key"` // DER-encoded private key in base64
-		RootCAs    []string `json:"root_cas"`    // DER-encoded root CA certificates in base64
-	}
-
-	// Parse SPIFFE token JSON
-	var spiffeData []SpiffeTokenData
-	if err := json.Unmarshal(tokenData, &spiffeData); err != nil {
-		return fmt.Errorf("failed to parse SPIFFE token: %w", err)
-	}
-
-	if len(spiffeData) == 0 {
-		return errors.New("no SPIFFE data found in token")
-	}
-
-	// Use the first SPIFFE data entry
-	data := spiffeData[0]
-
-	// Parse the certificate chain
-	if len(data.X509SVID) == 0 {
-		return errors.New("no X.509 SVID certificates found")
-	}
-
-	// From base64 DER to PEM
-	certDER, err := base64.StdEncoding.DecodeString(data.X509SVID[0])
-	if err != nil {
-		return fmt.Errorf("failed to decode certificate: %w", err)
-	}
-
-	certPEM := pem.EncodeToMemory(&pem.Block{
-		Type:  "CERTIFICATE",
-		Bytes: certDER,
-	})
-
-	// The private key is base64-encoded DER format
-	keyDER, err := base64.StdEncoding.DecodeString(data.PrivateKey)
-	if err != nil {
-		return fmt.Errorf("failed to decode private key: %w", err)
-	}
-
-	keyPEM := pem.EncodeToMemory(&pem.Block{
-		Type:  "PRIVATE KEY",
-		Bytes: keyDER,
-	})
-
-	// Create certificate from PEM data
-	cert, err := tls.X509KeyPair(certPEM, keyPEM)
-	if err != nil {
-		return fmt.Errorf("failed to create certificate from SPIFFE data: %w", err)
-	}
-
-	// Create CA pool from root CAs
-	capool := x509.NewCertPool()
-
-	for _, rootCA := range data.RootCAs {
-		// Root CAs are also base64-encoded DER
-		caDER, err := base64.StdEncoding.DecodeString(rootCA)
-		if err != nil {
-			return fmt.Errorf("failed to decode root CA: %w", err)
-		}
-
-		caPEM := pem.EncodeToMemory(&pem.Block{
-			Type:  "CERTIFICATE",
-			Bytes: caDER,
-		})
-
-		if !capool.AppendCertsFromPEM(caPEM) {
-			return errors.New("failed to append root CA certificate to CA pool")
-		}
-	}
-
-	// Create TLS config
-	tlsConfig := &tls.Config{
-		Certificates:       []tls.Certificate{cert},
-		RootCAs:            capool,
-		InsecureSkipVerify: o.config.TlsSkipVerify, //nolint:gosec
-	}
-
-	// Update options
-	o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
-
-	return nil
-}
-
-func (o *options) setupTlsAuth(_ context.Context) error {
-	// Validate TLS config is set
-	if o.config.TlsCAFile == "" || o.config.TlsCertFile == "" || o.config.TlsKeyFile == "" {
-		return errors.New("TLS CA, cert, and key file paths are required for TLS authentication")
-	}
-
-	// Load TLS data for tlsConfig
-	caData, err := os.ReadFile(o.config.TlsCAFile)
-	if err != nil {
-		return fmt.Errorf("failed to read TLS CA file: %w", err)
-	}
-
-	certData, err := os.ReadFile(o.config.TlsCertFile)
-	if err != nil {
-		return fmt.Errorf("failed to read TLS cert file: %w", err)
-	}
-
-	keyData, err := os.ReadFile(o.config.TlsKeyFile)
-	if err != nil {
-		return fmt.Errorf("failed to read TLS key file: %w", err)
-	}
-
-	// Create certificate from PEM data
-	cert, err := tls.X509KeyPair(certData, keyData)
-	if err != nil {
-		return fmt.Errorf("failed to create certificate from TLS data: %w", err)
-	}
-
-	// Create CA pool from root CAs
-	capool := x509.NewCertPool()
-	if !capool.AppendCertsFromPEM(caData) {
-		return errors.New("failed to append root CA certificate to CA pool")
-	}
-
-	// Create TLS config
-	tlsConfig := &tls.Config{
-		Certificates:       []tls.Certificate{cert},
-		RootCAs:            capool,
-		InsecureSkipVerify: o.config.TlsSkipVerify, //nolint:gosec
-	}
-
-	// Update options
-	o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
-
-	return nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package client
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/agntcy/dir/utils/logging"
+	"github.com/agntcy/dir/utils/spiffe"
+	"github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials"
+	"github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
+	"github.com/spiffe/go-spiffe/v2/workloadapi"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+var authLogger = logging.Logger("client.auth")
+
+type Option func(*options) error
+
+// TODO: options need to be granular per key rather than for full config.
+type options struct {
+	config     *Config
+	authOpts   []grpc.DialOption
+	authClient *workloadapi.Client
+
+	// SPIFFE sources for cleanup
+	bundleSrc io.Closer
+	x509Src   io.Closer
+	jwtSource io.Closer
+}
+
+func WithEnvConfig() Option {
+	return func(opts *options) error {
+		var err error
+
+		opts.config, err = LoadConfig()
+
+		return err
+	}
+}
+
+func WithConfig(config *Config) Option {
+	return func(opts *options) error {
+		opts.config = config
+
+		return nil
+	}
+}
+
+func withAuth(ctx context.Context) Option {
+	return func(o *options) error {
+		// Validate config exists before dereferencing
+		if o.config == nil {
+			return errors.New("config is required: use WithConfig() or WithEnvConfig()")
+		}
+
+		// Setup authentication based on AuthMode
+		switch o.config.AuthMode {
+		case "jwt":
+			// NOTE: jwt source must live for the entire client lifetime, not just the initialization phase
+			return o.setupJWTAuth(ctx) //nolint:contextcheck
+		case "x509":
+			// NOTE: x509 source must live for the entire client lifetime, not just the initialization phase
+			return o.setupX509Auth(ctx) //nolint:contextcheck
+		case "token":
+			return o.setupSpiffeAuth(ctx)
+		case "tls":
+			return o.setupTlsAuth(ctx)
+		case "":
+			// Empty auth mode - use insecure connection (for development/testing only)
+			o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+
+			return nil
+		default:
+			// Invalid auth mode specified - return error to prevent silent security issues
+			return fmt.Errorf("unsupported auth mode: %s (supported: 'jwt', 'x509', 'token', or empty for insecure)", o.config.AuthMode)
+		}
+	}
+}
+
+func (o *options) setupJWTAuth(ctx context.Context) error {
+	// Validate SPIFFE socket path is set
+	if o.config.SpiffeSocketPath == "" {
+		return errors.New("spiffe socket path is required for JWT authentication")
+	}
+
+	// Validate JWT audience is set
+	if o.config.JWTAudience == "" {
+		return errors.New("JWT audience is required for JWT authentication")
+	}
+
+	// Create SPIFFE client
+	client, err := workloadapi.New(ctx, workloadapi.WithAddr(o.config.SpiffeSocketPath))
+	if err != nil {
+		return fmt.Errorf("failed to create SPIFFE client: %w", err)
+	}
+
+	// Create bundle source for verifying server's TLS certificate (X.509-SVID)
+	// Note: Use context.Background() because this source must live for the entire client lifetime,
+	// not just the initialization phase. It will be properly closed in client.Close().
+	bundleSrc, err := workloadapi.NewBundleSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
+	if err != nil {
+		_ = client.Close()
+
+		return fmt.Errorf("failed to create bundle source: %w", err)
+	}
+
+	// Create JWT source for fetching JWT-SVIDs
+	// Note: Use context.Background() because this source must live for the entire client lifetime,
+	// not just the initialization phase. It will be properly closed in client.Close().
+	jwtSource, err := workloadapi.NewJWTSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
+	if err != nil {
+		_ = client.Close()
+		_ = bundleSrc.Close()
+
+		return fmt.Errorf("failed to create JWT source: %w", err)
+	}
+
+	// Use TLS for transport security (server presents X.509-SVID)
+	// Client authenticates with JWT-SVID via PerRPCCredentials
+	o.authClient = client
+	o.bundleSrc = bundleSrc
+	o.jwtSource = jwtSource
+	o.authOpts = append(o.authOpts,
+		grpc.WithTransportCredentials(
+			grpccredentials.TLSClientCredentials(bundleSrc, tlsconfig.AuthorizeAny()),
+		),
+		grpc.WithPerRPCCredentials(newJWTCredentials(jwtSource, o.config.JWTAudience)),
+	)
+
+	return nil
+}
+
+func (o *options) setupX509Auth(ctx context.Context) error {
+	// Validate SPIFFE socket path is set
+	if o.config.SpiffeSocketPath == "" {
+		return errors.New("spiffe socket path is required for x509 authentication")
+	}
+
+	authLogger.Debug("Setting up X509 authentication", "spiffe_socket_path", o.config.SpiffeSocketPath)
+
+	// Create SPIFFE client
+	client, err := workloadapi.New(ctx, workloadapi.WithAddr(o.config.SpiffeSocketPath))
+	if err != nil {
+		return fmt.Errorf("failed to create SPIFFE client: %w", err)
+	}
+
+	authLogger.Debug("Created SPIFFE workload API client")
+
+	// Create SPIFFE x509 services
+	// Note: Use context.Background() because this source must live for the entire client lifetime,
+	// not just the initialization phase. It will be properly closed in client.Close().
+	x509Src, err := workloadapi.NewX509Source(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
+	if err != nil {
+		_ = client.Close()
+
+		return fmt.Errorf("failed to create x509 source: %w", err)
+	}
+
+	authLogger.Debug("Created X509 source, starting retry logic to get valid SVID")
+
+	// Wait for X509-SVID to be available with retry logic
+	// This handles timing issues where the SPIRE entry hasn't been synced to the agent yet
+	// (common with CronJobs and other short-lived workloads)
+	// The agent may return a certificate without a URI SAN (SPIFFE ID) if the entry hasn't synced,
+	// so we must validate that the certificate actually contains a valid SPIFFE ID.
+	svid, svidErr := spiffe.GetX509SVIDWithRetry(
+		x509Src,
+		spiffe.DefaultMaxRetries,
+		spiffe.DefaultInitialBackoff,
+		spiffe.DefaultMaxBackoff,
+		authLogger,
+	)
+	if svidErr != nil {
+		_ = client.Close()
+		_ = x509Src.Close()
+
+		authLogger.Error("Failed to get valid X509-SVID after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries)
+
+		return fmt.Errorf("failed to get valid X509-SVID after retries (SPIRE entry may not be synced yet): %w", svidErr)
+	}
+
+	authLogger.Info("Successfully obtained valid X509-SVID", "spiffe_id", svid.ID.String())
+
+	// Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry
+	// This is critical because grpccredentials.MTLSClientCredentials calls GetX509SVID()
+	// during the actual TLS handshake, not just during setup. Without this wrapper,
+	// the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment.
+	//
+	// Connection flow: dirctl → Ingress (TLS passthrough) → apiserver pod
+	// The TLS handshake happens between dirctl and apiserver, and during this handshake,
+	// grpccredentials.MTLSClientCredentials calls GetX509SVID() again.
+	//
+	// Note: x509Src is *workloadapi.X509Source (concrete type that implements x509svid.Source).
+	// We use it directly as the Source interface and also as io.Closer.
+	wrappedX509Src := spiffe.NewX509SourceWithRetry(
+		x509Src, // Use pointer directly (implements x509svid.Source)
+		x509Src, // Same pointer (implements io.Closer)
+		authLogger,
+		spiffe.DefaultMaxRetries,
+		spiffe.DefaultInitialBackoff,
+		spiffe.DefaultMaxBackoff,
+	)
+
+	authLogger.Debug("Created X509SourceWithRetry wrapper",
+		"wrapped_type", fmt.Sprintf("%T", wrappedX509Src),
+		"src_type", fmt.Sprintf("%T", x509Src),
+		"implements_source", true)
+
+	// Create SPIFFE bundle services
+	// Note: Use context.Background() because this source must live for the entire client lifetime,
+	// not just the initialization phase. It will be properly closed in client.Close().
+	bundleSrc, err := workloadapi.NewBundleSource(context.Background(), workloadapi.WithClient(client)) //nolint:contextcheck
+	if err != nil {
+		_ = client.Close()
+		_ = x509Src.Close() // Fix Issue #4: Close x509Src on error
+
+		return fmt.Errorf("failed to create bundle source: %w", err)
+	}
+
+	// Update options
+	o.authClient = client
+	o.x509Src = wrappedX509Src // Store wrapped source for cleanup
+	o.bundleSrc = bundleSrc
+
+	authLogger.Debug("Creating MTLSClientCredentials with wrapped source",
+		"wrapped_source_type", fmt.Sprintf("%T", wrappedX509Src),
+		"wrapped_implements_source", true)
+
+	creds := grpccredentials.MTLSClientCredentials(wrappedX509Src, bundleSrc, tlsconfig.AuthorizeAny())
+	o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(creds))
+
+	authLogger.Debug("MTLSClientCredentials created successfully, wrapper will be used for TLS handshake")
+
+	return nil
+}
+
+func (o *options) setupSpiffeAuth(_ context.Context) error {
+	// Validate token file is set
+	if o.config.SpiffeToken == "" {
+		return errors.New("spiffe token file path is required for token authentication")
+	}
+
+	// Read token file
+	tokenData, err := os.ReadFile(o.config.SpiffeToken)
+	if err != nil {
+		return fmt.Errorf("failed to read SPIFFE token file: %w", err)
+	}
+
+	// SpiffeTokenData represents the structure of SPIFFE token JSON
+	type SpiffeTokenData struct {
+		X509SVID   []string `json:"x509_svid"`   // DER-encoded certificates in base64
+		PrivateKey string   `json:"private_key"` // DER-encoded private key in base64
+		RootCAs    []string `json:"root_cas"`    // DER-encoded root CA certificates in base64
+	}
+
+	// Parse SPIFFE token JSON
+	var spiffeData []SpiffeTokenData
+	if err := json.Unmarshal(tokenData, &spiffeData); err != nil {
+		return fmt.Errorf("failed to parse SPIFFE token: %w", err)
+	}
+
+	if len(spiffeData) == 0 {
+		return errors.New("no SPIFFE data found in token")
+	}
+
+	// Use the first SPIFFE data entry
+	data := spiffeData[0]
+
+	// Parse the certificate chain
+	if len(data.X509SVID) == 0 {
+		return errors.New("no X.509 SVID certificates found")
+	}
+
+	// From base64 DER to PEM
+	certDER, err := base64.StdEncoding.DecodeString(data.X509SVID[0])
+	if err != nil {
+		return fmt.Errorf("failed to decode certificate: %w", err)
+	}
+
+	certPEM := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: certDER,
+	})
+
+	// The private key is base64-encoded DER format
+	keyDER, err := base64.StdEncoding.DecodeString(data.PrivateKey)
+	if err != nil {
+		return fmt.Errorf("failed to decode private key: %w", err)
+	}
+
+	keyPEM := pem.EncodeToMemory(&pem.Block{
+		Type:  "PRIVATE KEY",
+		Bytes: keyDER,
+	})
+
+	// Create certificate from PEM data
+	cert, err := tls.X509KeyPair(certPEM, keyPEM)
+	if err != nil {
+		return fmt.Errorf("failed to create certificate from SPIFFE data: %w", err)
+	}
+
+	// Create CA pool from
root CAs + capool := x509.NewCertPool() + + for _, rootCA := range data.RootCAs { + // Root CAs are also base64-encoded DER + caDER, err := base64.StdEncoding.DecodeString(rootCA) + if err != nil { + return fmt.Errorf("failed to decode root CA: %w", err) + } + + caPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: caDER, + }) + + if !capool.AppendCertsFromPEM(caPEM) { + return errors.New("failed to append root CA certificate to CA pool") + } + } + + // Create TLS config + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: capool, + InsecureSkipVerify: o.config.TlsSkipVerify, //nolint:gosec + } + + // Update options + o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + + return nil +} + +func (o *options) setupTlsAuth(_ context.Context) error { + // Validate TLS config is set + if o.config.TlsCAFile == "" || o.config.TlsCertFile == "" || o.config.TlsKeyFile == "" { + return errors.New("TLS CA, cert, and key file paths are required for TLS authentication") + } + + // Load TLS data for tlsConfig + caData, err := os.ReadFile(o.config.TlsCAFile) + if err != nil { + return fmt.Errorf("failed to read TLS CA file: %w", err) + } + + certData, err := os.ReadFile(o.config.TlsCertFile) + if err != nil { + return fmt.Errorf("failed to read TLS cert file: %w", err) + } + + keyData, err := os.ReadFile(o.config.TlsKeyFile) + if err != nil { + return fmt.Errorf("failed to read TLS key file: %w", err) + } + + // Create certificate from PEM data + cert, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return fmt.Errorf("failed to create certificate from TLS data: %w", err) + } + + // Create CA pool from root CAs + capool := x509.NewCertPool() + if !capool.AppendCertsFromPEM(caData) { + return errors.New("failed to append root CA certificate to CA pool") + } + + // Create TLS config + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: capool, + InsecureSkipVerify: o.config.TlsSkipVerify, //nolint:gosec + } + + // Update options + o.authOpts = append(o.authOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + + return nil +} diff --git a/client/options_test.go b/client/options_test.go index 4da60f7c2..83f382178 100644 --- a/client/options_test.go +++ b/client/options_test.go @@ -1,438 +1,438 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Test constants. 
-const ( - testServerAddr = "localhost:9999" - testSpiffeSocket = "/tmp/test-spiffe.sock" - testJWTAudience = "test-audience" - testInvalidAuthMode = "invalid-auth" -) - -func TestWithConfig(t *testing.T) { - t.Run("should set config", func(t *testing.T) { - cfg := &Config{ - ServerAddress: testServerAddr, - } - - opts := &options{} - opt := WithConfig(cfg) - err := opt(opts) - - require.NoError(t, err) - assert.Equal(t, cfg, opts.config) - assert.Equal(t, testServerAddr, opts.config.ServerAddress) - }) - - t.Run("should allow nil config", func(t *testing.T) { - opts := &options{} - opt := WithConfig(nil) - err := opt(opts) - - require.NoError(t, err) - assert.Nil(t, opts.config) - }) -} - -func TestWithEnvConfig(t *testing.T) { - t.Run("should load default config when no env vars", func(t *testing.T) { - // Clear any existing env vars by unsetting them - // Note: We use os.Unsetenv here (not t.Setenv) because t.Setenv("VAR", "") - // sets to empty string, not unset. We need truly unset vars to test defaults. - oldAddr := os.Getenv("DIRECTORY_CLIENT_SERVER_ADDRESS") - oldSocket := os.Getenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH") - oldAuth := os.Getenv("DIRECTORY_CLIENT_AUTH_MODE") - oldAud := os.Getenv("DIRECTORY_CLIENT_JWT_AUDIENCE") - - os.Unsetenv("DIRECTORY_CLIENT_SERVER_ADDRESS") - os.Unsetenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH") - os.Unsetenv("DIRECTORY_CLIENT_AUTH_MODE") - os.Unsetenv("DIRECTORY_CLIENT_JWT_AUDIENCE") - - defer func() { - // Restore original values - must use os.Setenv (not t.Setenv) to restore after os.Unsetenv - //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv - if oldAddr != "" { - os.Setenv("DIRECTORY_CLIENT_SERVER_ADDRESS", oldAddr) - } - //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv - if oldSocket != "" { - os.Setenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH", oldSocket) - } - //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv - if oldAuth != "" { - os.Setenv("DIRECTORY_CLIENT_AUTH_MODE", oldAuth) - } - //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv - if oldAud != "" { - os.Setenv("DIRECTORY_CLIENT_JWT_AUDIENCE", oldAud) - } - }() - - opts := &options{} - opt := WithEnvConfig() - err := opt(opts) - - require.NoError(t, err) - require.NotNil(t, opts.config) - assert.Equal(t, DefaultServerAddress, opts.config.ServerAddress) - assert.Empty(t, opts.config.SpiffeSocketPath) - assert.Empty(t, opts.config.AuthMode) - assert.Empty(t, opts.config.JWTAudience) - }) - - t.Run("should load config from environment variables", func(t *testing.T) { - // Set env vars - t.Setenv automatically restores after test - t.Setenv("DIRECTORY_CLIENT_SERVER_ADDRESS", testServerAddr) - t.Setenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH", testSpiffeSocket) - t.Setenv("DIRECTORY_CLIENT_AUTH_MODE", "jwt") - t.Setenv("DIRECTORY_CLIENT_JWT_AUDIENCE", testJWTAudience) - - opts := &options{} - opt := WithEnvConfig() - err := opt(opts) - - require.NoError(t, err) - require.NotNil(t, opts.config) - assert.Equal(t, testServerAddr, opts.config.ServerAddress) - assert.Equal(t, testSpiffeSocket, opts.config.SpiffeSocketPath) - assert.Equal(t, "jwt", opts.config.AuthMode) - assert.Equal(t, testJWTAudience, opts.config.JWTAudience) - }) -} - -func TestWithAuth_ConfigValidation(t *testing.T) { - t.Run("should error when config is nil", func(t *testing.T) { - opts := &options{ - config: nil, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) 
- - require.Error(t, err) - assert.Contains(t, err.Error(), "config is required") - }) - - t.Run("should use insecure credentials when no SPIFFE socket", func(t *testing.T) { - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: "", // No SPIFFE - AuthMode: "", - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - require.NoError(t, err) - assert.NotEmpty(t, opts.authOpts) - assert.Nil(t, opts.authClient) - }) - - t.Run("should use insecure credentials when no auth mode", func(t *testing.T) { - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: testSpiffeSocket, - AuthMode: "", // No auth mode - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - require.NoError(t, err) - assert.NotEmpty(t, opts.authOpts) - assert.Nil(t, opts.authClient) - }) -} - -func TestWithAuth_InvalidAuthMode(t *testing.T) { - t.Run("should error on unsupported auth mode", func(t *testing.T) { - // Skip this test if we can't connect to SPIFFE socket - // (SPIFFE connection will fail before we can test invalid auth mode) - if _, err := os.Stat(testSpiffeSocket); os.IsNotExist(err) { - t.Skip("SPIFFE socket not available for testing") - } - - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: testSpiffeSocket, - AuthMode: testInvalidAuthMode, - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - // Will error either from SPIFFE connection or invalid auth mode - require.Error(t, err) - }) -} - -func TestOptions_Chaining(t *testing.T) { - t.Run("should apply multiple options in order", func(t *testing.T) { - cfg1 := &Config{ServerAddress: "server1:8888"} - cfg2 := &Config{ServerAddress: "server2:9999"} - - opts := &options{} - - // Apply first config - opt1 := WithConfig(cfg1) - err := opt1(opts) - require.NoError(t, err) - assert.Equal(t, "server1:8888", opts.config.ServerAddress) - - // Apply second config (should override) - opt2 := WithConfig(cfg2) - err = opt2(opts) - require.NoError(t, err) - assert.Equal(t, "server2:9999", opts.config.ServerAddress) - }) -} - -func TestOptions_DefaultValues(t *testing.T) { - t.Run("should use default server address", func(t *testing.T) { - assert.Equal(t, "0.0.0.0:8888", DefaultServerAddress) - assert.Equal(t, DefaultServerAddress, DefaultConfig.ServerAddress) - }) - - t.Run("should have correct env prefix", func(t *testing.T) { - assert.Equal(t, "DIRECTORY_CLIENT", DefaultEnvPrefix) - }) -} - -func TestOptions_ContextUsage(t *testing.T) { - t.Run("should accept cancelled context", func(t *testing.T) { - // Create already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - // No SPIFFE - should use insecure - }, - } - - opt := withAuth(ctx) - err := opt(opts) - - // Should succeed because no actual I/O happens with insecure mode - require.NoError(t, err) - }) -} - -func TestOptions_ResourceFields(t *testing.T) { - t.Run("should initialize with nil resources", func(t *testing.T) { - opts := &options{} - - assert.Nil(t, opts.config) - assert.Nil(t, opts.authClient) - assert.Nil(t, opts.bundleSrc) - assert.Nil(t, opts.x509Src) - assert.Nil(t, opts.jwtSource) - assert.Empty(t, opts.authOpts) - }) - - t.Run("should store config correctly", func(t *testing.T) { - cfg := &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: 
testSpiffeSocket, - AuthMode: "jwt", - JWTAudience: testJWTAudience, - } - - opts := &options{} - opt := WithConfig(cfg) - err := opt(opts) - - require.NoError(t, err) - assert.NotNil(t, opts.config) - assert.Equal(t, testServerAddr, opts.config.ServerAddress) - assert.Equal(t, testSpiffeSocket, opts.config.SpiffeSocketPath) - assert.Equal(t, "jwt", opts.config.AuthMode) - assert.Equal(t, testJWTAudience, opts.config.JWTAudience) - }) -} - -func TestSetupJWTAuth_Validation(t *testing.T) { - t.Run("should error when JWT audience is missing", func(t *testing.T) { - // This test validates that JWT authentication requires an audience - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: testSpiffeSocket, - AuthMode: "jwt", - JWTAudience: "", // Missing audience - }, - } - - // We need a mock client to test setupJWTAuth - // Since we can't create a real SPIFFE client without the socket, - // we test this through withAuth which calls setupJWTAuth - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - // Should fail because we can't connect to SPIFFE socket - // OR because JWT audience is missing (depending on order of checks) - require.Error(t, err) - // The error could be about SPIFFE connection or missing JWT audience - t.Logf("Error (expected): %v", err) - }) -} - -func TestSetupX509Auth_Validation(t *testing.T) { - t.Run("should attempt x509 auth setup", func(t *testing.T) { - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: testSpiffeSocket, - AuthMode: "x509", - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - // Should fail because we can't connect to SPIFFE socket - require.Error(t, err) - // Error should be about SPIFFE connection - t.Logf("Error (expected): %v", err) - }) -} - -func TestWithAuth_SPIFFESocketConnection(t *testing.T) { - t.Run("should error when SPIFFE socket does not exist", func(t *testing.T) { - // Use a non-existent socket path - nonExistentSocket := "/tmp/non-existent-spiffe-" + t.Name() + ".sock" - - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: nonExistentSocket, - AuthMode: "jwt", - JWTAudience: testJWTAudience, - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - // Should error because socket doesn't exist - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to create SPIFFE client") - }) - - t.Run("should error with x509 auth and non-existent socket", func(t *testing.T) { - nonExistentSocket := "/tmp/non-existent-spiffe-x509-" + t.Name() + ".sock" - - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: nonExistentSocket, - AuthMode: "x509", - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - // Should error because socket doesn't exist - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to create SPIFFE client") - }) -} - -func TestWithAuth_AllAuthModes(t *testing.T) { - testCases := []struct { - name string - authMode string - jwtAudience string - expectError bool - errorContains string - }{ - { - name: "jwt mode without socket", - authMode: "jwt", - jwtAudience: testJWTAudience, - expectError: true, - errorContains: "failed to create SPIFFE client", - }, - { - name: "x509 mode without socket", - authMode: "x509", - jwtAudience: "", - expectError: true, - errorContains: "failed to create SPIFFE client", - }, - { - name: "invalid mode without socket", 
- authMode: "invalid", - jwtAudience: "", - expectError: true, - errorContains: "unsupported auth mode", - }, - { - name: "empty mode with socket path", - authMode: "", - jwtAudience: "", - expectError: false, - errorContains: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - socketPath := "" - if tc.authMode != "" { - socketPath = "/tmp/test-socket-" + tc.name + ".sock" - } - - opts := &options{ - config: &Config{ - ServerAddress: testServerAddr, - SpiffeSocketPath: socketPath, - AuthMode: tc.authMode, - JWTAudience: tc.jwtAudience, - }, - } - - ctx := context.Background() - opt := withAuth(ctx) - err := opt(opts) - - if tc.expectError { - require.Error(t, err) - - if tc.errorContains != "" { - assert.Contains(t, err.Error(), tc.errorContains) - } - } else { - require.NoError(t, err) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test constants. +const ( + testServerAddr = "localhost:9999" + testSpiffeSocket = "/tmp/test-spiffe.sock" + testJWTAudience = "test-audience" + testInvalidAuthMode = "invalid-auth" +) + +func TestWithConfig(t *testing.T) { + t.Run("should set config", func(t *testing.T) { + cfg := &Config{ + ServerAddress: testServerAddr, + } + + opts := &options{} + opt := WithConfig(cfg) + err := opt(opts) + + require.NoError(t, err) + assert.Equal(t, cfg, opts.config) + assert.Equal(t, testServerAddr, opts.config.ServerAddress) + }) + + t.Run("should allow nil config", func(t *testing.T) { + opts := &options{} + opt := WithConfig(nil) + err := opt(opts) + + require.NoError(t, err) + assert.Nil(t, opts.config) + }) +} + +func TestWithEnvConfig(t *testing.T) { + t.Run("should load default config when no env vars", func(t *testing.T) { + // Clear any existing env vars by unsetting them + // Note: We use os.Unsetenv here (not t.Setenv) because t.Setenv("VAR", "") + // sets to empty string, not unset. We need truly unset vars to test defaults. 
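+		// The four variables below are the full DIRECTORY_CLIENT_* surface
+		// exercised by this test; the mapping (inferred from the assertions
+		// in this file) is:
+		//
+		//	DIRECTORY_CLIENT_SERVER_ADDRESS     -> Config.ServerAddress
+		//	DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH -> Config.SpiffeSocketPath
+		//	DIRECTORY_CLIENT_AUTH_MODE          -> Config.AuthMode
+		//	DIRECTORY_CLIENT_JWT_AUDIENCE       -> Config.JWTAudience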
+ oldAddr := os.Getenv("DIRECTORY_CLIENT_SERVER_ADDRESS") + oldSocket := os.Getenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH") + oldAuth := os.Getenv("DIRECTORY_CLIENT_AUTH_MODE") + oldAud := os.Getenv("DIRECTORY_CLIENT_JWT_AUDIENCE") + + os.Unsetenv("DIRECTORY_CLIENT_SERVER_ADDRESS") + os.Unsetenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH") + os.Unsetenv("DIRECTORY_CLIENT_AUTH_MODE") + os.Unsetenv("DIRECTORY_CLIENT_JWT_AUDIENCE") + + defer func() { + // Restore original values - must use os.Setenv (not t.Setenv) to restore after os.Unsetenv + //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv + if oldAddr != "" { + os.Setenv("DIRECTORY_CLIENT_SERVER_ADDRESS", oldAddr) + } + //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv + if oldSocket != "" { + os.Setenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH", oldSocket) + } + //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv + if oldAuth != "" { + os.Setenv("DIRECTORY_CLIENT_AUTH_MODE", oldAuth) + } + //nolint:usetesting // Can't use t.Setenv in defer for restoration after os.Unsetenv + if oldAud != "" { + os.Setenv("DIRECTORY_CLIENT_JWT_AUDIENCE", oldAud) + } + }() + + opts := &options{} + opt := WithEnvConfig() + err := opt(opts) + + require.NoError(t, err) + require.NotNil(t, opts.config) + assert.Equal(t, DefaultServerAddress, opts.config.ServerAddress) + assert.Empty(t, opts.config.SpiffeSocketPath) + assert.Empty(t, opts.config.AuthMode) + assert.Empty(t, opts.config.JWTAudience) + }) + + t.Run("should load config from environment variables", func(t *testing.T) { + // Set env vars - t.Setenv automatically restores after test + t.Setenv("DIRECTORY_CLIENT_SERVER_ADDRESS", testServerAddr) + t.Setenv("DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH", testSpiffeSocket) + t.Setenv("DIRECTORY_CLIENT_AUTH_MODE", "jwt") + t.Setenv("DIRECTORY_CLIENT_JWT_AUDIENCE", testJWTAudience) + + opts := &options{} + opt := WithEnvConfig() + err := opt(opts) + + require.NoError(t, err) + require.NotNil(t, opts.config) + assert.Equal(t, testServerAddr, opts.config.ServerAddress) + assert.Equal(t, testSpiffeSocket, opts.config.SpiffeSocketPath) + assert.Equal(t, "jwt", opts.config.AuthMode) + assert.Equal(t, testJWTAudience, opts.config.JWTAudience) + }) +} + +func TestWithAuth_ConfigValidation(t *testing.T) { + t.Run("should error when config is nil", func(t *testing.T) { + opts := &options{ + config: nil, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + require.Error(t, err) + assert.Contains(t, err.Error(), "config is required") + }) + + t.Run("should use insecure credentials when no SPIFFE socket", func(t *testing.T) { + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: "", // No SPIFFE + AuthMode: "", + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + require.NoError(t, err) + assert.NotEmpty(t, opts.authOpts) + assert.Nil(t, opts.authClient) + }) + + t.Run("should use insecure credentials when no auth mode", func(t *testing.T) { + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: testSpiffeSocket, + AuthMode: "", // No auth mode + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + require.NoError(t, err) + assert.NotEmpty(t, opts.authOpts) + assert.Nil(t, opts.authClient) + }) +} + +func TestWithAuth_InvalidAuthMode(t *testing.T) { + t.Run("should error on unsupported auth mode", func(t *testing.T) { + 
// Skip this test if we can't connect to SPIFFE socket + // (SPIFFE connection will fail before we can test invalid auth mode) + if _, err := os.Stat(testSpiffeSocket); os.IsNotExist(err) { + t.Skip("SPIFFE socket not available for testing") + } + + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: testSpiffeSocket, + AuthMode: testInvalidAuthMode, + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + // Will error either from SPIFFE connection or invalid auth mode + require.Error(t, err) + }) +} + +func TestOptions_Chaining(t *testing.T) { + t.Run("should apply multiple options in order", func(t *testing.T) { + cfg1 := &Config{ServerAddress: "server1:8888"} + cfg2 := &Config{ServerAddress: "server2:9999"} + + opts := &options{} + + // Apply first config + opt1 := WithConfig(cfg1) + err := opt1(opts) + require.NoError(t, err) + assert.Equal(t, "server1:8888", opts.config.ServerAddress) + + // Apply second config (should override) + opt2 := WithConfig(cfg2) + err = opt2(opts) + require.NoError(t, err) + assert.Equal(t, "server2:9999", opts.config.ServerAddress) + }) +} + +func TestOptions_DefaultValues(t *testing.T) { + t.Run("should use default server address", func(t *testing.T) { + assert.Equal(t, "0.0.0.0:8888", DefaultServerAddress) + assert.Equal(t, DefaultServerAddress, DefaultConfig.ServerAddress) + }) + + t.Run("should have correct env prefix", func(t *testing.T) { + assert.Equal(t, "DIRECTORY_CLIENT", DefaultEnvPrefix) + }) +} + +func TestOptions_ContextUsage(t *testing.T) { + t.Run("should accept cancelled context", func(t *testing.T) { + // Create already-cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + // No SPIFFE - should use insecure + }, + } + + opt := withAuth(ctx) + err := opt(opts) + + // Should succeed because no actual I/O happens with insecure mode + require.NoError(t, err) + }) +} + +func TestOptions_ResourceFields(t *testing.T) { + t.Run("should initialize with nil resources", func(t *testing.T) { + opts := &options{} + + assert.Nil(t, opts.config) + assert.Nil(t, opts.authClient) + assert.Nil(t, opts.bundleSrc) + assert.Nil(t, opts.x509Src) + assert.Nil(t, opts.jwtSource) + assert.Empty(t, opts.authOpts) + }) + + t.Run("should store config correctly", func(t *testing.T) { + cfg := &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: testSpiffeSocket, + AuthMode: "jwt", + JWTAudience: testJWTAudience, + } + + opts := &options{} + opt := WithConfig(cfg) + err := opt(opts) + + require.NoError(t, err) + assert.NotNil(t, opts.config) + assert.Equal(t, testServerAddr, opts.config.ServerAddress) + assert.Equal(t, testSpiffeSocket, opts.config.SpiffeSocketPath) + assert.Equal(t, "jwt", opts.config.AuthMode) + assert.Equal(t, testJWTAudience, opts.config.JWTAudience) + }) +} + +func TestSetupJWTAuth_Validation(t *testing.T) { + t.Run("should error when JWT audience is missing", func(t *testing.T) { + // This test validates that JWT authentication requires an audience + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: testSpiffeSocket, + AuthMode: "jwt", + JWTAudience: "", // Missing audience + }, + } + + // We need a mock client to test setupJWTAuth + // Since we can't create a real SPIFFE client without the socket, + // we test this through withAuth which calls setupJWTAuth + ctx := context.Background() + opt := 
withAuth(ctx) + err := opt(opts) + + // Should fail because we can't connect to SPIFFE socket + // OR because JWT audience is missing (depending on order of checks) + require.Error(t, err) + // The error could be about SPIFFE connection or missing JWT audience + t.Logf("Error (expected): %v", err) + }) +} + +func TestSetupX509Auth_Validation(t *testing.T) { + t.Run("should attempt x509 auth setup", func(t *testing.T) { + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: testSpiffeSocket, + AuthMode: "x509", + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + // Should fail because we can't connect to SPIFFE socket + require.Error(t, err) + // Error should be about SPIFFE connection + t.Logf("Error (expected): %v", err) + }) +} + +func TestWithAuth_SPIFFESocketConnection(t *testing.T) { + t.Run("should error when SPIFFE socket does not exist", func(t *testing.T) { + // Use a non-existent socket path + nonExistentSocket := "/tmp/non-existent-spiffe-" + t.Name() + ".sock" + + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: nonExistentSocket, + AuthMode: "jwt", + JWTAudience: testJWTAudience, + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + // Should error because socket doesn't exist + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to create SPIFFE client") + }) + + t.Run("should error with x509 auth and non-existent socket", func(t *testing.T) { + nonExistentSocket := "/tmp/non-existent-spiffe-x509-" + t.Name() + ".sock" + + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: nonExistentSocket, + AuthMode: "x509", + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + // Should error because socket doesn't exist + require.Error(t, err) + assert.Contains(t, err.Error(), "failed to create SPIFFE client") + }) +} + +func TestWithAuth_AllAuthModes(t *testing.T) { + testCases := []struct { + name string + authMode string + jwtAudience string + expectError bool + errorContains string + }{ + { + name: "jwt mode without socket", + authMode: "jwt", + jwtAudience: testJWTAudience, + expectError: true, + errorContains: "failed to create SPIFFE client", + }, + { + name: "x509 mode without socket", + authMode: "x509", + jwtAudience: "", + expectError: true, + errorContains: "failed to create SPIFFE client", + }, + { + name: "invalid mode without socket", + authMode: "invalid", + jwtAudience: "", + expectError: true, + errorContains: "unsupported auth mode", + }, + { + name: "empty mode with socket path", + authMode: "", + jwtAudience: "", + expectError: false, + errorContains: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + socketPath := "" + if tc.authMode != "" { + socketPath = "/tmp/test-socket-" + tc.name + ".sock" + } + + opts := &options{ + config: &Config{ + ServerAddress: testServerAddr, + SpiffeSocketPath: socketPath, + AuthMode: tc.authMode, + JWTAudience: tc.jwtAudience, + }, + } + + ctx := context.Background() + opt := withAuth(ctx) + err := opt(opts) + + if tc.expectError { + require.Error(t, err) + + if tc.errorContains != "" { + assert.Contains(t, err.Error(), tc.errorContains) + } + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/client/routing.go b/client/routing.go index 300e671bf..29502e22f 100644 --- a/client/routing.go +++ b/client/routing.go @@ -1,110 +1,110 @@ -// Copyright AGNTCY 
Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "errors" - "fmt" - "io" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/utils/logging" -) - -var logger = logging.Logger("client") - -func (c *Client) Publish(ctx context.Context, req *routingv1.PublishRequest) error { - _, err := c.RoutingServiceClient.Publish(ctx, req) - if err != nil { - return fmt.Errorf("failed to publish object: %w", err) - } - - return nil -} - -func (c *Client) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) { - stream, err := c.RoutingServiceClient.List(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create list stream: %w", err) - } - - resCh := make(chan *routingv1.ListResponse, 100) //nolint:mnd - - go func() { - defer close(resCh) - - for { - obj, err := stream.Recv() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - logger.Error("error receiving object", "error", err) - - return - } - - // Stream ListResponse directly (no legacy wrapper) - // Use select to prevent goroutine leak if consumer stops reading - select { - case resCh <- obj: - case <-ctx.Done(): - logger.Error("context cancelled while receiving list response", "error", ctx.Err()) - - return - } - } - }() - - return resCh, nil -} - -func (c *Client) SearchRouting(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) { - stream, err := c.Search(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create search stream: %w", err) - } - - resCh := make(chan *routingv1.SearchResponse, 100) //nolint:mnd - - go func() { - defer close(resCh) - - for { - obj, err := stream.Recv() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - logger.Error("error receiving search result", "error", err) - - return - } - - // Stream SearchResponse directly - // Use select to prevent goroutine leak if consumer stops reading - select { - case resCh <- obj: - case <-ctx.Done(): - logger.Error("context cancelled while receiving search response", "error", ctx.Err()) - - return - } - } - }() - - return resCh, nil -} - -func (c *Client) Unpublish(ctx context.Context, req *routingv1.UnpublishRequest) error { - _, err := c.RoutingServiceClient.Unpublish(ctx, req) - if err != nil { - return fmt.Errorf("failed to unpublish object: %w", err) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "errors" + "fmt" + "io" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/utils/logging" +) + +var logger = logging.Logger("client") + +func (c *Client) Publish(ctx context.Context, req *routingv1.PublishRequest) error { + _, err := c.RoutingServiceClient.Publish(ctx, req) + if err != nil { + return fmt.Errorf("failed to publish object: %w", err) + } + + return nil +} + +func (c *Client) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) { + stream, err := c.RoutingServiceClient.List(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create list stream: %w", err) + } + + resCh := make(chan *routingv1.ListResponse, 100) //nolint:mnd + + go func() { + defer close(resCh) + + for { + obj, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + logger.Error("error receiving object", "error", err) + + return + } + + // Stream ListResponse 
directly (no legacy wrapper) + // Use select to prevent goroutine leak if consumer stops reading + select { + case resCh <- obj: + case <-ctx.Done(): + logger.Error("context cancelled while receiving list response", "error", ctx.Err()) + + return + } + } + }() + + return resCh, nil +} + +func (c *Client) SearchRouting(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) { + stream, err := c.Search(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create search stream: %w", err) + } + + resCh := make(chan *routingv1.SearchResponse, 100) //nolint:mnd + + go func() { + defer close(resCh) + + for { + obj, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + logger.Error("error receiving search result", "error", err) + + return + } + + // Stream SearchResponse directly + // Use select to prevent goroutine leak if consumer stops reading + select { + case resCh <- obj: + case <-ctx.Done(): + logger.Error("context cancelled while receiving search response", "error", ctx.Err()) + + return + } + } + }() + + return resCh, nil +} + +func (c *Client) Unpublish(ctx context.Context, req *routingv1.UnpublishRequest) error { + _, err := c.RoutingServiceClient.Unpublish(ctx, req) + if err != nil { + return fmt.Errorf("failed to unpublish object: %w", err) + } + + return nil +} diff --git a/client/routing_test.go b/client/routing_test.go index d6a2d15ca..6f1f0c819 100644 --- a/client/routing_test.go +++ b/client/routing_test.go @@ -1,620 +1,620 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "io" - "runtime" - "testing" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "google.golang.org/grpc" -) - -const ( - // Test data constants. - testRecordCID = "test-cid" - - // Test size constants. - testResponseCountSmall = 10 - testResponseCountMedium = 100 - testResponseCountLarge = 1000 - - // Test timeout constants. - testSlowServerDelay = 10 * time.Millisecond - testFastServerDelay = 1 * time.Millisecond - testMediumServerDelay = 50 * time.Millisecond - testResponseTimeout = 1 * time.Second - testDrainTimeout = 500 * time.Millisecond - testCleanupDelay = 50 * time.Millisecond - testLongCleanupDelay = 200 * time.Millisecond - testBenchmarkCleanupDelay = 100 * time.Millisecond - - // Goroutine leak tolerance. - testGoroutineLeakTolerance = 2 - testBenchmarkGoroutineLeakTolerance = 10 - - // Test read counts. - testPartialReadCount = 5 - testSmallReadCount = 10 -) - -// ============================================================================ -// Issue 5: Blocked Goroutine Leaks Tests -// ============================================================================ - -// mockListStream simulates a gRPC List stream. -type mockListStream struct { - responses []*routingv1.ListResponse - index int - delay time.Duration // Delay between sends to simulate slow server - grpc.ClientStream -} - -func (m *mockListStream) Recv() (*routingv1.ListResponse, error) { - if m.delay > 0 { - time.Sleep(m.delay) - } - - if m.index >= len(m.responses) { - return nil, io.EOF - } - - resp := m.responses[m.index] - m.index++ - - return resp, nil -} - -// mockSearchStream simulates a gRPC Search stream. 
-type mockSearchStream struct { - responses []*routingv1.SearchResponse - index int - delay time.Duration - grpc.ClientStream -} - -func (m *mockSearchStream) Recv() (*routingv1.SearchResponse, error) { - if m.delay > 0 { - time.Sleep(m.delay) - } - - if m.index >= len(m.responses) { - return nil, io.EOF - } - - resp := m.responses[m.index] - m.index++ - - return resp, nil -} - -// mockRoutingServiceClient is a mock for testing routing methods. -type mockRoutingServiceClient struct { - listResponses []*routingv1.ListResponse - searchResponses []*routingv1.SearchResponse - listDelay time.Duration - searchDelay time.Duration - routingv1.RoutingServiceClient -} - -func (m *mockRoutingServiceClient) List(ctx context.Context, req *routingv1.ListRequest, opts ...grpc.CallOption) (routingv1.RoutingService_ListClient, error) { - return &mockListStream{ - responses: m.listResponses, - delay: m.listDelay, - }, nil -} - -func (m *mockRoutingServiceClient) Search(ctx context.Context, req *routingv1.SearchRequest, opts ...grpc.CallOption) (routingv1.RoutingService_SearchClient, error) { - return &mockSearchStream{ - responses: m.searchResponses, - delay: m.searchDelay, - }, nil -} - -// countGoroutines returns the current number of goroutines. -func countGoroutines() int { - return runtime.NumGoroutine() -} - -// testContextCancellation is a helper that tests context cancellation for streaming methods. -func testContextCancellation(t *testing.T, startStream func(context.Context) (<-chan interface{}, error), name string) { - t.Helper() - - initialGoroutines := countGoroutines() - t.Logf("Initial goroutines: %d", initialGoroutines) - - ctx, cancel := context.WithCancel(context.Background()) - - resCh, err := startStream(ctx) - if err != nil { - t.Fatalf("%s failed: %v", name, err) - } - - // Read a few responses - readCount := 0 - - for range testPartialReadCount { - select { - case _, ok := <-resCh: - if !ok { - t.Fatal("Channel closed unexpectedly") - } - - readCount++ - case <-time.After(testResponseTimeout): - t.Fatal("Timeout waiting for response") - } - } - - t.Logf("Read %d responses", readCount) - - // Cancel context (consumer stops reading) - cancel() - - // Drain remaining responses to allow goroutine to exit - drained := 0 - drainTimeout := time.After(testDrainTimeout) - -drainLoop: - for { - select { - case _, ok := <-resCh: - if !ok { - // Channel closed, good! - break drainLoop - } - - drained++ - case <-drainTimeout: - t.Log("Timeout while draining channel") - - break drainLoop - } - } - - t.Logf("Drained %d additional responses", drained) - - // Wait for goroutine to clean up - time.Sleep(testCleanupDelay) - - // Count goroutines after - finalGoroutines := countGoroutines() - t.Logf("Final goroutines: %d", finalGoroutines) - - // Verify no goroutine leak (allow some tolerance for test framework goroutines) - if finalGoroutines > initialGoroutines+testGoroutineLeakTolerance { - t.Errorf("Goroutine leak detected: initial=%d, final=%d, leaked=%d", - initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) - } -} - -// testConsumerStopsReading is a helper that tests consumer stopping reading for streaming methods. 
-func testConsumerStopsReading(t *testing.T, startStream func(context.Context) (<-chan interface{}, error), name string) { - t.Helper() - - initialGoroutines := countGoroutines() - t.Logf("Initial goroutines: %d", initialGoroutines) - - ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) - defer cancel() - - resCh, err := startStream(ctx) - if err != nil { - t.Fatalf("%s failed: %v", name, err) - } - - // Read only a few responses and stop (consumer stops reading) - readCount := 0 - - for range testSmallReadCount { - <-resCh - - readCount++ - } - - t.Logf("Read %d responses, then stopped", readCount) - - // Cancel context to signal we're done - cancel() - - // Wait for cleanup - time.Sleep(testLongCleanupDelay) - - finalGoroutines := countGoroutines() - t.Logf("Final goroutines: %d", finalGoroutines) - - // Verify no significant goroutine leak - if finalGoroutines > initialGoroutines+testGoroutineLeakTolerance { - t.Errorf("Goroutine leak detected: initial=%d, final=%d, leaked=%d", - initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) - } -} - -// TestList_ContextCancellation tests that List() properly handles context cancellation. -func TestList_ContextCancellation(t *testing.T) { - // Create mock responses - responses := make([]*routingv1.ListResponse, testResponseCountMedium) - for i := range testResponseCountMedium { - responses[i] = &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - listResponses: responses, - listDelay: testSlowServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Use helper to test context cancellation - testContextCancellation(t, func(ctx context.Context) (<-chan interface{}, error) { - ch, err := client.List(ctx, &routingv1.ListRequest{}) - if err != nil { - return nil, err - } - // Convert typed channel to interface{} channel - outCh := make(chan interface{}) - - go func() { - defer close(outCh) - - for v := range ch { - outCh <- v - } - }() - - return outCh, nil - }, "List()") -} - -// TestList_ConsumerStopsReading tests that List() handles consumer stopping reading. -func TestList_ConsumerStopsReading(t *testing.T) { - // Create many mock responses - responses := make([]*routingv1.ListResponse, testResponseCountLarge) - for i := range testResponseCountLarge { - responses[i] = &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - listResponses: responses, - listDelay: testFastServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Use helper to test consumer stops reading - testConsumerStopsReading(t, func(ctx context.Context) (<-chan interface{}, error) { - ch, err := client.List(ctx, &routingv1.ListRequest{}) - if err != nil { - return nil, err - } - // Convert typed channel to interface{} channel - outCh := make(chan interface{}) - - go func() { - defer close(outCh) - - for v := range ch { - outCh <- v - } - }() - - return outCh, nil - }, "List()") -} - -// TestList_FullConsumption tests that List() works correctly when consumer reads everything. 
-func TestList_FullConsumption(t *testing.T) { - responses := make([]*routingv1.ListResponse, testResponseCountSmall) - for i := range testResponseCountSmall { - responses[i] = &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - listResponses: responses, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - ctx := context.Background() - - resCh, err := client.List(ctx, &routingv1.ListRequest{}) - if err != nil { - t.Fatalf("List() failed: %v", err) - } - - // Read all responses - count := 0 - for range resCh { - count++ - } - - if count != len(responses) { - t.Errorf("Expected to receive %d responses, got %d", len(responses), count) - } -} - -// TestSearchRouting_ContextCancellation tests that SearchRouting() properly handles context cancellation. -func TestSearchRouting_ContextCancellation(t *testing.T) { - responses := make([]*routingv1.SearchResponse, testResponseCountMedium) - for i := range testResponseCountMedium { - responses[i] = &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - searchResponses: responses, - searchDelay: testSlowServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Use helper to test context cancellation - testContextCancellation(t, func(ctx context.Context) (<-chan interface{}, error) { - ch, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) - if err != nil { - return nil, err - } - // Convert typed channel to interface{} channel - outCh := make(chan interface{}) - - go func() { - defer close(outCh) - - for v := range ch { - outCh <- v - } - }() - - return outCh, nil - }, "SearchRouting()") -} - -// TestSearchRouting_ConsumerStopsReading tests that SearchRouting() handles consumer stopping reading. -func TestSearchRouting_ConsumerStopsReading(t *testing.T) { - responses := make([]*routingv1.SearchResponse, testResponseCountLarge) - for i := range testResponseCountLarge { - responses[i] = &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - searchResponses: responses, - searchDelay: testFastServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Use helper to test consumer stops reading - testConsumerStopsReading(t, func(ctx context.Context) (<-chan interface{}, error) { - ch, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) - if err != nil { - return nil, err - } - // Convert typed channel to interface{} channel - outCh := make(chan interface{}) - - go func() { - defer close(outCh) - - for v := range ch { - outCh <- v - } - }() - - return outCh, nil - }, "SearchRouting()") -} - -// TestSearchRouting_FullConsumption tests that SearchRouting() works correctly when consumer reads everything. 
-func TestSearchRouting_FullConsumption(t *testing.T) { - responses := make([]*routingv1.SearchResponse, testResponseCountSmall) - for i := range testResponseCountSmall { - responses[i] = &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - searchResponses: responses, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - ctx := context.Background() - - resCh, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) - if err != nil { - t.Fatalf("SearchRouting() failed: %v", err) - } - - // Read all responses - count := 0 - for range resCh { - count++ - } - - if count != len(responses) { - t.Errorf("Expected to receive %d responses, got %d", len(responses), count) - } -} - -// TestList_ImmediateCancellation tests List() with immediate context cancellation. -func TestList_ImmediateCancellation(t *testing.T) { - responses := make([]*routingv1.ListResponse, testResponseCountMedium) - for i := range testResponseCountMedium { - responses[i] = &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - listResponses: responses, - listDelay: testMediumServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Create already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately - - resCh, err := client.List(ctx, &routingv1.ListRequest{}) - if err != nil { - t.Fatalf("List() failed: %v", err) - } - - // Channel should close quickly due to cancelled context - select { - case _, ok := <-resCh: - if ok { - // If we got a response, that's OK - might have sent before cancel was processed - t.Logf("Got response before cancellation was processed") - } - case <-time.After(testDrainTimeout): - t.Error("Channel should close when context is already cancelled") - } - - // Wait for cleanup - time.Sleep(testBenchmarkCleanupDelay) -} - -// TestSearchRouting_ImmediateCancellation tests SearchRouting() with immediate context cancellation. -func TestSearchRouting_ImmediateCancellation(t *testing.T) { - responses := make([]*routingv1.SearchResponse, testResponseCountMedium) - for i := range testResponseCountMedium { - responses[i] = &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - searchResponses: responses, - searchDelay: testMediumServerDelay, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - // Create already-cancelled context - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - resCh, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) - if err != nil { - t.Fatalf("SearchRouting() failed: %v", err) - } - - // Channel should close quickly - select { - case _, ok := <-resCh: - if ok { - t.Logf("Got response before cancellation was processed") - } - case <-time.After(testDrainTimeout): - t.Error("Channel should close when context is already cancelled") - } - - // Wait for cleanup - time.Sleep(testBenchmarkCleanupDelay) -} - -// BenchmarkList_NoLeak benchmarks List() to detect goroutine leaks under load. 
-func BenchmarkList_NoLeak(b *testing.B) { - responses := make([]*routingv1.ListResponse, testResponseCountSmall) - for i := range testResponseCountSmall { - responses[i] = &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{ - Cid: testRecordCID, - }, - } - } - - mockClient := &mockRoutingServiceClient{ - listResponses: responses, - } - - client := &Client{ - RoutingServiceClient: mockClient, - } - - initialGoroutines := countGoroutines() - - b.ResetTimer() - - for range b.N { - ctx, cancel := context.WithCancel(context.Background()) - - resCh, err := client.List(ctx, &routingv1.ListRequest{}) - if err != nil { - b.Fatalf("List() failed: %v", err) - } - - // Read a few then cancel - for range testPartialReadCount - 2 { - <-resCh - } - - cancel() - - // Drain channel - for range resCh { - } - } - - b.StopTimer() - - // Check for goroutine leaks - runtime.GC() - time.Sleep(testBenchmarkCleanupDelay) - - finalGoroutines := countGoroutines() - - if finalGoroutines > initialGoroutines+testBenchmarkGoroutineLeakTolerance { - b.Errorf("Potential goroutine leak: initial=%d, final=%d, leaked=%d", - initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "io" + "runtime" + "testing" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "google.golang.org/grpc" +) + +const ( + // Test data constants. + testRecordCID = "test-cid" + + // Test size constants. + testResponseCountSmall = 10 + testResponseCountMedium = 100 + testResponseCountLarge = 1000 + + // Test timeout constants. + testSlowServerDelay = 10 * time.Millisecond + testFastServerDelay = 1 * time.Millisecond + testMediumServerDelay = 50 * time.Millisecond + testResponseTimeout = 1 * time.Second + testDrainTimeout = 500 * time.Millisecond + testCleanupDelay = 50 * time.Millisecond + testLongCleanupDelay = 200 * time.Millisecond + testBenchmarkCleanupDelay = 100 * time.Millisecond + + // Goroutine leak tolerance. + testGoroutineLeakTolerance = 2 + testBenchmarkGoroutineLeakTolerance = 10 + + // Test read counts. + testPartialReadCount = 5 + testSmallReadCount = 10 +) + +// ============================================================================ +// Issue 5: Blocked Goroutine Leaks Tests +// ============================================================================ + +// mockListStream simulates a gRPC List stream. +type mockListStream struct { + responses []*routingv1.ListResponse + index int + delay time.Duration // Delay between sends to simulate slow server + grpc.ClientStream +} + +func (m *mockListStream) Recv() (*routingv1.ListResponse, error) { + if m.delay > 0 { + time.Sleep(m.delay) + } + + if m.index >= len(m.responses) { + return nil, io.EOF + } + + resp := m.responses[m.index] + m.index++ + + return resp, nil +} + +// mockSearchStream simulates a gRPC Search stream. +type mockSearchStream struct { + responses []*routingv1.SearchResponse + index int + delay time.Duration + grpc.ClientStream +} + +func (m *mockSearchStream) Recv() (*routingv1.SearchResponse, error) { + if m.delay > 0 { + time.Sleep(m.delay) + } + + if m.index >= len(m.responses) { + return nil, io.EOF + } + + resp := m.responses[m.index] + m.index++ + + return resp, nil +} + +// mockRoutingServiceClient is a mock for testing routing methods. 
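+// It embeds routingv1.RoutingServiceClient so the remaining interface methods
+// are satisfied without stubs (calling any of them would panic, because the
+// embedded interface value is nil); only List and Search are overridden.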
+type mockRoutingServiceClient struct { + listResponses []*routingv1.ListResponse + searchResponses []*routingv1.SearchResponse + listDelay time.Duration + searchDelay time.Duration + routingv1.RoutingServiceClient +} + +func (m *mockRoutingServiceClient) List(ctx context.Context, req *routingv1.ListRequest, opts ...grpc.CallOption) (routingv1.RoutingService_ListClient, error) { + return &mockListStream{ + responses: m.listResponses, + delay: m.listDelay, + }, nil +} + +func (m *mockRoutingServiceClient) Search(ctx context.Context, req *routingv1.SearchRequest, opts ...grpc.CallOption) (routingv1.RoutingService_SearchClient, error) { + return &mockSearchStream{ + responses: m.searchResponses, + delay: m.searchDelay, + }, nil +} + +// countGoroutines returns the current number of goroutines. +func countGoroutines() int { + return runtime.NumGoroutine() +} + +// testContextCancellation is a helper that tests context cancellation for streaming methods. +func testContextCancellation(t *testing.T, startStream func(context.Context) (<-chan interface{}, error), name string) { + t.Helper() + + initialGoroutines := countGoroutines() + t.Logf("Initial goroutines: %d", initialGoroutines) + + ctx, cancel := context.WithCancel(context.Background()) + + resCh, err := startStream(ctx) + if err != nil { + t.Fatalf("%s failed: %v", name, err) + } + + // Read a few responses + readCount := 0 + + for range testPartialReadCount { + select { + case _, ok := <-resCh: + if !ok { + t.Fatal("Channel closed unexpectedly") + } + + readCount++ + case <-time.After(testResponseTimeout): + t.Fatal("Timeout waiting for response") + } + } + + t.Logf("Read %d responses", readCount) + + // Cancel context (consumer stops reading) + cancel() + + // Drain remaining responses to allow goroutine to exit + drained := 0 + drainTimeout := time.After(testDrainTimeout) + +drainLoop: + for { + select { + case _, ok := <-resCh: + if !ok { + // Channel closed, good! + break drainLoop + } + + drained++ + case <-drainTimeout: + t.Log("Timeout while draining channel") + + break drainLoop + } + } + + t.Logf("Drained %d additional responses", drained) + + // Wait for goroutine to clean up + time.Sleep(testCleanupDelay) + + // Count goroutines after + finalGoroutines := countGoroutines() + t.Logf("Final goroutines: %d", finalGoroutines) + + // Verify no goroutine leak (allow some tolerance for test framework goroutines) + if finalGoroutines > initialGoroutines+testGoroutineLeakTolerance { + t.Errorf("Goroutine leak detected: initial=%d, final=%d, leaked=%d", + initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) + } +} + +// testConsumerStopsReading is a helper that tests consumer stopping reading for streaming methods. 
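+// It depends on the producer goroutine in client.List/client.SearchRouting
+// being able to exit on its own: the result channel is buffered (size 100)
+// and the send is wrapped in a select on ctx.Done(), so cancelling the
+// context unblocks the goroutine even when the consumer walks away.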
+func testConsumerStopsReading(t *testing.T, startStream func(context.Context) (<-chan interface{}, error), name string) { + t.Helper() + + initialGoroutines := countGoroutines() + t.Logf("Initial goroutines: %d", initialGoroutines) + + ctx, cancel := context.WithTimeout(context.Background(), testContextTimeout) + defer cancel() + + resCh, err := startStream(ctx) + if err != nil { + t.Fatalf("%s failed: %v", name, err) + } + + // Read only a few responses and stop (consumer stops reading) + readCount := 0 + + for range testSmallReadCount { + <-resCh + + readCount++ + } + + t.Logf("Read %d responses, then stopped", readCount) + + // Cancel context to signal we're done + cancel() + + // Wait for cleanup + time.Sleep(testLongCleanupDelay) + + finalGoroutines := countGoroutines() + t.Logf("Final goroutines: %d", finalGoroutines) + + // Verify no significant goroutine leak + if finalGoroutines > initialGoroutines+testGoroutineLeakTolerance { + t.Errorf("Goroutine leak detected: initial=%d, final=%d, leaked=%d", + initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) + } +} + +// TestList_ContextCancellation tests that List() properly handles context cancellation. +func TestList_ContextCancellation(t *testing.T) { + // Create mock responses + responses := make([]*routingv1.ListResponse, testResponseCountMedium) + for i := range testResponseCountMedium { + responses[i] = &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + listResponses: responses, + listDelay: testSlowServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Use helper to test context cancellation + testContextCancellation(t, func(ctx context.Context) (<-chan interface{}, error) { + ch, err := client.List(ctx, &routingv1.ListRequest{}) + if err != nil { + return nil, err + } + // Convert typed channel to interface{} channel + outCh := make(chan interface{}) + + go func() { + defer close(outCh) + + for v := range ch { + outCh <- v + } + }() + + return outCh, nil + }, "List()") +} + +// TestList_ConsumerStopsReading tests that List() handles consumer stopping reading. +func TestList_ConsumerStopsReading(t *testing.T) { + // Create many mock responses + responses := make([]*routingv1.ListResponse, testResponseCountLarge) + for i := range testResponseCountLarge { + responses[i] = &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + listResponses: responses, + listDelay: testFastServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Use helper to test consumer stops reading + testConsumerStopsReading(t, func(ctx context.Context) (<-chan interface{}, error) { + ch, err := client.List(ctx, &routingv1.ListRequest{}) + if err != nil { + return nil, err + } + // Convert typed channel to interface{} channel + outCh := make(chan interface{}) + + go func() { + defer close(outCh) + + for v := range ch { + outCh <- v + } + }() + + return outCh, nil + }, "List()") +} + +// TestList_FullConsumption tests that List() works correctly when consumer reads everything. 
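+// Full consumption also exercises the io.EOF path in client.List, where the
+// producer goroutine breaks out of its Recv loop and closes the channel via
+// defer close(resCh).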
+func TestList_FullConsumption(t *testing.T) { + responses := make([]*routingv1.ListResponse, testResponseCountSmall) + for i := range testResponseCountSmall { + responses[i] = &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + listResponses: responses, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + ctx := context.Background() + + resCh, err := client.List(ctx, &routingv1.ListRequest{}) + if err != nil { + t.Fatalf("List() failed: %v", err) + } + + // Read all responses + count := 0 + for range resCh { + count++ + } + + if count != len(responses) { + t.Errorf("Expected to receive %d responses, got %d", len(responses), count) + } +} + +// TestSearchRouting_ContextCancellation tests that SearchRouting() properly handles context cancellation. +func TestSearchRouting_ContextCancellation(t *testing.T) { + responses := make([]*routingv1.SearchResponse, testResponseCountMedium) + for i := range testResponseCountMedium { + responses[i] = &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + searchResponses: responses, + searchDelay: testSlowServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Use helper to test context cancellation + testContextCancellation(t, func(ctx context.Context) (<-chan interface{}, error) { + ch, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) + if err != nil { + return nil, err + } + // Convert typed channel to interface{} channel + outCh := make(chan interface{}) + + go func() { + defer close(outCh) + + for v := range ch { + outCh <- v + } + }() + + return outCh, nil + }, "SearchRouting()") +} + +// TestSearchRouting_ConsumerStopsReading tests that SearchRouting() handles consumer stopping reading. +func TestSearchRouting_ConsumerStopsReading(t *testing.T) { + responses := make([]*routingv1.SearchResponse, testResponseCountLarge) + for i := range testResponseCountLarge { + responses[i] = &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + searchResponses: responses, + searchDelay: testFastServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Use helper to test consumer stops reading + testConsumerStopsReading(t, func(ctx context.Context) (<-chan interface{}, error) { + ch, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) + if err != nil { + return nil, err + } + // Convert typed channel to interface{} channel + outCh := make(chan interface{}) + + go func() { + defer close(outCh) + + for v := range ch { + outCh <- v + } + }() + + return outCh, nil + }, "SearchRouting()") +} + +// TestSearchRouting_FullConsumption tests that SearchRouting() works correctly when consumer reads everything. 
+func TestSearchRouting_FullConsumption(t *testing.T) { + responses := make([]*routingv1.SearchResponse, testResponseCountSmall) + for i := range testResponseCountSmall { + responses[i] = &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + searchResponses: responses, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + ctx := context.Background() + + resCh, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) + if err != nil { + t.Fatalf("SearchRouting() failed: %v", err) + } + + // Read all responses + count := 0 + for range resCh { + count++ + } + + if count != len(responses) { + t.Errorf("Expected to receive %d responses, got %d", len(responses), count) + } +} + +// TestList_ImmediateCancellation tests List() with immediate context cancellation. +func TestList_ImmediateCancellation(t *testing.T) { + responses := make([]*routingv1.ListResponse, testResponseCountMedium) + for i := range testResponseCountMedium { + responses[i] = &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + listResponses: responses, + listDelay: testMediumServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Create already-cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + resCh, err := client.List(ctx, &routingv1.ListRequest{}) + if err != nil { + t.Fatalf("List() failed: %v", err) + } + + // Channel should close quickly due to cancelled context + select { + case _, ok := <-resCh: + if ok { + // If we got a response, that's OK - might have sent before cancel was processed + t.Logf("Got response before cancellation was processed") + } + case <-time.After(testDrainTimeout): + t.Error("Channel should close when context is already cancelled") + } + + // Wait for cleanup + time.Sleep(testBenchmarkCleanupDelay) +} + +// TestSearchRouting_ImmediateCancellation tests SearchRouting() with immediate context cancellation. +func TestSearchRouting_ImmediateCancellation(t *testing.T) { + responses := make([]*routingv1.SearchResponse, testResponseCountMedium) + for i := range testResponseCountMedium { + responses[i] = &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + searchResponses: responses, + searchDelay: testMediumServerDelay, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + // Create already-cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + resCh, err := client.SearchRouting(ctx, &routingv1.SearchRequest{}) + if err != nil { + t.Fatalf("SearchRouting() failed: %v", err) + } + + // Channel should close quickly + select { + case _, ok := <-resCh: + if ok { + t.Logf("Got response before cancellation was processed") + } + case <-time.After(testDrainTimeout): + t.Error("Channel should close when context is already cancelled") + } + + // Wait for cleanup + time.Sleep(testBenchmarkCleanupDelay) +} + +// BenchmarkList_NoLeak benchmarks List() to detect goroutine leaks under load. 
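These tests detect leaks by comparing runtime.NumGoroutine() counts against a tolerance, which is inherently noisy. If adding a test dependency is acceptable, go.uber.org/goleak performs the same check more precisely by snapshotting goroutine stacks; a sketch of how it would be wired in:

    package client

    import (
        "testing"

        "go.uber.org/goleak"
    )

    // TestMain fails the package's tests if goroutines are still
    // running once all tests have completed.
    func TestMain(m *testing.M) {
        goleak.VerifyTestMain(m)
    }
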
+func BenchmarkList_NoLeak(b *testing.B) { + responses := make([]*routingv1.ListResponse, testResponseCountSmall) + for i := range testResponseCountSmall { + responses[i] = &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{ + Cid: testRecordCID, + }, + } + } + + mockClient := &mockRoutingServiceClient{ + listResponses: responses, + } + + client := &Client{ + RoutingServiceClient: mockClient, + } + + initialGoroutines := countGoroutines() + + b.ResetTimer() + + for range b.N { + ctx, cancel := context.WithCancel(context.Background()) + + resCh, err := client.List(ctx, &routingv1.ListRequest{}) + if err != nil { + b.Fatalf("List() failed: %v", err) + } + + // Read a few then cancel + for range testPartialReadCount - 2 { + <-resCh + } + + cancel() + + // Drain channel + for range resCh { + } + } + + b.StopTimer() + + // Check for goroutine leaks + runtime.GC() + time.Sleep(testBenchmarkCleanupDelay) + + finalGoroutines := countGoroutines() + + if finalGoroutines > initialGoroutines+testBenchmarkGoroutineLeakTolerance { + b.Errorf("Potential goroutine leak: initial=%d, final=%d, leaked=%d", + initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines) + } +} diff --git a/client/search.go b/client/search.go index 500e19044..d7d340a62 100644 --- a/client/search.go +++ b/client/search.go @@ -1,42 +1,42 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "fmt" - - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/client/streaming" -) - -// SearchCIDs searches for record CIDs matching the given request. -func (c *Client) SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) { - stream, err := c.SearchServiceClient.SearchCIDs(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create search CIDs stream: %w", err) - } - - result, err := streaming.ProcessServerStream(ctx, stream) - if err != nil { - return nil, fmt.Errorf("failed to process search CIDs stream: %w", err) - } - - return result, nil -} - -// SearchRecords searches for full records matching the given request. -func (c *Client) SearchRecords(ctx context.Context, req *searchv1.SearchRecordsRequest) (streaming.StreamResult[searchv1.SearchRecordsResponse], error) { - stream, err := c.SearchServiceClient.SearchRecords(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create search records stream: %w", err) - } - - result, err := streaming.ProcessServerStream(ctx, stream) - if err != nil { - return nil, fmt.Errorf("failed to process search records stream: %w", err) - } - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "fmt" + + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/client/streaming" +) + +// SearchCIDs searches for record CIDs matching the given request. 
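SearchCIDs, defined just below, returns a streaming.StreamResult rather than a bare channel. A hedged consumption sketch following the ErrCh/ResCh/DoneCh pattern used elsewhere in this change (the empty request and the response handling are assumptions, since the SearchCIDsRequest fields are not shown here):

    func collectCIDs(ctx context.Context, c *Client) error {
        result, err := c.SearchCIDs(ctx, &searchv1.SearchCIDsRequest{})
        if err != nil {
            return err
        }

        var errs error

        for {
            select {
            case err := <-result.ErrCh():
                errs = errors.Join(errs, err)
            case resp := <-result.ResCh():
                _ = resp // inspect each SearchCIDsResponse here
            case <-result.DoneCh():
                return errs
            }
        }
    }
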
+func (c *Client) SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) { + stream, err := c.SearchServiceClient.SearchCIDs(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create search CIDs stream: %w", err) + } + + result, err := streaming.ProcessServerStream(ctx, stream) + if err != nil { + return nil, fmt.Errorf("failed to process search CIDs stream: %w", err) + } + + return result, nil +} + +// SearchRecords searches for full records matching the given request. +func (c *Client) SearchRecords(ctx context.Context, req *searchv1.SearchRecordsRequest) (streaming.StreamResult[searchv1.SearchRecordsResponse], error) { + stream, err := c.SearchServiceClient.SearchRecords(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create search records stream: %w", err) + } + + result, err := streaming.ProcessServerStream(ctx, stream) + if err != nil { + return nil, fmt.Errorf("failed to process search records stream: %w", err) + } + + return result, nil +} diff --git a/client/sign.go b/client/sign.go index 678a97e98..ccd467ef6 100644 --- a/client/sign.go +++ b/client/sign.go @@ -1,197 +1,197 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/utils/cosign" -) - -type SignOpts struct { - FulcioURL string - RekorURL string - TimestampURL string - OIDCProviderURL string - OIDCClientID string - OIDCToken string - Key string -} - -// Sign routes to the appropriate signing method based on provider type. -// This is the main entry point for signing operations. -func (c *Client) Sign(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { - if req.GetProvider() == nil { - return nil, errors.New("signature provider must be specified") - } - - switch provider := req.GetProvider().GetRequest().(type) { - case *signv1.SignRequestProvider_Key: - return c.SignWithKey(ctx, req) - case *signv1.SignRequestProvider_Oidc: - return c.SignWithOIDC(ctx, req) - default: - return nil, fmt.Errorf("unsupported signature provider type: %T", provider) - } -} - -// SignWithOIDC signs the record using keyless OIDC service-based signing. -// The OIDC ID Token can be provided by the caller, or cosign will handle interactive OIDC flow. -// This implementation uses cosign sign-blob command for OIDC signing. -func (c *Client) SignWithOIDC(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { - // Validate request. 
- if req.GetRecordRef() == nil { - return nil, errors.New("record ref must be set") - } - - oidcSigner := req.GetProvider().GetOidc() - - digest, err := corev1.ConvertCIDToDigest(req.GetRecordRef().GetCid()) - if err != nil { - return nil, fmt.Errorf("failed to convert CID to digest: %w", err) - } - - payloadBytes, err := cosign.GeneratePayload(digest.String()) - if err != nil { - return nil, fmt.Errorf("failed to generate payload: %w", err) - } - - // Prepare options for signing - signOpts := &cosign.SignBlobOIDCOptions{ - Payload: payloadBytes, - IDToken: oidcSigner.GetIdToken(), - } - - // Set URLs from options if provided - if opts := oidcSigner.GetOptions(); opts != nil { - signOpts.FulcioURL = opts.GetFulcioUrl() - signOpts.RekorURL = opts.GetRekorUrl() - signOpts.TimestampURL = opts.GetTimestampUrl() - signOpts.OIDCProviderURL = opts.GetOidcProviderUrl() - } - - // Sign using utility function - result, err := cosign.SignBlobWithOIDC(ctx, signOpts) - if err != nil { - return nil, fmt.Errorf("failed to sign with OIDC: %w", err) - } - - signatureObj := &signv1.Signature{ - Signature: result.Signature, - Annotations: map[string]string{ - "payload": string(payloadBytes), - }, - } - - // Push signature and public key to store - err = c.pushReferrersToStore(ctx, req.GetRecordRef().GetCid(), signatureObj, result.PublicKey) - if err != nil { - return nil, fmt.Errorf("failed to push referrers to store: %w", err) - } - - return &signv1.SignResponse{ - Signature: signatureObj, - }, nil -} - -func (c *Client) SignWithKey(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { - keySigner := req.GetProvider().GetKey() - - password := keySigner.GetPassword() - if password == nil { - password = []byte("") // Empty password is valid for cosign. 
- } - - digest, err := corev1.ConvertCIDToDigest(req.GetRecordRef().GetCid()) - if err != nil { - return nil, fmt.Errorf("failed to convert CID to digest: %w", err) - } - - payloadBytes, err := cosign.GeneratePayload(digest.String()) - if err != nil { - return nil, fmt.Errorf("failed to generate payload: %w", err) - } - - // Prepare options for signing - signOpts := &cosign.SignBlobKeyOptions{ - Payload: payloadBytes, - PrivateKey: keySigner.GetPrivateKey(), - Password: password, - } - - // Sign using utility function - result, err := cosign.SignBlobWithKey(ctx, signOpts) - if err != nil { - return nil, fmt.Errorf("failed to sign with key: %w", err) - } - - // Create the signature object - signatureObj := &signv1.Signature{ - Signature: result.Signature, - Annotations: map[string]string{ - "payload": string(payloadBytes), - }, - } - - // Push signature and public key to store - err = c.pushReferrersToStore(ctx, req.GetRecordRef().GetCid(), signatureObj, result.PublicKey) - if err != nil { - return nil, fmt.Errorf("failed to push referrers to store: %w", err) - } - - return &signv1.SignResponse{ - Signature: signatureObj, - }, nil -} - -func (c *Client) pushReferrersToStore(ctx context.Context, recordCID string, signature *signv1.Signature, publicKey string) error { - if recordCID == "" { - return errors.New("record CID is required") - } - - // Create public key referrer - pk := &signv1.PublicKey{ - Key: publicKey, - } - - publicKeyReferrer, err := pk.MarshalReferrer() - if err != nil { - return fmt.Errorf("failed to encode public key to referrer: %w", err) - } - - // Push public key to store as a referrer - err = c.PushReferrer(ctx, &storev1.PushReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: recordCID, - }, - Referrer: publicKeyReferrer, - }) - if err != nil { - return fmt.Errorf("failed to store public key: %w", err) - } - - // Create signature referrer - signatureReferrer, err := signature.MarshalReferrer() - if err != nil { - return fmt.Errorf("failed to encode signature to referrer: %w", err) - } - - // Push signature to store as a referrer - err = c.PushReferrer(ctx, &storev1.PushReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: recordCID, - }, - Referrer: signatureReferrer, - }) - if err != nil { - return fmt.Errorf("failed to store signature: %w", err) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/utils/cosign" +) + +type SignOpts struct { + FulcioURL string + RekorURL string + TimestampURL string + OIDCProviderURL string + OIDCClientID string + OIDCToken string + Key string +} + +// Sign routes to the appropriate signing method based on provider type. +// This is the main entry point for signing operations. 
+func (c *Client) Sign(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { + if req.GetProvider() == nil { + return nil, errors.New("signature provider must be specified") + } + + switch provider := req.GetProvider().GetRequest().(type) { + case *signv1.SignRequestProvider_Key: + return c.SignWithKey(ctx, req) + case *signv1.SignRequestProvider_Oidc: + return c.SignWithOIDC(ctx, req) + default: + return nil, fmt.Errorf("unsupported signature provider type: %T", provider) + } +} + +// SignWithOIDC signs the record using keyless OIDC service-based signing. +// The OIDC ID Token can be provided by the caller, or cosign will handle interactive OIDC flow. +// This implementation uses cosign sign-blob command for OIDC signing. +func (c *Client) SignWithOIDC(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { + // Validate request. + if req.GetRecordRef() == nil { + return nil, errors.New("record ref must be set") + } + + oidcSigner := req.GetProvider().GetOidc() + + digest, err := corev1.ConvertCIDToDigest(req.GetRecordRef().GetCid()) + if err != nil { + return nil, fmt.Errorf("failed to convert CID to digest: %w", err) + } + + payloadBytes, err := cosign.GeneratePayload(digest.String()) + if err != nil { + return nil, fmt.Errorf("failed to generate payload: %w", err) + } + + // Prepare options for signing + signOpts := &cosign.SignBlobOIDCOptions{ + Payload: payloadBytes, + IDToken: oidcSigner.GetIdToken(), + } + + // Set URLs from options if provided + if opts := oidcSigner.GetOptions(); opts != nil { + signOpts.FulcioURL = opts.GetFulcioUrl() + signOpts.RekorURL = opts.GetRekorUrl() + signOpts.TimestampURL = opts.GetTimestampUrl() + signOpts.OIDCProviderURL = opts.GetOidcProviderUrl() + } + + // Sign using utility function + result, err := cosign.SignBlobWithOIDC(ctx, signOpts) + if err != nil { + return nil, fmt.Errorf("failed to sign with OIDC: %w", err) + } + + signatureObj := &signv1.Signature{ + Signature: result.Signature, + Annotations: map[string]string{ + "payload": string(payloadBytes), + }, + } + + // Push signature and public key to store + err = c.pushReferrersToStore(ctx, req.GetRecordRef().GetCid(), signatureObj, result.PublicKey) + if err != nil { + return nil, fmt.Errorf("failed to push referrers to store: %w", err) + } + + return &signv1.SignResponse{ + Signature: signatureObj, + }, nil +} + +func (c *Client) SignWithKey(ctx context.Context, req *signv1.SignRequest) (*signv1.SignResponse, error) { + keySigner := req.GetProvider().GetKey() + + password := keySigner.GetPassword() + if password == nil { + password = []byte("") // Empty password is valid for cosign. 
+ } + + digest, err := corev1.ConvertCIDToDigest(req.GetRecordRef().GetCid()) + if err != nil { + return nil, fmt.Errorf("failed to convert CID to digest: %w", err) + } + + payloadBytes, err := cosign.GeneratePayload(digest.String()) + if err != nil { + return nil, fmt.Errorf("failed to generate payload: %w", err) + } + + // Prepare options for signing + signOpts := &cosign.SignBlobKeyOptions{ + Payload: payloadBytes, + PrivateKey: keySigner.GetPrivateKey(), + Password: password, + } + + // Sign using utility function + result, err := cosign.SignBlobWithKey(ctx, signOpts) + if err != nil { + return nil, fmt.Errorf("failed to sign with key: %w", err) + } + + // Create the signature object + signatureObj := &signv1.Signature{ + Signature: result.Signature, + Annotations: map[string]string{ + "payload": string(payloadBytes), + }, + } + + // Push signature and public key to store + err = c.pushReferrersToStore(ctx, req.GetRecordRef().GetCid(), signatureObj, result.PublicKey) + if err != nil { + return nil, fmt.Errorf("failed to push referrers to store: %w", err) + } + + return &signv1.SignResponse{ + Signature: signatureObj, + }, nil +} + +func (c *Client) pushReferrersToStore(ctx context.Context, recordCID string, signature *signv1.Signature, publicKey string) error { + if recordCID == "" { + return errors.New("record CID is required") + } + + // Create public key referrer + pk := &signv1.PublicKey{ + Key: publicKey, + } + + publicKeyReferrer, err := pk.MarshalReferrer() + if err != nil { + return fmt.Errorf("failed to encode public key to referrer: %w", err) + } + + // Push public key to store as a referrer + err = c.PushReferrer(ctx, &storev1.PushReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: recordCID, + }, + Referrer: publicKeyReferrer, + }) + if err != nil { + return fmt.Errorf("failed to store public key: %w", err) + } + + // Create signature referrer + signatureReferrer, err := signature.MarshalReferrer() + if err != nil { + return fmt.Errorf("failed to encode signature to referrer: %w", err) + } + + // Push signature to store as a referrer + err = c.PushReferrer(ctx, &storev1.PushReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: recordCID, + }, + Referrer: signatureReferrer, + }) + if err != nil { + return fmt.Errorf("failed to store signature: %w", err) + } + + return nil +} diff --git a/client/spiffe_test.go b/client/spiffe_test.go index f99443056..2a98435d3 100644 --- a/client/spiffe_test.go +++ b/client/spiffe_test.go @@ -1,475 +1,475 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "errors" - "sync" - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" -) - -const ( - // Test server constants for SPIFFE tests. - spiffeTestServerBufnet = "bufnet" - spiffeTestCleanupWait = 10 * time.Millisecond -) - -// ============================================================================ -// Issue 3: SPIFFE Sources Resource Leaks Tests -// ============================================================================ - -// mockCloser is a mock implementation of io.Closer for testing. 
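The Sign entry point above dispatches on the provider oneof. A construction sketch for the key-based path; only SignRequestProvider_Key appears verbatim in the switch, so the inner message name (signv1.SignWithKey here) and its field names are assumptions inferred from the getters GetPrivateKey and GetPassword, and the surrounding variables are hypothetical:

    // Hypothetical construction; message and field names are assumptions.
    req := &signv1.SignRequest{
        RecordRef: &corev1.RecordRef{Cid: recordCID},
        Provider: &signv1.SignRequestProvider{
            Request: &signv1.SignRequestProvider_Key{
                Key: &signv1.SignWithKey{
                    PrivateKey: privateKeyPEM,
                    Password:   []byte("passphrase"),
                },
            },
        },
    }

    resp, err := client.Sign(ctx, req)
    if err != nil {
        return err
    }

    _ = resp.GetSignature()
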
-type mockCloser struct { - closed bool - closeErr error - closeChan chan struct{} // Signal when Close() is called -} - -func newMockCloser() *mockCloser { - return &mockCloser{ - closeChan: make(chan struct{}, 1), - } -} - -func (m *mockCloser) Close() error { - m.closed = true - select { - case m.closeChan <- struct{}{}: - default: - } - - return m.closeErr -} - -// orderTrackingCloser wraps a closer and tracks when it's closed. -type orderTrackingCloser struct { - name string - closeOrder *[]string - mu *sync.Mutex -} - -func (o *orderTrackingCloser) Close() error { - o.mu.Lock() - *o.closeOrder = append(*o.closeOrder, o.name) - o.mu.Unlock() - - return nil -} - -// TestClientClose_ClosesSPIFFESources tests that Close() properly closes all SPIFFE sources. -func TestClientClose_ClosesSPIFFESources(t *testing.T) { - tests := []struct { - name string - bundleSrc *mockCloser - x509Src *mockCloser - jwtSource *mockCloser - wantClosed []string // Which sources should be closed - }{ - { - name: "all sources present", - bundleSrc: newMockCloser(), - x509Src: newMockCloser(), - jwtSource: newMockCloser(), - wantClosed: []string{"jwtSource", "x509Src", "bundleSrc"}, - }, - { - name: "only bundleSrc", - bundleSrc: newMockCloser(), - x509Src: nil, - jwtSource: nil, - wantClosed: []string{"bundleSrc"}, - }, - { - name: "only x509Src", - bundleSrc: nil, - x509Src: newMockCloser(), - jwtSource: nil, - wantClosed: []string{"x509Src"}, - }, - { - name: "only jwtSource", - bundleSrc: nil, - x509Src: nil, - jwtSource: newMockCloser(), - wantClosed: []string{"jwtSource"}, - }, - { - name: "no sources", - bundleSrc: nil, - x509Src: nil, - jwtSource: nil, - wantClosed: []string{}, - }, - { - name: "x509 auth pattern (bundleSrc + x509Src)", - bundleSrc: newMockCloser(), - x509Src: newMockCloser(), - jwtSource: nil, - wantClosed: []string{"x509Src", "bundleSrc"}, - }, - { - name: "jwt auth pattern (bundleSrc + jwtSource)", - bundleSrc: newMockCloser(), - x509Src: nil, - jwtSource: newMockCloser(), - wantClosed: []string{"jwtSource", "bundleSrc"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - client := &Client{} - - // Only set non-nil sources to avoid Go interface nil gotcha - if tt.bundleSrc != nil { - client.bundleSrc = tt.bundleSrc - } - - if tt.x509Src != nil { - client.x509Src = tt.x509Src - } - - if tt.jwtSource != nil { - client.jwtSource = tt.jwtSource - } - - // Close the client - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - // Verify expected sources were closed - checkClosed := func(src *mockCloser, name string) { - if src == nil { - // If source is nil, it shouldn't be in wantClosed list - if contains(tt.wantClosed, name) { - t.Errorf("%s was nil but expected to be closed", name) - } - - return - } - - shouldBeClosed := contains(tt.wantClosed, name) - if src.closed != shouldBeClosed { - t.Errorf("%s.closed = %v, want %v", name, src.closed, shouldBeClosed) - } - } - - checkClosed(tt.bundleSrc, "bundleSrc") - checkClosed(tt.x509Src, "x509Src") - checkClosed(tt.jwtSource, "jwtSource") - }) - } -} - -// TestClientClose_SPIFFESourcesCloseOrder tests the order of closing SPIFFE sources. 
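Taken together, the ordering and error-handling tests in this file pin down the expected Close() behavior: close jwtSource, then x509Src, then bundleSrc; attempt all three even when one fails; and aggregate the failures. A sketch that would satisfy those expectations (the real implementation lives elsewhere in the client package and may differ in detail):

    func (c *Client) Close() error {
        var errs []error

        if c.jwtSource != nil {
            if err := c.jwtSource.Close(); err != nil {
                errs = append(errs, fmt.Errorf("failed to close JWT source: %w", err))
            }
        }

        if c.x509Src != nil {
            if err := c.x509Src.Close(); err != nil {
                errs = append(errs, fmt.Errorf("failed to close X.509 source: %w", err))
            }
        }

        if c.bundleSrc != nil {
            if err := c.bundleSrc.Close(); err != nil {
                errs = append(errs, fmt.Errorf("failed to close bundle source: %w", err))
            }
        }

        if len(errs) > 0 {
            return fmt.Errorf("client close errors: %w", errors.Join(errs...))
        }

        return nil
    }
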
-func TestClientClose_SPIFFESourcesCloseOrder(t *testing.T) { - // Track close order - var ( - closeOrder []string - orderMu sync.Mutex - ) - - // Create closers that record their close order - jwtSource := &orderTrackingCloser{name: "jwtSource", closeOrder: &closeOrder, mu: &orderMu} - x509Src := &orderTrackingCloser{name: "x509Src", closeOrder: &closeOrder, mu: &orderMu} - bundleSrc := &orderTrackingCloser{name: "bundleSrc", closeOrder: &closeOrder, mu: &orderMu} - - client := &Client{ - jwtSource: jwtSource, - x509Src: x509Src, - bundleSrc: bundleSrc, - } - - // Close the client - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - // Verify close order: jwtSource → x509Src → bundleSrc - // This order is important because sources may depend on each other - expectedOrder := []string{"jwtSource", "x509Src", "bundleSrc"} - - orderMu.Lock() - defer orderMu.Unlock() - - if len(closeOrder) != len(expectedOrder) { - t.Errorf("Close order length = %d, want %d (got %v)", len(closeOrder), len(expectedOrder), closeOrder) - } - - for i, want := range expectedOrder { - if i >= len(closeOrder) { - t.Errorf("Missing close call for %s at position %d", want, i) - - continue - } - - if closeOrder[i] != want { - t.Errorf("Close order[%d] = %s, want %s", i, closeOrder[i], want) - } - } -} - -// TestClientClose_SPIFFESourceErrorHandling tests error handling when closing SPIFFE sources. -func TestClientClose_SPIFFESourceErrorHandling(t *testing.T) { - tests := []struct { - name string - bundleErr error - x509Err error - jwtErr error - wantErrCount int - wantErrSubstr string - }{ - { - name: "no errors", - bundleErr: nil, - x509Err: nil, - jwtErr: nil, - wantErrCount: 0, - wantErrSubstr: "", - }, - { - name: "jwt source error", - bundleErr: nil, - x509Err: nil, - jwtErr: errors.New("jwt close failed"), - wantErrCount: 1, - wantErrSubstr: "JWT source", - }, - { - name: "x509 source error", - bundleErr: nil, - x509Err: errors.New("x509 close failed"), - jwtErr: nil, - wantErrCount: 1, - wantErrSubstr: "X.509 source", - }, - { - name: "bundle source error", - bundleErr: errors.New("bundle close failed"), - x509Err: nil, - jwtErr: nil, - wantErrCount: 1, - wantErrSubstr: "bundle source", - }, - { - name: "all sources error", - bundleErr: errors.New("bundle close failed"), - x509Err: errors.New("x509 close failed"), - jwtErr: errors.New("jwt close failed"), - wantErrCount: 3, - wantErrSubstr: "client close errors", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - bundleSrc := newMockCloser() - bundleSrc.closeErr = tt.bundleErr - - x509Src := newMockCloser() - x509Src.closeErr = tt.x509Err - - jwtSource := newMockCloser() - jwtSource.closeErr = tt.jwtErr - - client := &Client{ - bundleSrc: bundleSrc, - x509Src: x509Src, - jwtSource: jwtSource, - } - - err := client.Close() - - // Test case expects no error - if tt.wantErrCount == 0 { - if err != nil { - t.Errorf("Close() returned error when none expected: %v", err) - } - - return - } - - // Test case expects an error - if err == nil { - t.Errorf("Close() returned nil, want error") - - return - } - - // Verify error message contains expected substring - if tt.wantErrSubstr != "" && !containsSubstring(err.Error(), tt.wantErrSubstr) { - t.Errorf("Close() error = %q, want substring %q", err.Error(), tt.wantErrSubstr) - } - - // Verify all sources were attempted to be closed despite errors - if !bundleSrc.closed { - t.Error("bundleSrc was not closed") - } - - if !x509Src.closed { - t.Error("x509Src 
was not closed") - } - - if !jwtSource.closed { - t.Error("jwtSource was not closed") - } - }) - } -} - -// TestClientClose_SPIFFESourcesWithConnection tests that sources are closed before connection. -func TestClientClose_SPIFFESourcesWithConnection(t *testing.T) { - // Create test server - server, lis := createTestServer(t) - defer server.Stop() - - // Create connection - conn, err := grpc.NewClient( - spiffeTestServerBufnet, - grpc.WithContextDialer(bufDialer(lis)), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to create gRPC client: %v", err) - } - - // Create mock SPIFFE sources - bundleSrc := newMockCloser() - x509Src := newMockCloser() - jwtSource := newMockCloser() - - client := &Client{ - StoreServiceClient: storev1.NewStoreServiceClient(conn), - RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), - SearchServiceClient: searchv1.NewSearchServiceClient(conn), - SyncServiceClient: storev1.NewSyncServiceClient(conn), - SignServiceClient: signv1.NewSignServiceClient(conn), - EventServiceClient: eventsv1.NewEventServiceClient(conn), - config: &Config{ - ServerAddress: spiffeTestServerBufnet, - }, - conn: conn, - bundleSrc: bundleSrc, - x509Src: x509Src, - jwtSource: jwtSource, - } - - // Close the client - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - // Verify all SPIFFE sources were closed - if !jwtSource.closed { - t.Error("jwtSource was not closed") - } - - if !x509Src.closed { - t.Error("x509Src was not closed") - } - - if !bundleSrc.closed { - t.Error("bundleSrc was not closed") - } - - // Verify connection state - time.Sleep(spiffeTestCleanupWait) - - finalState := conn.GetState() - t.Logf("Final connection state: %v", finalState) -} - -// TestClientClose_PartialSPIFFESources tests closing when only some sources are present. -func TestClientClose_PartialSPIFFESources(t *testing.T) { - // Test X.509 auth pattern (bundleSrc + x509Src, no jwtSource) - t.Run("x509 auth pattern", func(t *testing.T) { - bundleSrc := newMockCloser() - x509Src := newMockCloser() - - client := &Client{ - bundleSrc: bundleSrc, - x509Src: x509Src, - jwtSource: nil, // Not used in X.509 auth - } - - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - if !bundleSrc.closed { - t.Error("bundleSrc was not closed") - } - - if !x509Src.closed { - t.Error("x509Src was not closed") - } - }) - - // Test JWT auth pattern (bundleSrc + jwtSource, no x509Src) - t.Run("jwt auth pattern", func(t *testing.T) { - bundleSrc := newMockCloser() - jwtSource := newMockCloser() - - client := &Client{ - bundleSrc: bundleSrc, - x509Src: nil, // Not used in JWT auth - jwtSource: jwtSource, - } - - if err := client.Close(); err != nil { - t.Errorf("Close() returned error: %v", err) - } - - if !bundleSrc.closed { - t.Error("bundleSrc was not closed") - } - - if !jwtSource.closed { - t.Error("jwtSource was not closed") - } - }) -} - -// ============================================================================ -// Helper functions -// ============================================================================ - -// contains checks if a string slice contains a value. -func contains(slice []string, val string) bool { - for _, item := range slice { - if item == val { - return true - } - } - - return false -} - -// containsSubstring checks if a string contains a substring. 
-func containsSubstring(s, substr string) bool { - return len(s) > 0 && len(substr) > 0 && - (s == substr || len(s) >= len(substr) && - (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr || - len(s) > len(substr) && findSubstring(s, substr))) -} - -func findSubstring(s, substr string) bool { - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return true - } - } - - return false -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "errors" + "sync" + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const ( + // Test server constants for SPIFFE tests. + spiffeTestServerBufnet = "bufnet" + spiffeTestCleanupWait = 10 * time.Millisecond +) + +// ============================================================================ +// Issue 3: SPIFFE Sources Resource Leaks Tests +// ============================================================================ + +// mockCloser is a mock implementation of io.Closer for testing. +type mockCloser struct { + closed bool + closeErr error + closeChan chan struct{} // Signal when Close() is called +} + +func newMockCloser() *mockCloser { + return &mockCloser{ + closeChan: make(chan struct{}, 1), + } +} + +func (m *mockCloser) Close() error { + m.closed = true + select { + case m.closeChan <- struct{}{}: + default: + } + + return m.closeErr +} + +// orderTrackingCloser wraps a closer and tracks when it's closed. +type orderTrackingCloser struct { + name string + closeOrder *[]string + mu *sync.Mutex +} + +func (o *orderTrackingCloser) Close() error { + o.mu.Lock() + *o.closeOrder = append(*o.closeOrder, o.name) + o.mu.Unlock() + + return nil +} + +// TestClientClose_ClosesSPIFFESources tests that Close() properly closes all SPIFFE sources. 
+func TestClientClose_ClosesSPIFFESources(t *testing.T) { + tests := []struct { + name string + bundleSrc *mockCloser + x509Src *mockCloser + jwtSource *mockCloser + wantClosed []string // Which sources should be closed + }{ + { + name: "all sources present", + bundleSrc: newMockCloser(), + x509Src: newMockCloser(), + jwtSource: newMockCloser(), + wantClosed: []string{"jwtSource", "x509Src", "bundleSrc"}, + }, + { + name: "only bundleSrc", + bundleSrc: newMockCloser(), + x509Src: nil, + jwtSource: nil, + wantClosed: []string{"bundleSrc"}, + }, + { + name: "only x509Src", + bundleSrc: nil, + x509Src: newMockCloser(), + jwtSource: nil, + wantClosed: []string{"x509Src"}, + }, + { + name: "only jwtSource", + bundleSrc: nil, + x509Src: nil, + jwtSource: newMockCloser(), + wantClosed: []string{"jwtSource"}, + }, + { + name: "no sources", + bundleSrc: nil, + x509Src: nil, + jwtSource: nil, + wantClosed: []string{}, + }, + { + name: "x509 auth pattern (bundleSrc + x509Src)", + bundleSrc: newMockCloser(), + x509Src: newMockCloser(), + jwtSource: nil, + wantClosed: []string{"x509Src", "bundleSrc"}, + }, + { + name: "jwt auth pattern (bundleSrc + jwtSource)", + bundleSrc: newMockCloser(), + x509Src: nil, + jwtSource: newMockCloser(), + wantClosed: []string{"jwtSource", "bundleSrc"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client := &Client{} + + // Only set non-nil sources to avoid Go interface nil gotcha + if tt.bundleSrc != nil { + client.bundleSrc = tt.bundleSrc + } + + if tt.x509Src != nil { + client.x509Src = tt.x509Src + } + + if tt.jwtSource != nil { + client.jwtSource = tt.jwtSource + } + + // Close the client + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + // Verify expected sources were closed + checkClosed := func(src *mockCloser, name string) { + if src == nil { + // If source is nil, it shouldn't be in wantClosed list + if contains(tt.wantClosed, name) { + t.Errorf("%s was nil but expected to be closed", name) + } + + return + } + + shouldBeClosed := contains(tt.wantClosed, name) + if src.closed != shouldBeClosed { + t.Errorf("%s.closed = %v, want %v", name, src.closed, shouldBeClosed) + } + } + + checkClosed(tt.bundleSrc, "bundleSrc") + checkClosed(tt.x509Src, "x509Src") + checkClosed(tt.jwtSource, "jwtSource") + }) + } +} + +// TestClientClose_SPIFFESourcesCloseOrder tests the order of closing SPIFFE sources. 
+func TestClientClose_SPIFFESourcesCloseOrder(t *testing.T) { + // Track close order + var ( + closeOrder []string + orderMu sync.Mutex + ) + + // Create closers that record their close order + jwtSource := &orderTrackingCloser{name: "jwtSource", closeOrder: &closeOrder, mu: &orderMu} + x509Src := &orderTrackingCloser{name: "x509Src", closeOrder: &closeOrder, mu: &orderMu} + bundleSrc := &orderTrackingCloser{name: "bundleSrc", closeOrder: &closeOrder, mu: &orderMu} + + client := &Client{ + jwtSource: jwtSource, + x509Src: x509Src, + bundleSrc: bundleSrc, + } + + // Close the client + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + // Verify close order: jwtSource → x509Src → bundleSrc + // This order is important because sources may depend on each other + expectedOrder := []string{"jwtSource", "x509Src", "bundleSrc"} + + orderMu.Lock() + defer orderMu.Unlock() + + if len(closeOrder) != len(expectedOrder) { + t.Errorf("Close order length = %d, want %d (got %v)", len(closeOrder), len(expectedOrder), closeOrder) + } + + for i, want := range expectedOrder { + if i >= len(closeOrder) { + t.Errorf("Missing close call for %s at position %d", want, i) + + continue + } + + if closeOrder[i] != want { + t.Errorf("Close order[%d] = %s, want %s", i, closeOrder[i], want) + } + } +} + +// TestClientClose_SPIFFESourceErrorHandling tests error handling when closing SPIFFE sources. +func TestClientClose_SPIFFESourceErrorHandling(t *testing.T) { + tests := []struct { + name string + bundleErr error + x509Err error + jwtErr error + wantErrCount int + wantErrSubstr string + }{ + { + name: "no errors", + bundleErr: nil, + x509Err: nil, + jwtErr: nil, + wantErrCount: 0, + wantErrSubstr: "", + }, + { + name: "jwt source error", + bundleErr: nil, + x509Err: nil, + jwtErr: errors.New("jwt close failed"), + wantErrCount: 1, + wantErrSubstr: "JWT source", + }, + { + name: "x509 source error", + bundleErr: nil, + x509Err: errors.New("x509 close failed"), + jwtErr: nil, + wantErrCount: 1, + wantErrSubstr: "X.509 source", + }, + { + name: "bundle source error", + bundleErr: errors.New("bundle close failed"), + x509Err: nil, + jwtErr: nil, + wantErrCount: 1, + wantErrSubstr: "bundle source", + }, + { + name: "all sources error", + bundleErr: errors.New("bundle close failed"), + x509Err: errors.New("x509 close failed"), + jwtErr: errors.New("jwt close failed"), + wantErrCount: 3, + wantErrSubstr: "client close errors", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bundleSrc := newMockCloser() + bundleSrc.closeErr = tt.bundleErr + + x509Src := newMockCloser() + x509Src.closeErr = tt.x509Err + + jwtSource := newMockCloser() + jwtSource.closeErr = tt.jwtErr + + client := &Client{ + bundleSrc: bundleSrc, + x509Src: x509Src, + jwtSource: jwtSource, + } + + err := client.Close() + + // Test case expects no error + if tt.wantErrCount == 0 { + if err != nil { + t.Errorf("Close() returned error when none expected: %v", err) + } + + return + } + + // Test case expects an error + if err == nil { + t.Errorf("Close() returned nil, want error") + + return + } + + // Verify error message contains expected substring + if tt.wantErrSubstr != "" && !containsSubstring(err.Error(), tt.wantErrSubstr) { + t.Errorf("Close() error = %q, want substring %q", err.Error(), tt.wantErrSubstr) + } + + // Verify all sources were attempted to be closed despite errors + if !bundleSrc.closed { + t.Error("bundleSrc was not closed") + } + + if !x509Src.closed { + t.Error("x509Src 
was not closed") + } + + if !jwtSource.closed { + t.Error("jwtSource was not closed") + } + }) + } +} + +// TestClientClose_SPIFFESourcesWithConnection tests that sources are closed before connection. +func TestClientClose_SPIFFESourcesWithConnection(t *testing.T) { + // Create test server + server, lis := createTestServer(t) + defer server.Stop() + + // Create connection + conn, err := grpc.NewClient( + spiffeTestServerBufnet, + grpc.WithContextDialer(bufDialer(lis)), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + + // Create mock SPIFFE sources + bundleSrc := newMockCloser() + x509Src := newMockCloser() + jwtSource := newMockCloser() + + client := &Client{ + StoreServiceClient: storev1.NewStoreServiceClient(conn), + RoutingServiceClient: routingv1.NewRoutingServiceClient(conn), + SearchServiceClient: searchv1.NewSearchServiceClient(conn), + SyncServiceClient: storev1.NewSyncServiceClient(conn), + SignServiceClient: signv1.NewSignServiceClient(conn), + EventServiceClient: eventsv1.NewEventServiceClient(conn), + config: &Config{ + ServerAddress: spiffeTestServerBufnet, + }, + conn: conn, + bundleSrc: bundleSrc, + x509Src: x509Src, + jwtSource: jwtSource, + } + + // Close the client + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + // Verify all SPIFFE sources were closed + if !jwtSource.closed { + t.Error("jwtSource was not closed") + } + + if !x509Src.closed { + t.Error("x509Src was not closed") + } + + if !bundleSrc.closed { + t.Error("bundleSrc was not closed") + } + + // Verify connection state + time.Sleep(spiffeTestCleanupWait) + + finalState := conn.GetState() + t.Logf("Final connection state: %v", finalState) +} + +// TestClientClose_PartialSPIFFESources tests closing when only some sources are present. +func TestClientClose_PartialSPIFFESources(t *testing.T) { + // Test X.509 auth pattern (bundleSrc + x509Src, no jwtSource) + t.Run("x509 auth pattern", func(t *testing.T) { + bundleSrc := newMockCloser() + x509Src := newMockCloser() + + client := &Client{ + bundleSrc: bundleSrc, + x509Src: x509Src, + jwtSource: nil, // Not used in X.509 auth + } + + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + if !bundleSrc.closed { + t.Error("bundleSrc was not closed") + } + + if !x509Src.closed { + t.Error("x509Src was not closed") + } + }) + + // Test JWT auth pattern (bundleSrc + jwtSource, no x509Src) + t.Run("jwt auth pattern", func(t *testing.T) { + bundleSrc := newMockCloser() + jwtSource := newMockCloser() + + client := &Client{ + bundleSrc: bundleSrc, + x509Src: nil, // Not used in JWT auth + jwtSource: jwtSource, + } + + if err := client.Close(); err != nil { + t.Errorf("Close() returned error: %v", err) + } + + if !bundleSrc.closed { + t.Error("bundleSrc was not closed") + } + + if !jwtSource.closed { + t.Error("jwtSource was not closed") + } + }) +} + +// ============================================================================ +// Helper functions +// ============================================================================ + +// contains checks if a string slice contains a value. +func contains(slice []string, val string) bool { + for _, item := range slice { + if item == val { + return true + } + } + + return false +} + +// containsSubstring checks if a string contains a substring. 
+func containsSubstring(s, substr string) bool { + return len(s) > 0 && len(substr) > 0 && + (s == substr || len(s) >= len(substr) && + (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr || + len(s) > len(substr) && findSubstring(s, substr))) +} + +func findSubstring(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + + return false +} diff --git a/client/store.go b/client/store.go index 9ad0ad030..920275b2f 100644 --- a/client/store.go +++ b/client/store.go @@ -1,298 +1,298 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "errors" - "fmt" - "io" - - corev1 "github.com/agntcy/dir/api/core/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/client/streaming" - "google.golang.org/protobuf/types/known/emptypb" -) - -// Push sends a complete record to the store and returns a record reference. -// This is a convenience wrapper around PushBatch for single-record operations. -// The record must be ≤4MB as per the v1 store service specification. -func (c *Client) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - refs, err := c.PushBatch(ctx, []*corev1.Record{record}) - if err != nil { - return nil, err - } - - if len(refs) != 1 { - return nil, errors.New("no data returned") - } - - return refs[0], nil -} - -// PullStream retrieves multiple records efficiently using a single bidirectional stream. -// This method is ideal for batch operations and takes full advantage of gRPC streaming. -// The input channel allows you to send record refs as they become available. -func (c *Client) PullStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[corev1.Record], error) { - stream, err := c.StoreServiceClient.Pull(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create pull stream: %w", err) - } - - //nolint:wrapcheck - return streaming.ProcessBidiStream(ctx, stream, refsCh) -} - -// Pull retrieves a single record from the store using its reference. -// This is a convenience wrapper around PullBatch for single-record operations. -func (c *Client) Pull(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.Record, error) { - records, err := c.PullBatch(ctx, []*corev1.RecordRef{recordRef}) - if err != nil { - return nil, err - } - - if len(records) != 1 { - return nil, errors.New("no data returned") - } - - return records[0], nil -} - -// PullBatch retrieves multiple records in a single stream for efficiency. -// This is a convenience method that accepts a slice and returns a slice, -// built on top of the streaming implementation for consistency. -func (c *Client) PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) { - // Use channel to communicate error safely (no race condition) - result, err := c.PullStream(ctx, streaming.SliceToChan(ctx, recordRefs)) - if err != nil { - return nil, err - } - - // Check for results - var errs error - - var metas []*corev1.Record - - for { - select { - case err := <-result.ErrCh(): - errs = errors.Join(errs, err) - case resp := <-result.ResCh(): - metas = append(metas, resp) - case <-result.DoneCh(): - return metas, errs - } - } -} - -// PushStream uploads multiple records efficiently using a single bidirectional stream. -// This method is ideal for batch operations and takes full advantage of gRPC streaming. 
-// The input channel allows you to send records as they become available. -func (c *Client) PushStream(ctx context.Context, recordsCh <-chan *corev1.Record) (streaming.StreamResult[corev1.RecordRef], error) { - stream, err := c.StoreServiceClient.Push(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create push stream: %w", err) - } - - //nolint:wrapcheck - return streaming.ProcessBidiStream(ctx, stream, recordsCh) -} - -// PushBatch sends multiple records in a single stream for efficiency. -// This is a convenience method that accepts a slice and returns a slice, -// built on top of the streaming implementation for consistency. -func (c *Client) PushBatch(ctx context.Context, records []*corev1.Record) ([]*corev1.RecordRef, error) { - // Use channel to communicate error safely (no race condition) - result, err := c.PushStream(ctx, streaming.SliceToChan(ctx, records)) - if err != nil { - return nil, err - } - - // Check for results - var errs error - - var refs []*corev1.RecordRef - - for { - select { - case err := <-result.ErrCh(): - errs = errors.Join(errs, err) - case resp := <-result.ResCh(): - refs = append(refs, resp) - case <-result.DoneCh(): - return refs, errs - } - } -} - -// PushReferrer stores a signature using the PushReferrer RPC. -func (c *Client) PushReferrer(ctx context.Context, req *storev1.PushReferrerRequest) error { - // Create streaming client - stream, err := c.StoreServiceClient.PushReferrer(ctx) - if err != nil { - return fmt.Errorf("failed to create push referrer stream: %w", err) - } - - // Send the request - if err := stream.Send(req); err != nil { - return fmt.Errorf("failed to send push referrer request: %w", err) - } - - // Close send stream - if err := stream.CloseSend(); err != nil { - return fmt.Errorf("failed to close send stream: %w", err) - } - - // Receive response - _, err = stream.Recv() - if err != nil { - return fmt.Errorf("failed to receive push referrer response: %w", err) - } - - return nil -} - -// PullReferrer retrieves all referrers using the PullReferrer RPC. -func (c *Client) PullReferrer(ctx context.Context, req *storev1.PullReferrerRequest) (<-chan *storev1.PullReferrerResponse, error) { - // Create streaming client - stream, err := c.StoreServiceClient.PullReferrer(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create pull referrer stream: %w", err) - } - - // Send the request - if err := stream.Send(req); err != nil { - return nil, fmt.Errorf("failed to send pull referrer request: %w", err) - } - - // Close send stream - if err := stream.CloseSend(); err != nil { - return nil, fmt.Errorf("failed to close send stream: %w", err) - } - - resultCh := make(chan *storev1.PullReferrerResponse) - - go func() { - defer close(resultCh) - - for { - response, err := stream.Recv() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - logger.Error("failed to receive pull referrer response", "error", err) - - return - } - - select { - case resultCh <- response: - case <-ctx.Done(): - logger.Error("context cancelled while receiving pull referrer response", "error", ctx.Err()) - - return - } - } - }() - - return resultCh, nil -} - -// Lookup retrieves metadata for a record using its reference. 
-func (c *Client) Lookup(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.RecordMeta, error) { - resp, err := c.LookupBatch(ctx, []*corev1.RecordRef{recordRef}) - if err != nil { - return nil, err - } - - if len(resp) != 1 { - return nil, errors.New("no data returned") - } - - return resp[0], nil -} - -// LookupBatch retrieves metadata for multiple records in a single stream for efficiency. -func (c *Client) LookupBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.RecordMeta, error) { - // Use channel to communicate error safely (no race condition) - result, err := c.LookupStream(ctx, streaming.SliceToChan(ctx, recordRefs)) - if err != nil { - return nil, err - } - - // Check for results - var errs error - - var metas []*corev1.RecordMeta - - for { - select { - case err := <-result.ErrCh(): - errs = errors.Join(errs, err) - case resp := <-result.ResCh(): - metas = append(metas, resp) - case <-result.DoneCh(): - return metas, errs - } - } -} - -// LookupStream provides efficient streaming lookup operations using channels. -// Record references are sent as they become available and metadata is returned as it's processed. -// This method maintains a single gRPC stream for all operations, dramatically improving efficiency. -// -// Uses sequential streaming pattern (Send → Recv → Send → Recv) which ensures -// strict ordering of request-response pairs. -func (c *Client) LookupStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[corev1.RecordMeta], error) { - stream, err := c.StoreServiceClient.Lookup(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create lookup stream: %w", err) - } - - //nolint:wrapcheck - return streaming.ProcessBidiStream(ctx, stream, refsCh) -} - -// Delete removes a record from the store using its reference. -func (c *Client) Delete(ctx context.Context, recordRef *corev1.RecordRef) error { - return c.DeleteBatch(ctx, []*corev1.RecordRef{recordRef}) -} - -// DeleteBatch removes multiple records from the store in a single stream for efficiency. -func (c *Client) DeleteBatch(ctx context.Context, recordRefs []*corev1.RecordRef) error { - // Use channel to communicate error safely (no race condition) - result, err := c.DeleteStream(ctx, streaming.SliceToChan(ctx, recordRefs)) - if err != nil { - return err - } - - // Check for results - for { - select { - case err := <-result.ErrCh(): - // If any error occurs, return immediately - return err - case <-result.ResCh(): - // We don't expect any results, just confirmations - case <-result.DoneCh(): - return nil - } - } -} - -// DeleteStream provides efficient streaming delete operations using channels. -// Record references are sent as they become available and delete confirmations are returned as they're processed. -// This method maintains a single gRPC stream for all operations, dramatically improving efficiency. 
-func (c *Client) DeleteStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[emptypb.Empty], error) { - // Create gRPC stream - stream, err := c.StoreServiceClient.Delete(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create delete stream: %w", err) - } - - //nolint:wrapcheck - return streaming.ProcessClientStream(ctx, stream, refsCh) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "errors" + "fmt" + "io" + + corev1 "github.com/agntcy/dir/api/core/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/client/streaming" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Push sends a complete record to the store and returns a record reference. +// This is a convenience wrapper around PushBatch for single-record operations. +// The record must be ≤4MB as per the v1 store service specification. +func (c *Client) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + refs, err := c.PushBatch(ctx, []*corev1.Record{record}) + if err != nil { + return nil, err + } + + if len(refs) != 1 { + return nil, errors.New("no data returned") + } + + return refs[0], nil +} + +// PullStream retrieves multiple records efficiently using a single bidirectional stream. +// This method is ideal for batch operations and takes full advantage of gRPC streaming. +// The input channel allows you to send record refs as they become available. +func (c *Client) PullStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[corev1.Record], error) { + stream, err := c.StoreServiceClient.Pull(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create pull stream: %w", err) + } + + //nolint:wrapcheck + return streaming.ProcessBidiStream(ctx, stream, refsCh) +} + +// Pull retrieves a single record from the store using its reference. +// This is a convenience wrapper around PullBatch for single-record operations. +func (c *Client) Pull(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.Record, error) { + records, err := c.PullBatch(ctx, []*corev1.RecordRef{recordRef}) + if err != nil { + return nil, err + } + + if len(records) != 1 { + return nil, errors.New("no data returned") + } + + return records[0], nil +} + +// PullBatch retrieves multiple records in a single stream for efficiency. +// This is a convenience method that accepts a slice and returns a slice, +// built on top of the streaming implementation for consistency. +func (c *Client) PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) { + // Use channel to communicate error safely (no race condition) + result, err := c.PullStream(ctx, streaming.SliceToChan(ctx, recordRefs)) + if err != nil { + return nil, err + } + + // Check for results + var errs error + + var metas []*corev1.Record + + for { + select { + case err := <-result.ErrCh(): + errs = errors.Join(errs, err) + case resp := <-result.ResCh(): + metas = append(metas, resp) + case <-result.DoneCh(): + return metas, errs + } + } +} + +// PushStream uploads multiple records efficiently using a single bidirectional stream. +// This method is ideal for batch operations and takes full advantage of gRPC streaming. +// The input channel allows you to send records as they become available. 
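PushStream, defined just below, leaves ownership of the input channel with the caller. The sketch that follows is essentially what PushBatch does, but with the producer goroutine written out to show what streaming.SliceToChan presumably does internally (an assumption about that helper's behavior):

    func pushAll(ctx context.Context, c *Client, records []*corev1.Record) ([]*corev1.RecordRef, error) {
        recordsCh := make(chan *corev1.Record)

        go func() {
            defer close(recordsCh)

            for _, r := range records {
                select {
                case recordsCh <- r:
                case <-ctx.Done():
                    return // stop producing if the caller gives up
                }
            }
        }()

        result, err := c.PushStream(ctx, recordsCh)
        if err != nil {
            return nil, err
        }

        var (
            errs error
            refs []*corev1.RecordRef
        )

        for {
            select {
            case err := <-result.ErrCh():
                errs = errors.Join(errs, err)
            case ref := <-result.ResCh():
                refs = append(refs, ref)
            case <-result.DoneCh():
                return refs, errs
            }
        }
    }
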
+func (c *Client) PushStream(ctx context.Context, recordsCh <-chan *corev1.Record) (streaming.StreamResult[corev1.RecordRef], error) { + stream, err := c.StoreServiceClient.Push(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create push stream: %w", err) + } + + //nolint:wrapcheck + return streaming.ProcessBidiStream(ctx, stream, recordsCh) +} + +// PushBatch sends multiple records in a single stream for efficiency. +// This is a convenience method that accepts a slice and returns a slice, +// built on top of the streaming implementation for consistency. +func (c *Client) PushBatch(ctx context.Context, records []*corev1.Record) ([]*corev1.RecordRef, error) { + // Use channel to communicate error safely (no race condition) + result, err := c.PushStream(ctx, streaming.SliceToChan(ctx, records)) + if err != nil { + return nil, err + } + + // Check for results + var errs error + + var refs []*corev1.RecordRef + + for { + select { + case err := <-result.ErrCh(): + errs = errors.Join(errs, err) + case resp := <-result.ResCh(): + refs = append(refs, resp) + case <-result.DoneCh(): + return refs, errs + } + } +} + +// PushReferrer stores a signature using the PushReferrer RPC. +func (c *Client) PushReferrer(ctx context.Context, req *storev1.PushReferrerRequest) error { + // Create streaming client + stream, err := c.StoreServiceClient.PushReferrer(ctx) + if err != nil { + return fmt.Errorf("failed to create push referrer stream: %w", err) + } + + // Send the request + if err := stream.Send(req); err != nil { + return fmt.Errorf("failed to send push referrer request: %w", err) + } + + // Close send stream + if err := stream.CloseSend(); err != nil { + return fmt.Errorf("failed to close send stream: %w", err) + } + + // Receive response + _, err = stream.Recv() + if err != nil { + return fmt.Errorf("failed to receive push referrer response: %w", err) + } + + return nil +} + +// PullReferrer retrieves all referrers using the PullReferrer RPC. +func (c *Client) PullReferrer(ctx context.Context, req *storev1.PullReferrerRequest) (<-chan *storev1.PullReferrerResponse, error) { + // Create streaming client + stream, err := c.StoreServiceClient.PullReferrer(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create pull referrer stream: %w", err) + } + + // Send the request + if err := stream.Send(req); err != nil { + return nil, fmt.Errorf("failed to send pull referrer request: %w", err) + } + + // Close send stream + if err := stream.CloseSend(); err != nil { + return nil, fmt.Errorf("failed to close send stream: %w", err) + } + + resultCh := make(chan *storev1.PullReferrerResponse) + + go func() { + defer close(resultCh) + + for { + response, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + logger.Error("failed to receive pull referrer response", "error", err) + + return + } + + select { + case resultCh <- response: + case <-ctx.Done(): + logger.Error("context cancelled while receiving pull referrer response", "error", ctx.Err()) + + return + } + } + }() + + return resultCh, nil +} + +// Lookup retrieves metadata for a record using its reference. +func (c *Client) Lookup(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.RecordMeta, error) { + resp, err := c.LookupBatch(ctx, []*corev1.RecordRef{recordRef}) + if err != nil { + return nil, err + } + + if len(resp) != 1 { + return nil, errors.New("no data returned") + } + + return resp[0], nil +} + +// LookupBatch retrieves metadata for multiple records in a single stream for efficiency. 
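+//
+// A minimal usage sketch (refs is assumed to be a []*corev1.RecordRef built
+// by the caller):
+//
+//	metas, err := c.LookupBatch(ctx, refs)
+//	if err != nil {
+//		return err
+//	}
+//	for _, meta := range metas {
+//		_ = meta // inspect record metadata as needed
+//	}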
+func (c *Client) LookupBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.RecordMeta, error) {
+	// Use channel to communicate error safely (no race condition)
+	result, err := c.LookupStream(ctx, streaming.SliceToChan(ctx, recordRefs))
+	if err != nil {
+		return nil, err
+	}
+
+	// Check for results
+	var errs error
+
+	var metas []*corev1.RecordMeta
+
+	for {
+		select {
+		case err := <-result.ErrCh():
+			errs = errors.Join(errs, err)
+		case resp := <-result.ResCh():
+			metas = append(metas, resp)
+		case <-result.DoneCh():
+			return metas, errs
+		}
+	}
+}
+
+// LookupStream provides efficient streaming lookup operations using channels.
+// Record references are sent as they become available and metadata is returned as it's processed.
+// This method maintains a single gRPC stream for all operations, dramatically improving efficiency.
+//
+// Requests and responses are paired in stream order: the server returns one
+// metadata response per reference sent, preserving strict request-response pairing.
+func (c *Client) LookupStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[corev1.RecordMeta], error) {
+	stream, err := c.StoreServiceClient.Lookup(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create lookup stream: %w", err)
+	}
+
+	//nolint:wrapcheck
+	return streaming.ProcessBidiStream(ctx, stream, refsCh)
+}
+
+// Delete removes a record from the store using its reference.
+func (c *Client) Delete(ctx context.Context, recordRef *corev1.RecordRef) error {
+	return c.DeleteBatch(ctx, []*corev1.RecordRef{recordRef})
+}
+
+// DeleteBatch removes multiple records from the store in a single stream for efficiency.
+func (c *Client) DeleteBatch(ctx context.Context, recordRefs []*corev1.RecordRef) error {
+	// Use channel to communicate error safely (no race condition)
+	result, err := c.DeleteStream(ctx, streaming.SliceToChan(ctx, recordRefs))
+	if err != nil {
+		return err
+	}
+
+	// Check for results
+	for {
+		select {
+		case err := <-result.ErrCh():
+			// If any error occurs, return immediately
+			return err
+		case <-result.ResCh():
+			// We don't expect any results, just confirmations
+		case <-result.DoneCh():
+			return nil
+		}
+	}
+}
+
+// DeleteStream provides efficient streaming delete operations using channels.
+// Record references are sent as they become available and delete confirmations are returned as they're processed.
+// This method maintains a single gRPC stream for all operations, dramatically improving efficiency.
+func (c *Client) DeleteStream(ctx context.Context, refsCh <-chan *corev1.RecordRef) (streaming.StreamResult[emptypb.Empty], error) {
+	// Create gRPC stream
+	stream, err := c.StoreServiceClient.Delete(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create delete stream: %w", err)
+	}
+
+	//nolint:wrapcheck
+	return streaming.ProcessClientStream(ctx, stream, refsCh)
+}
diff --git a/client/streaming/bidi_stream.go b/client/streaming/bidi_stream.go
index f1a576d4d..9bff73cf1 100644
--- a/client/streaming/bidi_stream.go
+++ b/client/streaming/bidi_stream.go
@@ -1,154 +1,154 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package streaming
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"sync"
-)
-
-// BidiStream defines the interface for bidirectional streaming.
-// This pattern allows sending and receiving messages independently.
-type BidiStream[InT, OutT any] interface { - Send(*InT) error - Recv() (*OutT, error) - CloseSend() error -} - -// ProcessBidiStream handles concurrent bidirectional streaming. -// -// Pattern: Sender || Receiver (parallel goroutines) -// -// This processor implements true bidirectional streaming with concurrent send and receive operations. -// It spawns two independent goroutines: -// - Sender: Continuously sends inputs from the input channel -// - Receiver: Continuously receives outputs and sends them to the output channel -// -// This pattern maximizes throughput by: -// - Eliminating round-trip latency between requests -// - Allowing the server to batch/buffer/pipeline operations -// - Fully utilizing network bandwidth -// - Enabling concurrent processing on both client and server -// -// This is useful when: -// - High performance and throughput are needed -// - Processing large batches of data -// - Server can process requests in parallel or batches -// - Responses can arrive in any order or timing -// - Network latency is significant -// -// Returns: -// - result: StreamResult containing result, error, and done channels -// - error: Immediate error if validation fails -// -// The caller should: -// 1. Range over result channels to process outputs and errors -// 2. Check if the processing is done -// 3. Use context cancellation to stop processing early -func ProcessBidiStream[InT, OutT any]( - ctx context.Context, - stream BidiStream[InT, OutT], - inputCh <-chan *InT, -) (StreamResult[OutT], error) { - // Validate inputs - if ctx == nil { - return nil, errors.New("context is nil") - } - - if stream == nil { - return nil, errors.New("stream is nil") - } - - if inputCh == nil { - return nil, errors.New("input channel is nil") - } - - // Create result channels - result := newResult[OutT]() - - // Start goroutines - go func() { - // Close result once the goroutine ends - defer result.close() - - // WaitGroup to coordinate send/receive goroutines - var wg sync.WaitGroup - - // Goroutine [Sender]: send all inputs from inputCh to the network, - // then close the send side of the stream. - // On error, stop and report the error. - wg.Add(1) - - go func() { - defer wg.Done() - - // Close the send side when done sending inputs - //nolint:errcheck - defer stream.CloseSend() - - // Drain inputCh on early exit to prevent upstream producers from blocking - defer func() { - for range inputCh { - // Drain remaining inputs - } - }() - - // Send input to the stream - // - // Note: stream.Send() is blocking if the internal buffer is full. - // This provides backpressure to the sender goroutine - // which in turn provides backpressure to the input channel - // and upstream producers. - // - // If the context is cancelled, Send() will return an error, - // which terminates this goroutine. - for input := range inputCh { - if err := stream.Send(input); err != nil { - result.errCh <- fmt.Errorf("failed to send: %w", err) - - return - } - } - }() - - // Goroutine [Receiver]: receive all responses from API and send them to outputCh. - // On error, stop and report the error. - wg.Add(1) - - go func() { - defer wg.Done() - - // Receive output from the stream - // - // Note: stream.Recv() is blocking until a message is available or - // an error occurs. This provides natural pacing with the server. - // - // If the context is cancelled, Send() will return an error, - // which terminates this goroutine. 
- for { - output, err := stream.Recv() - if errors.Is(err, io.EOF) { - return - } - - if err != nil { - result.errCh <- fmt.Errorf("failed to receive: %w", err) - - return - } - - // Send output to the output channel - result.resCh <- output - } - }() - - // Wait for all goroutines to complete - wg.Wait() - }() - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package streaming + +import ( + "context" + "errors" + "fmt" + "io" + "sync" +) + +// BidiStream defines the interface for bidirectional streaming. +// This pattern allows sending and receiving messages independently. +type BidiStream[InT, OutT any] interface { + Send(*InT) error + Recv() (*OutT, error) + CloseSend() error +} + +// ProcessBidiStream handles concurrent bidirectional streaming. +// +// Pattern: Sender || Receiver (parallel goroutines) +// +// This processor implements true bidirectional streaming with concurrent send and receive operations. +// It spawns two independent goroutines: +// - Sender: Continuously sends inputs from the input channel +// - Receiver: Continuously receives outputs and sends them to the output channel +// +// This pattern maximizes throughput by: +// - Eliminating round-trip latency between requests +// - Allowing the server to batch/buffer/pipeline operations +// - Fully utilizing network bandwidth +// - Enabling concurrent processing on both client and server +// +// This is useful when: +// - High performance and throughput are needed +// - Processing large batches of data +// - Server can process requests in parallel or batches +// - Responses can arrive in any order or timing +// - Network latency is significant +// +// Returns: +// - result: StreamResult containing result, error, and done channels +// - error: Immediate error if validation fails +// +// The caller should: +// 1. Range over result channels to process outputs and errors +// 2. Check if the processing is done +// 3. Use context cancellation to stop processing early +func ProcessBidiStream[InT, OutT any]( + ctx context.Context, + stream BidiStream[InT, OutT], + inputCh <-chan *InT, +) (StreamResult[OutT], error) { + // Validate inputs + if ctx == nil { + return nil, errors.New("context is nil") + } + + if stream == nil { + return nil, errors.New("stream is nil") + } + + if inputCh == nil { + return nil, errors.New("input channel is nil") + } + + // Create result channels + result := newResult[OutT]() + + // Start goroutines + go func() { + // Close result once the goroutine ends + defer result.close() + + // WaitGroup to coordinate send/receive goroutines + var wg sync.WaitGroup + + // Goroutine [Sender]: send all inputs from inputCh to the network, + // then close the send side of the stream. + // On error, stop and report the error. + wg.Add(1) + + go func() { + defer wg.Done() + + // Close the send side when done sending inputs + //nolint:errcheck + defer stream.CloseSend() + + // Drain inputCh on early exit to prevent upstream producers from blocking + defer func() { + for range inputCh { + // Drain remaining inputs + } + }() + + // Send input to the stream + // + // Note: stream.Send() is blocking if the internal buffer is full. + // This provides backpressure to the sender goroutine + // which in turn provides backpressure to the input channel + // and upstream producers. + // + // If the context is cancelled, Send() will return an error, + // which terminates this goroutine. 
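+			//
+			// Note (added for clarity): gRPC streams tolerate one concurrent
+			// sender and one concurrent receiver, but Send must never be called
+			// from multiple goroutines at once. All sends, including CloseSend,
+			// happen only in this goroutine, so that contract holds here.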
+			for input := range inputCh {
+				if err := stream.Send(input); err != nil {
+					result.errCh <- fmt.Errorf("failed to send: %w", err)
+
+					return
+				}
+			}
+		}()
+
+		// Goroutine [Receiver]: receive all responses from API and send them to outputCh.
+		// On error, stop and report the error.
+		wg.Add(1)
+
+		go func() {
+			defer wg.Done()
+
+			// Receive output from the stream
+			//
+			// Note: stream.Recv() is blocking until a message is available or
+			// an error occurs. This provides natural pacing with the server.
+			//
+			// If the context is cancelled, Recv() will return an error,
+			// which terminates this goroutine.
+			for {
+				output, err := stream.Recv()
+				if errors.Is(err, io.EOF) {
+					return
+				}
+
+				if err != nil {
+					result.errCh <- fmt.Errorf("failed to receive: %w", err)
+
+					return
+				}
+
+				// Send output to the output channel
+				result.resCh <- output
+			}
+		}()
+
+		// Wait for all goroutines to complete
+		wg.Wait()
+	}()
+
+	return result, nil
+}
diff --git a/client/streaming/client_stream.go b/client/streaming/client_stream.go
index d451ba350..ad8861132 100644
--- a/client/streaming/client_stream.go
+++ b/client/streaming/client_stream.go
@@ -1,101 +1,101 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package streaming
-
-import (
-	"context"
-	"errors"
-	"fmt"
-)
-
-// ClientStream defines the interface for client streaming (many inputs → one output).
-// This pattern is used when sending multiple requests and receiving a single response.
-type ClientStream[InT, OutT any] interface {
-	Send(*InT) error
-	CloseAndRecv() (*OutT, error)
-	CloseSend() error
-}
-
-// ProcessClientStream handles client streaming pattern (many inputs → one output).
-//
-// Pattern: Send → Send → Send → CloseAndRecv()
-//
-// This processor is ideal for operations where multiple requests are sent to the server,
-// and a single final response is received after all requests have been processed.
-//
-// The processor:
-// - Sends all inputs from the channel to the stream
-// - Closes the send side when input channel closes
-// - Receives the final response via CloseAndRecv()
-//
-// Returns:
-// - result: StreamResult containing result, error, and done channels
-// - error: Immediate error if validation fails
-//
-// The caller should:
-// 1. Range over result channels to process outputs and errors
-// 2. Check if the processing is done
-// 3. 
Use context cancellation to stop processing early -func ProcessClientStream[InT, OutT any]( - ctx context.Context, - stream ClientStream[InT, OutT], - inputCh <-chan *InT, -) (StreamResult[OutT], error) { - // Validate inputs - if ctx == nil { - return nil, errors.New("context is nil") - } - - if stream == nil { - return nil, errors.New("stream is nil") - } - - if inputCh == nil { - return nil, errors.New("input channel is nil") - } - - // Create result channels - result := newResult[OutT]() - - // Process items - go func() { - // Close result once the goroutine ends - defer result.close() - - // Close the send side when done sending inputs - //nolint:errcheck - defer stream.CloseSend() - - // Drain inputCh on early exit to prevent upstream producers from blocking - defer func() { - for range inputCh { - // Drain remaining inputs - } - }() - - // Process all incoming inputs - for input := range inputCh { - // Send the input to the network buffer and handle errors - if err := stream.Send(input); err != nil { - result.errCh <- fmt.Errorf("failed to send: %w", err) - - return - } - } - - // Once the channel is closed, send the data through the stream and exit. - // Handle any errors using the error handler function. - resp, err := stream.CloseAndRecv() - if err != nil { - result.errCh <- fmt.Errorf("failed to receive final response: %w", err) - - return - } - - // Send the final response to the output channel - result.resCh <- resp - }() - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package streaming + +import ( + "context" + "errors" + "fmt" +) + +// ClientStream defines the interface for client streaming (many inputs → one output). +// This pattern is used when sending multiple requests and receiving a single response. +type ClientStream[InT, OutT any] interface { + Send(*InT) error + CloseAndRecv() (*OutT, error) + CloseSend() error +} + +// ProcessClientStream handles client streaming pattern (many inputs → one output). +// +// Pattern: Send → Send → Send → CloseAndRecv() +// +// This processor is ideal for operations where multiple requests are sent to the server, +// and a single final response is received after all requests have been processed. +// +// The processor: +// - Sends all inputs from the channel to the stream +// - Closes the send side when input channel closes +// - Receives the final response via CloseAndRecv() +// +// Returns: +// - result: StreamResult containing result, error, and done channels +// - error: Immediate error if validation fails +// +// The caller should: +// 1. Range over result channels to process outputs and errors +// 2. Check if the processing is done +// 3. 
Use context cancellation to stop processing early +func ProcessClientStream[InT, OutT any]( + ctx context.Context, + stream ClientStream[InT, OutT], + inputCh <-chan *InT, +) (StreamResult[OutT], error) { + // Validate inputs + if ctx == nil { + return nil, errors.New("context is nil") + } + + if stream == nil { + return nil, errors.New("stream is nil") + } + + if inputCh == nil { + return nil, errors.New("input channel is nil") + } + + // Create result channels + result := newResult[OutT]() + + // Process items + go func() { + // Close result once the goroutine ends + defer result.close() + + // Close the send side when done sending inputs + //nolint:errcheck + defer stream.CloseSend() + + // Drain inputCh on early exit to prevent upstream producers from blocking + defer func() { + for range inputCh { + // Drain remaining inputs + } + }() + + // Process all incoming inputs + for input := range inputCh { + // Send the input to the network buffer and handle errors + if err := stream.Send(input); err != nil { + result.errCh <- fmt.Errorf("failed to send: %w", err) + + return + } + } + + // Once the channel is closed, send the data through the stream and exit. + // Handle any errors using the error handler function. + resp, err := stream.CloseAndRecv() + if err != nil { + result.errCh <- fmt.Errorf("failed to receive final response: %w", err) + + return + } + + // Send the final response to the output channel + result.resCh <- resp + }() + + return result, nil +} diff --git a/client/streaming/server_stream.go b/client/streaming/server_stream.go index 99612ed0b..f6ad203d4 100644 --- a/client/streaming/server_stream.go +++ b/client/streaming/server_stream.go @@ -1,118 +1,118 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package streaming - -import ( - "context" - "errors" - "fmt" - "io" -) - -// ServerStream defines the interface for server streaming (one input → many outputs). -// This pattern is used when sending a single request and receiving multiple responses. -type ServerStream[OutT any] interface { - Recv() (*OutT, error) -} - -// ProcessServerStream handles server streaming pattern (one input → many outputs). -// -// Pattern: Send Request → Recv() → Recv() → Recv() → EOF -// -// This processor is ideal for operations where a single request triggers multiple -// responses from the server, such as: -// - Streaming search results -// - Listing resources -// - Tailing logs -// - Real-time event streams -// -// The processor: -// - Continuously receives outputs from the stream -// - Sends each output to the result channel -// - Handles EOF gracefully to signal completion -// - Propagates errors to the error channel -// -// Returns: -// - result: StreamResult containing result, error, and done channels -// - error: Immediate error if validation fails -// -// The caller should: -// 1. Range over result channels to process outputs and errors -// 2. Monitor the DoneCh to know when streaming is complete -// 3. 
Use context cancellation to stop processing early -// -// Example usage: -// -// stream, err := client.Listen(ctx, req) -// if err != nil { -// return err -// } -// -// result, err := streaming.ProcessServerStream(ctx, stream) -// if err != nil { -// return err -// } -// -// for { -// select { -// case resp := <-result.ResCh(): -// // Process response -// case err := <-result.ErrCh(): -// // Handle error -// return err -// case <-result.DoneCh(): -// // All responses received -// return nil -// case <-ctx.Done(): -// return ctx.Err() -// } -// } -func ProcessServerStream[OutT any]( - ctx context.Context, - stream ServerStream[OutT], -) (StreamResult[OutT], error) { - // Validate inputs - if ctx == nil { - return nil, errors.New("context is nil") - } - - if stream == nil { - return nil, errors.New("stream is nil") - } - - // Create result channels - result := newResult[OutT]() - - // Start receiver goroutine - go func() { - // Close result once the goroutine ends - defer result.close() - - // Receive output from the stream - // - // Note: stream.Recv() is blocking until a message is available or - // an error occurs. This provides natural pacing with the server. - // - // If the context is cancelled, Recv() will return an error, - // which terminates this goroutine. - for { - output, err := stream.Recv() - if errors.Is(err, io.EOF) { - // Normal completion - server closed the stream - return - } - - if err != nil { - result.errCh <- fmt.Errorf("failed to receive: %w", err) - - return - } - - // Send output to the output channel - result.resCh <- output - } - }() - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package streaming + +import ( + "context" + "errors" + "fmt" + "io" +) + +// ServerStream defines the interface for server streaming (one input → many outputs). +// This pattern is used when sending a single request and receiving multiple responses. +type ServerStream[OutT any] interface { + Recv() (*OutT, error) +} + +// ProcessServerStream handles server streaming pattern (one input → many outputs). +// +// Pattern: Send Request → Recv() → Recv() → Recv() → EOF +// +// This processor is ideal for operations where a single request triggers multiple +// responses from the server, such as: +// - Streaming search results +// - Listing resources +// - Tailing logs +// - Real-time event streams +// +// The processor: +// - Continuously receives outputs from the stream +// - Sends each output to the result channel +// - Handles EOF gracefully to signal completion +// - Propagates errors to the error channel +// +// Returns: +// - result: StreamResult containing result, error, and done channels +// - error: Immediate error if validation fails +// +// The caller should: +// 1. Range over result channels to process outputs and errors +// 2. Monitor the DoneCh to know when streaming is complete +// 3. 
Use context cancellation to stop processing early +// +// Example usage: +// +// stream, err := client.Listen(ctx, req) +// if err != nil { +// return err +// } +// +// result, err := streaming.ProcessServerStream(ctx, stream) +// if err != nil { +// return err +// } +// +// for { +// select { +// case resp := <-result.ResCh(): +// // Process response +// case err := <-result.ErrCh(): +// // Handle error +// return err +// case <-result.DoneCh(): +// // All responses received +// return nil +// case <-ctx.Done(): +// return ctx.Err() +// } +// } +func ProcessServerStream[OutT any]( + ctx context.Context, + stream ServerStream[OutT], +) (StreamResult[OutT], error) { + // Validate inputs + if ctx == nil { + return nil, errors.New("context is nil") + } + + if stream == nil { + return nil, errors.New("stream is nil") + } + + // Create result channels + result := newResult[OutT]() + + // Start receiver goroutine + go func() { + // Close result once the goroutine ends + defer result.close() + + // Receive output from the stream + // + // Note: stream.Recv() is blocking until a message is available or + // an error occurs. This provides natural pacing with the server. + // + // If the context is cancelled, Recv() will return an error, + // which terminates this goroutine. + for { + output, err := stream.Recv() + if errors.Is(err, io.EOF) { + // Normal completion - server closed the stream + return + } + + if err != nil { + result.errCh <- fmt.Errorf("failed to receive: %w", err) + + return + } + + // Send output to the output channel + result.resCh <- output + } + }() + + return result, nil +} diff --git a/client/streaming/stream_result.go b/client/streaming/stream_result.go index e5095f651..14ce1948e 100644 --- a/client/streaming/stream_result.go +++ b/client/streaming/stream_result.go @@ -1,81 +1,81 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package streaming - -// StreamResult encapsulates the channels for receiving streaming results, -// errors, and completion signals. It provides a structured way to handle -// streaming responses. -// -// Callers that handle StreamResult should: -// 1. Range over the ResCh to process incoming results. -// 2. Range over the ErrCh to handle errors as they occur. -// 3. Monitor the DoneCh to know when processing is complete. -// -// If the caller does not subscribe to these channels, the processing -// goroutines will block until the channels are read or the context is cancelled. -// -// Example usage: -// -// for { -// select { -// case res := <-result.ResCh(): -// // Process result -// case err := <-result.ErrCh(): -// // Handle error -// case <-result.DoneCh(): -// // Processing is done -// // Exit loop -// return -// } -// } -type StreamResult[OutT any] interface { - // ResCh returns a read-only channel for receiving results of type *OutT. - // More than one result can be sent before the DoneCh is closed. - // - // NOTES: - // - For ClientStream, the ResCh can receive a single result before the DoneCh is closed. - // - For BidiStream, the ResCh can receive multiple results until the DoneCh is closed. - ResCh() <-chan *OutT - - // ErrCh returns a read-only channel for receiving errors encountered during processing. - // Errors are sent to this channel as they occur. - // More than one error can be sent before the DoneCh is closed. - ErrCh() <-chan error - - // DoneCh returns a read-only channel that is closed when processing is complete. 
- // It is used to signal that no more results or errors will be sent. - DoneCh() <-chan struct{} -} - -// result is a concrete implementation of StreamResult. -type result[OutT any] struct { - resCh chan *OutT - errCh chan error - doneCh chan struct{} -} - -func newResult[OutT any]() *result[OutT] { - return &result[OutT]{ - resCh: make(chan *OutT), - errCh: make(chan error), - doneCh: make(chan struct{}), - } -} - -func (r *result[OutT]) ResCh() <-chan *OutT { - return r.resCh -} - -func (r *result[OutT]) ErrCh() <-chan error { - return r.errCh -} - -func (r *result[OutT]) DoneCh() <-chan struct{} { - return r.doneCh -} - -// close closes only the control channel doneCh to signal completion. -func (r *result[OutT]) close() { - close(r.doneCh) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package streaming + +// StreamResult encapsulates the channels for receiving streaming results, +// errors, and completion signals. It provides a structured way to handle +// streaming responses. +// +// Callers that handle StreamResult should: +// 1. Range over the ResCh to process incoming results. +// 2. Range over the ErrCh to handle errors as they occur. +// 3. Monitor the DoneCh to know when processing is complete. +// +// If the caller does not subscribe to these channels, the processing +// goroutines will block until the channels are read or the context is cancelled. +// +// Example usage: +// +// for { +// select { +// case res := <-result.ResCh(): +// // Process result +// case err := <-result.ErrCh(): +// // Handle error +// case <-result.DoneCh(): +// // Processing is done +// // Exit loop +// return +// } +// } +type StreamResult[OutT any] interface { + // ResCh returns a read-only channel for receiving results of type *OutT. + // More than one result can be sent before the DoneCh is closed. + // + // NOTES: + // - For ClientStream, the ResCh can receive a single result before the DoneCh is closed. + // - For BidiStream, the ResCh can receive multiple results until the DoneCh is closed. + ResCh() <-chan *OutT + + // ErrCh returns a read-only channel for receiving errors encountered during processing. + // Errors are sent to this channel as they occur. + // More than one error can be sent before the DoneCh is closed. + ErrCh() <-chan error + + // DoneCh returns a read-only channel that is closed when processing is complete. + // It is used to signal that no more results or errors will be sent. + DoneCh() <-chan struct{} +} + +// result is a concrete implementation of StreamResult. +type result[OutT any] struct { + resCh chan *OutT + errCh chan error + doneCh chan struct{} +} + +func newResult[OutT any]() *result[OutT] { + return &result[OutT]{ + resCh: make(chan *OutT), + errCh: make(chan error), + doneCh: make(chan struct{}), + } +} + +func (r *result[OutT]) ResCh() <-chan *OutT { + return r.resCh +} + +func (r *result[OutT]) ErrCh() <-chan error { + return r.errCh +} + +func (r *result[OutT]) DoneCh() <-chan struct{} { + return r.doneCh +} + +// close closes only the control channel doneCh to signal completion. 
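+// The data channels (resCh and errCh) are deliberately left open: consumers
+// are expected to stop reading once doneCh is closed, and the producing
+// goroutines have already returned by then, so no further sends can occur.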
+func (r *result[OutT]) close() { + close(r.doneCh) +} diff --git a/client/streaming/utils.go b/client/streaming/utils.go index 20e23d169..c3148c780 100644 --- a/client/streaming/utils.go +++ b/client/streaming/utils.go @@ -1,26 +1,26 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package streaming - -import "context" - -// SliceToChan converts a slice of items into a channel that emits each item. -// It respects the provided context for cancellation. -func SliceToChan[T any](ctx context.Context, items []T) <-chan T { - outCh := make(chan T, len(items)) - - go func() { - defer close(outCh) - - for _, item := range items { - select { - case outCh <- item: - case <-ctx.Done(): - return - } - } - }() - - return outCh -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package streaming + +import "context" + +// SliceToChan converts a slice of items into a channel that emits each item. +// It respects the provided context for cancellation. +func SliceToChan[T any](ctx context.Context, items []T) <-chan T { + outCh := make(chan T, len(items)) + + go func() { + defer close(outCh) + + for _, item := range items { + select { + case outCh <- item: + case <-ctx.Done(): + return + } + } + }() + + return outCh +} diff --git a/client/sync.go b/client/sync.go index 81ff9a8f2..4a8b1e8f1 100644 --- a/client/sync.go +++ b/client/sync.go @@ -1,81 +1,81 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "context" - "errors" - "fmt" - "io" - - storev1 "github.com/agntcy/dir/api/store/v1" -) - -func (c *Client) CreateSync(ctx context.Context, remoteURL string, cids []string) (string, error) { - meta, err := c.SyncServiceClient.CreateSync(ctx, &storev1.CreateSyncRequest{ - RemoteDirectoryUrl: remoteURL, - Cids: cids, - }) - if err != nil { - return "", fmt.Errorf("failed to create sync: %w", err) - } - - return meta.GetSyncId(), nil -} - -func (c *Client) ListSyncs(ctx context.Context, req *storev1.ListSyncsRequest) (<-chan *storev1.ListSyncsItem, error) { - stream, err := c.SyncServiceClient.ListSyncs(ctx, req) - if err != nil { - return nil, fmt.Errorf("failed to create list syncs stream: %w", err) - } - - resultCh := make(chan *storev1.ListSyncsItem) - - go func() { - defer close(resultCh) - - for { - item, err := stream.Recv() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - logger.Error("failed to receive list syncs response", "error", err) - - break - } - - select { - case resultCh <- item: - case <-ctx.Done(): - return - } - } - }() - - return resultCh, nil -} - -func (c *Client) GetSync(ctx context.Context, syncID string) (*storev1.GetSyncResponse, error) { - meta, err := c.SyncServiceClient.GetSync(ctx, &storev1.GetSyncRequest{ - SyncId: syncID, - }) - if err != nil { - return nil, fmt.Errorf("failed to get sync: %w", err) - } - - return meta, nil -} - -func (c *Client) DeleteSync(ctx context.Context, syncID string) error { - _, err := c.SyncServiceClient.DeleteSync(ctx, &storev1.DeleteSyncRequest{ - SyncId: syncID, - }) - if err != nil { - return fmt.Errorf("failed to delete sync: %w", err) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "errors" + "fmt" + "io" + + storev1 "github.com/agntcy/dir/api/store/v1" +) + +func (c *Client) CreateSync(ctx context.Context, 
remoteURL string, cids []string) (string, error) { + meta, err := c.SyncServiceClient.CreateSync(ctx, &storev1.CreateSyncRequest{ + RemoteDirectoryUrl: remoteURL, + Cids: cids, + }) + if err != nil { + return "", fmt.Errorf("failed to create sync: %w", err) + } + + return meta.GetSyncId(), nil +} + +func (c *Client) ListSyncs(ctx context.Context, req *storev1.ListSyncsRequest) (<-chan *storev1.ListSyncsItem, error) { + stream, err := c.SyncServiceClient.ListSyncs(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create list syncs stream: %w", err) + } + + resultCh := make(chan *storev1.ListSyncsItem) + + go func() { + defer close(resultCh) + + for { + item, err := stream.Recv() + if errors.Is(err, io.EOF) { + break + } + + if err != nil { + logger.Error("failed to receive list syncs response", "error", err) + + break + } + + select { + case resultCh <- item: + case <-ctx.Done(): + return + } + } + }() + + return resultCh, nil +} + +func (c *Client) GetSync(ctx context.Context, syncID string) (*storev1.GetSyncResponse, error) { + meta, err := c.SyncServiceClient.GetSync(ctx, &storev1.GetSyncRequest{ + SyncId: syncID, + }) + if err != nil { + return nil, fmt.Errorf("failed to get sync: %w", err) + } + + return meta, nil +} + +func (c *Client) DeleteSync(ctx context.Context, syncID string) error { + _, err := c.SyncServiceClient.DeleteSync(ctx, &storev1.DeleteSyncRequest{ + SyncId: syncID, + }) + if err != nil { + return fmt.Errorf("failed to delete sync: %w", err) + } + + return nil +} diff --git a/client/verify.go b/client/verify.go index 1510ecea8..728138524 100644 --- a/client/verify.go +++ b/client/verify.go @@ -1,186 +1,186 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "bytes" - "context" - "crypto" - "encoding/base64" - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - cosignutils "github.com/agntcy/dir/utils/cosign" - sigs "github.com/sigstore/cosign/v3/pkg/signature" -) - -// Verify verifies the signature of the record. -func (c *Client) Verify(ctx context.Context, req *signv1.VerifyRequest) (*signv1.VerifyResponse, error) { - // Server-side verification - response, err := c.SignServiceClient.Verify(ctx, req) - if err != nil { - return nil, fmt.Errorf("server verification failed: %w", err) - } - - if response.GetSuccess() { - return response, nil - } - - // Fall back to client-side verification - logger.Info("Server verification failed, falling back to client-side verification") - - var errMsg string - - verified, err := c.verifyClientSide(ctx, req.GetRecordRef().GetCid()) - if err != nil { - errMsg = err.Error() - } - - return &signv1.VerifyResponse{ - Success: verified, - ErrorMessage: &errMsg, - }, nil -} - -// verifyClientSide performs client-side signature verification using OCI referrers. 
-func (c *Client) verifyClientSide(ctx context.Context, recordCID string) (bool, error) { - logger.Debug("Starting client-side verification", "recordCID", recordCID) - - // Generate the expected payload for this record CID - digest, err := corev1.ConvertCIDToDigest(recordCID) - if err != nil { - return false, fmt.Errorf("failed to convert CID to digest: %w", err) - } - - expectedPayload, err := cosignutils.GeneratePayload(digest.String()) - if err != nil { - return false, fmt.Errorf("failed to generate expected payload: %w", err) - } - - // Retrieve signature from OCI referrers - signatures, err := c.pullSignatureReferrer(ctx, recordCID) - if err != nil { - return false, fmt.Errorf("failed to pull signature referrer: %w", err) - } - - if len(signatures) == 0 { - return false, errors.New("no signature found in referrer responses") - } - - // Retrieve public key from OCI referrers - publicKeys, err := c.pullPublicKeyReferrer(ctx, recordCID) - if err != nil { - return false, fmt.Errorf("failed to pull public key referrer: %w", err) - } - - if len(publicKeys) == 0 { - return false, errors.New("no public key found in referrer responses") - } - - // Compare all public keys with all signatures - for _, publicKey := range publicKeys { - for _, signature := range signatures { - // Verify signature using cosign - verifier, err := sigs.LoadPublicKeyRaw([]byte(publicKey), crypto.SHA256) - if err != nil { - // Skip this public key if it's invalid, try the next one - logger.Debug("Failed to load public key, skipping", "error", err) - - continue - } - - // Decode base64 signature if needed - signatureBytes, err := base64.StdEncoding.DecodeString(signature.GetSignature()) - if err != nil { - // If decoding fails, assume it's already raw bytes - signatureBytes = []byte(signature.GetSignature()) - } - - // Verify signature against the expected payload - err = verifier.VerifySignature(bytes.NewReader(signatureBytes), bytes.NewReader(expectedPayload)) - if err != nil { - // Verification failed for this combination, try the next one - logger.Debug("Signature verification failed, trying next combination", "error", err) - - continue - } - - // If the signature is verified against this public key, return true - return true, nil - } - } - - return false, nil -} - -// pullSignatureReferrer retrieves the signature referrer for a record. -func (c *Client) pullSignatureReferrer(ctx context.Context, recordCID string) ([]*signv1.Signature, error) { - signatureType := corev1.SignatureReferrerType - - resultCh, err := c.PullReferrer(ctx, &storev1.PullReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: recordCID, - }, - ReferrerType: &signatureType, - }) - if err != nil { - return nil, fmt.Errorf("failed to pull signature referrer: %w", err) - } - - signatures := make([]*signv1.Signature, 0) - - // Get all signature responses and decode them from referrer data - for response := range resultCh { - referrer := response.GetReferrer() - if referrer != nil { - signature := &signv1.Signature{} - if err := signature.UnmarshalReferrer(referrer); err != nil { - logger.Error("Failed to decode signature from referrer", "error", err) - - continue - } - - signatures = append(signatures, signature) - } - } - - return signatures, nil -} - -// pullPublicKeyReferrer retrieves the public key referrer for a record. 
-func (c *Client) pullPublicKeyReferrer(ctx context.Context, recordCID string) ([]string, error) { - publicKeyType := corev1.PublicKeyReferrerType - - resultCh, err := c.PullReferrer(ctx, &storev1.PullReferrerRequest{ - RecordRef: &corev1.RecordRef{ - Cid: recordCID, - }, - ReferrerType: &publicKeyType, - }) - if err != nil { - return nil, fmt.Errorf("failed to pull public key referrer: %w", err) - } - - publicKeys := make([]string, 0) - - // Get all public key responses and extract the public key from referrer data - for response := range resultCh { - referrer := response.GetReferrer() - if referrer != nil { - publicKey := &signv1.PublicKey{} - if err := publicKey.UnmarshalReferrer(referrer); err != nil { - logger.Error("Failed to decode public key from referrer", "error", err) - - continue - } - - publicKeys = append(publicKeys, publicKey.GetKey()) - } - } - - return publicKeys, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "bytes" + "context" + "crypto" + "encoding/base64" + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + cosignutils "github.com/agntcy/dir/utils/cosign" + sigs "github.com/sigstore/cosign/v3/pkg/signature" +) + +// Verify verifies the signature of the record. +func (c *Client) Verify(ctx context.Context, req *signv1.VerifyRequest) (*signv1.VerifyResponse, error) { + // Server-side verification + response, err := c.SignServiceClient.Verify(ctx, req) + if err != nil { + return nil, fmt.Errorf("server verification failed: %w", err) + } + + if response.GetSuccess() { + return response, nil + } + + // Fall back to client-side verification + logger.Info("Server verification failed, falling back to client-side verification") + + var errMsg string + + verified, err := c.verifyClientSide(ctx, req.GetRecordRef().GetCid()) + if err != nil { + errMsg = err.Error() + } + + return &signv1.VerifyResponse{ + Success: verified, + ErrorMessage: &errMsg, + }, nil +} + +// verifyClientSide performs client-side signature verification using OCI referrers. 
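+// It reconstructs the expected signing payload from the record CID, pulls the
+// signature and public-key referrers from the store, and reports success if
+// any (public key, signature) pair verifies against that payload.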
+func (c *Client) verifyClientSide(ctx context.Context, recordCID string) (bool, error) { + logger.Debug("Starting client-side verification", "recordCID", recordCID) + + // Generate the expected payload for this record CID + digest, err := corev1.ConvertCIDToDigest(recordCID) + if err != nil { + return false, fmt.Errorf("failed to convert CID to digest: %w", err) + } + + expectedPayload, err := cosignutils.GeneratePayload(digest.String()) + if err != nil { + return false, fmt.Errorf("failed to generate expected payload: %w", err) + } + + // Retrieve signature from OCI referrers + signatures, err := c.pullSignatureReferrer(ctx, recordCID) + if err != nil { + return false, fmt.Errorf("failed to pull signature referrer: %w", err) + } + + if len(signatures) == 0 { + return false, errors.New("no signature found in referrer responses") + } + + // Retrieve public key from OCI referrers + publicKeys, err := c.pullPublicKeyReferrer(ctx, recordCID) + if err != nil { + return false, fmt.Errorf("failed to pull public key referrer: %w", err) + } + + if len(publicKeys) == 0 { + return false, errors.New("no public key found in referrer responses") + } + + // Compare all public keys with all signatures + for _, publicKey := range publicKeys { + for _, signature := range signatures { + // Verify signature using cosign + verifier, err := sigs.LoadPublicKeyRaw([]byte(publicKey), crypto.SHA256) + if err != nil { + // Skip this public key if it's invalid, try the next one + logger.Debug("Failed to load public key, skipping", "error", err) + + continue + } + + // Decode base64 signature if needed + signatureBytes, err := base64.StdEncoding.DecodeString(signature.GetSignature()) + if err != nil { + // If decoding fails, assume it's already raw bytes + signatureBytes = []byte(signature.GetSignature()) + } + + // Verify signature against the expected payload + err = verifier.VerifySignature(bytes.NewReader(signatureBytes), bytes.NewReader(expectedPayload)) + if err != nil { + // Verification failed for this combination, try the next one + logger.Debug("Signature verification failed, trying next combination", "error", err) + + continue + } + + // If the signature is verified against this public key, return true + return true, nil + } + } + + return false, nil +} + +// pullSignatureReferrer retrieves the signature referrer for a record. +func (c *Client) pullSignatureReferrer(ctx context.Context, recordCID string) ([]*signv1.Signature, error) { + signatureType := corev1.SignatureReferrerType + + resultCh, err := c.PullReferrer(ctx, &storev1.PullReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: recordCID, + }, + ReferrerType: &signatureType, + }) + if err != nil { + return nil, fmt.Errorf("failed to pull signature referrer: %w", err) + } + + signatures := make([]*signv1.Signature, 0) + + // Get all signature responses and decode them from referrer data + for response := range resultCh { + referrer := response.GetReferrer() + if referrer != nil { + signature := &signv1.Signature{} + if err := signature.UnmarshalReferrer(referrer); err != nil { + logger.Error("Failed to decode signature from referrer", "error", err) + + continue + } + + signatures = append(signatures, signature) + } + } + + return signatures, nil +} + +// pullPublicKeyReferrer retrieves the public key referrer for a record. 
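+// Referrers that fail to decode are logged and skipped rather than aborting
+// the whole operation, so the returned slice may contain fewer keys than the
+// number of referrer responses received.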
+func (c *Client) pullPublicKeyReferrer(ctx context.Context, recordCID string) ([]string, error) { + publicKeyType := corev1.PublicKeyReferrerType + + resultCh, err := c.PullReferrer(ctx, &storev1.PullReferrerRequest{ + RecordRef: &corev1.RecordRef{ + Cid: recordCID, + }, + ReferrerType: &publicKeyType, + }) + if err != nil { + return nil, fmt.Errorf("failed to pull public key referrer: %w", err) + } + + publicKeys := make([]string, 0) + + // Get all public key responses and extract the public key from referrer data + for response := range resultCh { + referrer := response.GetReferrer() + if referrer != nil { + publicKey := &signv1.PublicKey{} + if err := publicKey.UnmarshalReferrer(referrer); err != nil { + logger.Error("Failed to decode public key from referrer", "error", err) + + continue + } + + publicKeys = append(publicKeys, publicKey.GetKey()) + } + } + + return publicKeys, nil +} diff --git a/codecov.yml b/codecov.yml index 8d81ba680..77725cbf6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,35 +1,35 @@ -codecov: - require_ci_to_pass: false - notify: - after_n_builds: 4 # 1 for unit, 3 for e2e - -comment: - layout: "files,diff,flags,tree" - hide_project_coverage: false - require_changes: false - require_base: false - require_head: true - behavior: default - -coverage: - precision: 1 - round: nearest - range: 40..90 - status: - project: - default: - target: auto - threshold: 2% - informational: true - patch: - default: - target: auto - threshold: 1% - informational: true - changes: false - -ignore: - - "**/*.pb.go" - - "**/mock_*.go" - - "e2e/**" - - "**/testdata/**" +codecov: + require_ci_to_pass: false + notify: + after_n_builds: 4 # 1 for unit, 3 for e2e + +comment: + layout: "files,diff,flags,tree" + hide_project_coverage: false + require_changes: false + require_base: false + require_head: true + behavior: default + +coverage: + precision: 1 + round: nearest + range: 40..90 + status: + project: + default: + target: auto + threshold: 2% + informational: true + patch: + default: + target: auto + threshold: 1% + informational: true + changes: false + +ignore: + - "**/*.pb.go" + - "**/mock_*.go" + - "e2e/**" + - "**/testdata/**" diff --git a/docker-bake.hcl b/docker-bake.hcl index 9e7822799..cc5467e9f 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -1,89 +1,89 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -# Documentation available at: https://docs.docker.com/build/bake/ - -# Docker build args -variable "IMAGE_REPO" { default = "ghcr.io/agntcy" } -variable "IMAGE_TAG" { default = "v0.1.0-rc" } -variable "EXTRA_LDFLAGS" { default = "" } -variable "IMAGE_NAME_SUFFIX" { default = "" } - -function "get_tag" { - params = [tags, name] - result = coalescelist(tags, ["${IMAGE_REPO}/${name}${IMAGE_NAME_SUFFIX}:${IMAGE_TAG}"]) -} - -group "default" { - targets = [ - "dir-apiserver", - "dir-ctl", - ] -} - -group "coverage" { - targets = [ - "dir-apiserver-coverage", - ] -} - -target "_common" { - output = [ - "type=image", - ] - platforms = [ - "linux/arm64", - "linux/amd64", - ] - args = { - EXTRA_LDFLAGS = "${EXTRA_LDFLAGS}" - } -} - -target "docker-metadata-action" { - tags = [] -} - - -target "dir-apiserver" { - context = "." - dockerfile = "./server/Dockerfile" - target = "production" - inherits = [ - "_common", - "docker-metadata-action", - ] - tags = get_tag(target.docker-metadata-action.tags, "${target.dir-apiserver.name}") -} - -target "dir-apiserver-coverage" { - context = "." 
- dockerfile = "./server/Dockerfile" - target = "coverage" - inherits = [ - "_common", - "docker-metadata-action", - ] - tags = get_tag(target.docker-metadata-action.tags, "dir-apiserver") -} - -target "dir-ctl" { - context = "." - dockerfile = "./cli/Dockerfile" - inherits = [ - "_common", - "docker-metadata-action", - ] - tags = get_tag(target.docker-metadata-action.tags, "${target.dir-ctl.name}") -} - -target "sdks-test" { - context = "." - dockerfile = "./e2e/sdk/Dockerfile" - depends_on = ["dir-ctl"] # Ensures dir-ctl is built first - inherits = [ - "_common", - "docker-metadata-action", - ] - tags = get_tag(target.docker-metadata-action.tags, "${target.sdks-test.name}") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +# Documentation available at: https://docs.docker.com/build/bake/ + +# Docker build args +variable "IMAGE_REPO" { default = "ghcr.io/agntcy" } +variable "IMAGE_TAG" { default = "v0.1.0-rc" } +variable "EXTRA_LDFLAGS" { default = "" } +variable "IMAGE_NAME_SUFFIX" { default = "" } + +function "get_tag" { + params = [tags, name] + result = coalescelist(tags, ["${IMAGE_REPO}/${name}${IMAGE_NAME_SUFFIX}:${IMAGE_TAG}"]) +} + +group "default" { + targets = [ + "dir-apiserver", + "dir-ctl", + ] +} + +group "coverage" { + targets = [ + "dir-apiserver-coverage", + ] +} + +target "_common" { + output = [ + "type=image", + ] + platforms = [ + "linux/arm64", + "linux/amd64", + ] + args = { + EXTRA_LDFLAGS = "${EXTRA_LDFLAGS}" + } +} + +target "docker-metadata-action" { + tags = [] +} + + +target "dir-apiserver" { + context = "." + dockerfile = "./server/Dockerfile" + target = "production" + inherits = [ + "_common", + "docker-metadata-action", + ] + tags = get_tag(target.docker-metadata-action.tags, "${target.dir-apiserver.name}") +} + +target "dir-apiserver-coverage" { + context = "." + dockerfile = "./server/Dockerfile" + target = "coverage" + inherits = [ + "_common", + "docker-metadata-action", + ] + tags = get_tag(target.docker-metadata-action.tags, "dir-apiserver") +} + +target "dir-ctl" { + context = "." + dockerfile = "./cli/Dockerfile" + inherits = [ + "_common", + "docker-metadata-action", + ] + tags = get_tag(target.docker-metadata-action.tags, "${target.dir-ctl.name}") +} + +target "sdks-test" { + context = "." + dockerfile = "./e2e/sdk/Dockerfile" + depends_on = ["dir-ctl"] # Ensures dir-ctl is built first + inherits = [ + "_common", + "docker-metadata-action", + ] + tags = get_tag(target.docker-metadata-action.tags, "${target.sdks-test.name}") +} diff --git a/docs/integrations.md b/docs/integrations.md index 03c84ef03..2521c7b28 100644 --- a/docs/integrations.md +++ b/docs/integrations.md @@ -1,87 +1,87 @@ -# ADS Third-Party Integration Options - -## Overview - -This documents outlines research details of ADS integration support options with third-party services. - -## Goal - -- Minimal or no changes required on ADS and OASF projects -- Enable simple integration path of AGNTCY components -- Leverage existing and widely-adopted tooling for agentic development - -## Methodology - -All workflows try encapsulate three important aspecs in order to support this goal. - -- **Schema Extensions** - Focus only on the data, its contents and structure, e.g. LLMs, Prompts, A2A, MCP servers. Use the findings to define required OASF Record extensions. -- **Data Extractors and Transformers** - Provide logic that reads, extracts, and transforms the data into service-specific artifacts that can be used with given services, eg. 
VSCode Copilot and Continue.
-Use OASF records as a data carriers.
-- **Usable and Useful Workflows** - Enable out-of-box configuration and usage of given services.
-
-## Steps taken
-
-The integration support was carried out in the following way:
-
-1. Gather common agentic workflows used by AI developers. Outcome: *devs mainly use LLMs with MCP servers*.
-2. Gather common tools used by AI developers. Outcome: *devs mainly use IDEs like VSCode Copilot*.
-3. Attach common agentic data to OASF records. Settle for **LLMs, Prompts, MCP servers, and A2A card details**.
-4. Provide a script that uses data from 3. to support 1. and 2.
-
-Focus on the following integrations in the initial PoC:
-
-- **VSCode Copilot in Agent Mode** - supports only MCP server configuration
-- **Continue.dev VSCode extension** - supports LLMs, prompts, and MCP server
-
-## Outcome
-
-The data around LLM, Prompts, MCP, and A2A can be easily added to existing OASF schema via extensions.
-This can be verified via `demo.record.json` file.
-If needed, these extensions can also be moved as first-class schema properties, which is also easily supported by OASF.
-
-The data extraction and transformation logic can be easily added, either as standalone scripts, or as part of the directory client.
-This can be verified via `importer.py` script.
-If needed, extractor/transformer interface can be used on the `dirctl` CLI for different tools which can be easily implemented as new plugins given the list of integrations to support.
-
-> In summary, this demonstrates the usage of OASF and ADS to easily add out-of-box support for third-party tools and services to enable agentic development.
-
-## Usage
-
-### Import and configure
-
-1. Run `task poc:integration`
-
-This step generates artifacts for both workflow-types, including VSCode Copilot and Continue.
-
-The artifacts are saved under workflow-specific directory for the given tool, ie. `.vscode/` and `.continue/assistants/`.
-
-2. Run `cp docs/research/integrations/.env.example .env`
-
-This step sets up ENV-var inputs for Continue-based workflow. Fill the env vars after setup.
-This is required for Continue as it does not support prompt inputs.
-
-VSC Copilot will ask for all the necessary inputs via prompts when the chat is started, and this step has no meaning for VS Code.
-
-### *VSC Copilot-based workflow*
-
- 1. Login to Copilot from VSCode
- 2. Open the chat console
- 3. Switch to LLM such as Claude
- 4. Switch to Agent mode
-
-### *Continue-based workflow*
-
-1. Open the chat console
-2. Refresh the Assistants tab
-3. Switch to our OASF-based assistant
-4. Switch to Azure GPT-4o LLM
-5. Switch to Agent mode.
-
-### Try it out with a prompt
-
-```text
-Summarize the pull request in detail, including its purpose, changes made, and any relevant context. Focus on the technical aspects and implications of the changes. Use the provided link to access the GitHub pull request.
-Run for this PR: https://github.com/agntcy/dir/pull/179
-```
-
-This prompt will use configured MCP GitHub server to fetch the required context and will create a detailed summary about the PR.
+# ADS Third-Party Integration Options
+
+## Overview
+
+This document outlines research into ADS integration options with third-party services.
+
+## Goal
+
+- Minimal or no changes required on ADS and OASF projects
+- Enable simple integration path of AGNTCY components
+- Leverage existing and widely-adopted tooling for agentic development
+
+## Methodology
+
+All workflows try to encapsulate three important aspects in order to support this goal.
+
+- **Schema Extensions** - Focus only on the data, its contents and structure, e.g. LLMs, Prompts, A2A, MCP servers. Use the findings to define required OASF Record extensions.
+- **Data Extractors and Transformers** - Provide logic that reads, extracts, and transforms the data into service-specific artifacts that can be used with given services, e.g. VSCode Copilot and Continue.
+Use OASF records as data carriers.
+- **Usable and Useful Workflows** - Enable out-of-the-box configuration and usage of given services.
+
+## Steps taken
+
+The integration support was carried out in the following way:
+
+1. Gather common agentic workflows used by AI developers. Outcome: *devs mainly use LLMs with MCP servers*.
+2. Gather common tools used by AI developers. Outcome: *devs mainly use IDEs like VSCode Copilot*.
+3. Attach common agentic data to OASF records. Settle on **LLMs, Prompts, MCP servers, and A2A card details**.
+4. Provide a script that uses the data from 3. to support 1. and 2.
+
+Focus on the following integrations in the initial PoC:
+
+- **VSCode Copilot in Agent Mode** - supports only MCP server configuration
+- **Continue.dev VSCode extension** - supports LLMs, prompts, and MCP servers
+
+## Outcome
+
+The data around LLMs, Prompts, MCP, and A2A can be easily added to the existing OASF schema via extensions.
+This can be verified via the `demo.record.json` file.
+If needed, these extensions can also be promoted to first-class schema properties, which is also easily supported by OASF.
+
+The data extraction and transformation logic can be easily added, either as standalone scripts or as part of the directory client.
+This can be verified via the `importer.py` script.
+If needed, an extractor/transformer interface can be exposed on the `dirctl` CLI, where support for additional tools can be implemented as new plugins, given the list of integrations to support.
+
+> In summary, this demonstrates the use of OASF and ADS to add out-of-the-box support for third-party tools and services that enable agentic development.
+
+## Usage
+
+### Import and configure
+
+1. Run `task poc:integration`
+
+This step generates artifacts for both workflow types, VSCode Copilot and Continue.
+
+The artifacts are saved under the workflow-specific directory for the given tool, i.e. `.vscode/` and `.continue/assistants/`.
+
+2. Run `cp docs/research/integrations/.env.example .env`
+
+This step sets up the ENV-var inputs for the Continue-based workflow. Fill in the env vars after setup.
+This is required for Continue, as it does not support prompt inputs.
+
+VSC Copilot will ask for all the necessary inputs via prompts when the chat is started, so this step is not needed for VS Code.
+
+### *VSC Copilot-based workflow*
+
+ 1. Log in to Copilot from VSCode
+ 2. Open the chat console
+ 3. Switch to an LLM such as Claude
+ 4. Switch to Agent mode
+
+### *Continue-based workflow*
+
+1. Open the chat console
+2. Refresh the Assistants tab
+3. Switch to our OASF-based assistant
+4. Switch to the Azure GPT-4o LLM
+5. Switch to Agent mode
+
+### Try it out with a prompt
+
+```text
+Summarize the pull request in detail, including its purpose, changes made, and any relevant context. Focus on the technical aspects and implications of the changes. 
Use the provided link to access the GitHub pull request. +Run for this PR: https://github.com/agntcy/dir/pull/179 +``` + +This prompt will use configured MCP GitHub server to fetch the required context and will create a detailed summary about the PR. diff --git a/docs/research/integrations/.env.example b/docs/research/integrations/.env.example index fc15609db..d9dd375f7 100644 --- a/docs/research/integrations/.env.example +++ b/docs/research/integrations/.env.example @@ -1,7 +1,7 @@ -# -# This file contains a list of ENV vars needed for the integration services. -# NOTE: VSCode allows prompt input, while Continue.Dev requires a workspace ".env" file. -# -AZURE_OPENAI_API_KEY= -AZURE_OPENAI_API_BASE= -GITHUB_PERSONAL_ACCESS_TOKEN= +# +# This file contains a list of ENV vars needed for the integration services. +# NOTE: VSCode allows prompt input, while Continue.Dev requires a workspace ".env" file. +# +AZURE_OPENAI_API_KEY= +AZURE_OPENAI_API_BASE= +GITHUB_PERSONAL_ACCESS_TOKEN= diff --git a/docs/research/integrations/SLIDES.md b/docs/research/integrations/SLIDES.md index 1bbdde4dc..ce0f17837 100644 --- a/docs/research/integrations/SLIDES.md +++ b/docs/research/integrations/SLIDES.md @@ -1,533 +1,533 @@ ---- -theme: default -background: "#eff3fc" -class: text-center -highlighter: shiki -lineNumbers: false -drawings: - persist: false -transition: slide-left -title: "AGNTCY Integration Capabilities: An Overview" -mdc: true ---- - -
- Cisco Logo -
- -
- -# AGNTCY Integration Capabilities: An Overview - -
- - by Ramiz Polic, Luca Muscariello - -
- - - ---- -layout: statement ---- - -# Goals - -
- -### **Support Agentic Standards** - -Support widely-adopted standards for agentic development like MCP servers - -
- -### **Enhance Developer Experience** - -Support AI-assisted developer workflows via tools and IDEs like Visual Studio Code - -
- -### **Simplified Integration** - -Easy integration and usage of AGNTCY components like OASF and ADS - ---- -layout: default ---- - -# Methodology - -1. **Schema Extensions** - - Use OASF records as data carriers for common tools - - Add native support for **LLMs, Prompts, A2A Cards, MCP servers** - -2. **Data Extractors and Transformers** - - Generate tool-specific configuration files from OASF records - - Support tool usage via **VS Code Copilot** and **Continue.Dev** - -3. **Workflow Integration** - - Provide simple setup instructions - - Enable immediate productivity - ---- -layout: center ---- - -# Architecture - -
- -```mermaid {scale: 0.6} -flowchart TD - subgraph EX[Schema Extensions] - C1[MCP Servers] - C2[LLM Models] - C3[A2A Cards] - C4[Prompts] - end - - subgraph SRC[OASF Record sources] - H1[Hub Service] - H2[ADS Network] - end - - EX -.-> |*defines extension schema for data attached to a record*| A[OASF Record] - - SRC --> |record found and retreived from a given source| A - - subgraph INT[Tooling Integrations] - D[Data Extractors & Transformers] --> |generates VSCode configs| E1[VSCode Support] - D[Data Extractors & Transformers] --> |generates Continue configs| E2[Continue Support] - end - - A --> |record ingested locally to support| INT - - style A fill:#93c5fd,stroke:#3b82f6 - style D fill:#fbcfe8,stroke:#ec4899 - style EX stroke-dasharray: 5 5 -``` - -
- ---- -background: "#eff3fc" -layout: center ---- - -# OASF Schema Extensions - ---- -layout: two-cols-header ---- - -# OASF Schema: LLM Extension - -::left:: - -
- -```json -{ - "extensions": [{ - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v1.0.0", - "data": { - "models": [ - { - "model": "deepseek-r1:1.5b", - "provider": "ollama", - "api_base": "http://localhost:11434", - "prompt": "You are an expert software developer..." - }, - { - "model": "gpt-4o", - "provider": "azure", - "api_base": "${input:AZURE_OPENAI_API_BASE}", - "api_key": "${input:AZURE_OPENAI_API_KEY}", - } - ] - } - }] -} -``` - -
- -::right:: - -
- -## Features - -Describes LLM support and its configuration for a given agent. - -
- - - Multi-model support - - Local and cloud providers - - Model tuning parameters - - Compatible with existing tools - -
- ---- -layout: two-cols-header ---- - -# OASF Schema: LLM Prompt Extensions - -::left:: - -
- -```json -{ - "extensions": [{ - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "PR Summary", - "description": "PR analysis", - "prompt": "Summarize the pull request in detail..." - }, - { - "name": "PR Review", - "description": "PR review", - "prompt": "Review the pull request in detail..." - } - ] - } - }] -} -``` - -
- -::right:: - -
- -## Features - -Describes common LLM interaction prompts to use the agent. - -
- - - Structured LLM Prompts - - Categorized by purpose - - Descriptive metadata - - Task-specific instructions - - Compatible with existing tools - -
- -




- ---- -layout: two-cols-header ---- - -# OASF Schema: MCP Server Extension - -::left:: - -
- -```json -{ - "extensions": [{ - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", "-i", "--rm", - "-e", "GITHUB_PAT", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PAT": "${input:GITHUB_PAT}" - } - } - } - } - }] -} -``` - -
- -::right:: - -
- -## Features - -Describes MCP servers required to run and interact with the agent. - -
- - - Support for multiple servers - - Input variable mapping and templating - - Compatible with existing tools - -
- ---- -layout: two-cols-header ---- - -# OASF Schema: A2A Extensions - -::left:: - -
- -```json -{ - "extensions": [{ - "name": "schema.oasf.agntcy.org/features/runtime/a2a", - "version": "v1.0.0", - "data": { - "name": "example-agent", - "description": "An agent that performs web searches", - "url": "http://localhost:8000", - "capabilities": { - "streaming": true, - "pushNotifications": false - }, - "defaultInputModes": ["text"], - "defaultOutputModes": ["text"], - "skills": [ - { "id": "browser", "name": "browser automation" } - ] - } - }] -} -``` - -
- -::right:: - -
- -## Features - -Describes A2A card details for communication and its usage with A2A protocol. - -
- - - Common A2A Card schema - - Compatible with existing tools - -
- ---- -background: "#eff3fc" -layout: center ---- - -# Agentic Workflow: IDE Integrations - ---- -layout: default ---- - -## Agentic Workflow: Example OASF record - -
- -<<< @/integrations/record.json - -
- ---- -layout: two-cols-header ---- - -# VS Code Integration - -Explores ways to use OASF records to enable its usage in agentic workflows with VS Code. - -::left:: - -## Implementation - -1. Load and Process OASF records - -2. Extract data from OASF record about: - - MCP servers - - NOTE: other components are not yet supported - -3. Generate `.vscode/mcp.json` configuration file - -::right:: - -
- -## Features - -- **MCP Server Configuration** - - Automatic server setup - - Secure credential handling - - Input variable templating - -- **Copilot Agent Mode Support** - - Native integration - - Interactive credential prompts - -
- -
-
- ---- -background: "#eff3fc" -layout: center ---- - -# VS Code Integration: Live Demo - ---- -layout: two-cols-header ---- - -# Continue Integration - -Explores ways to use OASF records to enable its usage in agentic workflows with VS Code Continue.dev extension. - -::left:: - -## Implementation - -1. Load and Process OASF records - -2. Extract data from OASF record about: - - MCP servers - - LLM Data - - Prompts - - A2A information - -3. Generate `.continue/assistants/`
configuration file - -::right:: - -## Features - -- **Quick Provisioning** - - Multi-model configuration - - Pre-defined prompts - - MCP server integration - -- **Dev Experience** - - Assistant gallery integration - - Model provider selection - - Prompt usage - ---- -background: "#eff3fc" -layout: center ---- - -# Continue Integration: Live Demo - ---- -layout: two-cols-header ---- - -# MCP to OASF Example: Agentic App - -Demonstrate the usage of OASF agents by loading them into IDEs and performing an agentic workflow to scan MCP server records from GitHub and create matching OASF records. - -::left:: - -## VSCode Workflow - -
- -1. Open VSCode chat console -2. Switch to LLM (e.g., Claude) -3. Enable Agent mode -4. Enter conversion generation prompt -5. Check results - -::right:: - -## Continue Workflow - -
- -1. Open Continue chat console -2. Refresh Assistants tab -3. Select OASF-generated assistant -4. Switch to configured LLM (Azure GPT-4o) -5. Select conversion generation prompt -6. Check results - -
-
-
- ---- -layout: default ---- - -# MCP to OASF Example: OASF Record - -Connecting everything together to showcase an example application.
-Built on top of OASF and ADS with native IDE support. - -
- -<<< @/mcp-extractor-agent/extractor.record.json - -
-
-
- ---- -background: "#eff3fc" -layout: center ---- - -# MCP to OASF Example: Live Demo - ---- -layout: statement ---- - -# Outcomes - -
- -**Extensible Schema**: OASF can easily support third-party integrations via extensions - -**Minimal Impact**: No significant changes needed to ADS or OASF projects - -**Simple Integration**: Straightforward integration of AGNTCY components - -**Developer Experience**: Leverages familiar tools with minimal configuration - -**Transformation Pattern**: Data extraction and transformation approach works well - ---- -layout: end ---- - -# Thank You - - +--- +theme: default +background: "#eff3fc" +class: text-center +highlighter: shiki +lineNumbers: false +drawings: + persist: false +transition: slide-left +title: "AGNTCY Integration Capabilities: An Overview" +mdc: true +--- + +
+ Cisco Logo +
+ +
+ +# AGNTCY Integration Capabilities: An Overview + +
+ + by Ramiz Polic, Luca Muscariello + +
+ + + +--- +layout: statement +--- + +# Goals + +
+ +### **Support Agentic Standards** + +Support widely-adopted standards for agentic development like MCP servers + +
+ +### **Enhance Developer Experience** + +Support AI-assisted developer workflows via tools and IDEs like Visual Studio Code + +
+ +### **Simplified Integration** + +Easy integration and usage of AGNTCY components like OASF and ADS + +--- +layout: default +--- + +# Methodology + +1. **Schema Extensions** + - Use OASF records as data carriers for common tools + - Add native support for **LLMs, Prompts, A2A Cards, MCP servers** + +2. **Data Extractors and Transformers** + - Generate tool-specific configuration files from OASF records + - Support tool usage via **VS Code Copilot** and **Continue.Dev** + +3. **Workflow Integration** + - Provide simple setup instructions + - Enable immediate productivity + +--- +layout: center +--- + +# Architecture + +
+ +```mermaid {scale: 0.6} +flowchart TD + subgraph EX[Schema Extensions] + C1[MCP Servers] + C2[LLM Models] + C3[A2A Cards] + C4[Prompts] + end + + subgraph SRC[OASF Record sources] + H1[Hub Service] + H2[ADS Network] + end + + EX -.-> |*defines extension schema for data attached to a record*| A[OASF Record] + + SRC --> |record found and retreived from a given source| A + + subgraph INT[Tooling Integrations] + D[Data Extractors & Transformers] --> |generates VSCode configs| E1[VSCode Support] + D[Data Extractors & Transformers] --> |generates Continue configs| E2[Continue Support] + end + + A --> |record ingested locally to support| INT + + style A fill:#93c5fd,stroke:#3b82f6 + style D fill:#fbcfe8,stroke:#ec4899 + style EX stroke-dasharray: 5 5 +``` + +
+ +--- +background: "#eff3fc" +layout: center +--- + +# OASF Schema Extensions + +--- +layout: two-cols-header +--- + +# OASF Schema: LLM Extension + +::left:: + +
+ +```json +{ + "extensions": [{ + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v1.0.0", + "data": { + "models": [ + { + "model": "deepseek-r1:1.5b", + "provider": "ollama", + "api_base": "http://localhost:11434", + "prompt": "You are an expert software developer..." + }, + { + "model": "gpt-4o", + "provider": "azure", + "api_base": "${input:AZURE_OPENAI_API_BASE}", + "api_key": "${input:AZURE_OPENAI_API_KEY}", + } + ] + } + }] +} +``` + +
+ +::right:: + +
+ +## Features + +Describes LLM support and its configuration for a given agent. + +
+ + - Multi-model support + - Local and cloud providers + - Model tuning parameters + - Compatible with existing tools + +
+ +--- +layout: two-cols-header +--- + +# OASF Schema: LLM Prompt Extensions + +::left:: + +
+ +```json +{ + "extensions": [{ + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "PR Summary", + "description": "PR analysis", + "prompt": "Summarize the pull request in detail..." + }, + { + "name": "PR Review", + "description": "PR review", + "prompt": "Review the pull request in detail..." + } + ] + } + }] +} +``` + +
+ +::right:: + +
+ +## Features + +Describes common LLM interaction prompts to use the agent. + +
+ + - Structured LLM Prompts + - Categorized by purpose + - Descriptive metadata + - Task-specific instructions + - Compatible with existing tools + +
+ +




+ +--- +layout: two-cols-header +--- + +# OASF Schema: MCP Server Extension + +::left:: + +
+ +```json +{ + "extensions": [{ + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", "-i", "--rm", + "-e", "GITHUB_PAT", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PAT": "${input:GITHUB_PAT}" + } + } + } + } + }] +} +``` + +
+ +::right:: + +
+ +## Features + +Describes MCP servers required to run and interact with the agent. + +
+ + - Support for multiple servers + - Input variable mapping and templating + - Compatible with existing tools + +
+ +--- +layout: two-cols-header +--- + +# OASF Schema: A2A Extensions + +::left:: + +
+ +```json +{ + "extensions": [{ + "name": "schema.oasf.agntcy.org/features/runtime/a2a", + "version": "v1.0.0", + "data": { + "name": "example-agent", + "description": "An agent that performs web searches", + "url": "http://localhost:8000", + "capabilities": { + "streaming": true, + "pushNotifications": false + }, + "defaultInputModes": ["text"], + "defaultOutputModes": ["text"], + "skills": [ + { "id": "browser", "name": "browser automation" } + ] + } + }] +} +``` + +
+ +::right:: + +
+ +## Features + +Describes A2A card details for communication and its usage with A2A protocol. + +
+ + - Common A2A Card schema + - Compatible with existing tools + +
+ +--- +background: "#eff3fc" +layout: center +--- + +# Agentic Workflow: IDE Integrations + +--- +layout: default +--- + +## Agentic Workflow: Example OASF record + +
+ +<<< @/integrations/record.json + +
+ +--- +layout: two-cols-header +--- + +# VS Code Integration + +Explores ways to use OASF records to enable its usage in agentic workflows with VS Code. + +::left:: + +## Implementation + +1. Load and Process OASF records + +2. Extract data from OASF record about: + - MCP servers + - NOTE: other components are not yet supported + +3. Generate `.vscode/mcp.json` configuration file + +::right:: + +
+ +## Features + +- **MCP Server Configuration** + - Automatic server setup + - Secure credential handling + - Input variable templating + +- **Copilot Agent Mode Support** + - Native integration + - Interactive credential prompts + +
+ +
+
+ +--- +background: "#eff3fc" +layout: center +--- + +# VS Code Integration: Live Demo + +--- +layout: two-cols-header +--- + +# Continue Integration + +Explores ways to use OASF records to enable its usage in agentic workflows with VS Code Continue.dev extension. + +::left:: + +## Implementation + +1. Load and Process OASF records + +2. Extract data from OASF record about: + - MCP servers + - LLM Data + - Prompts + - A2A information + +3. Generate `.continue/assistants/`
configuration file + +::right:: + +## Features + +- **Quick Provisioning** + - Multi-model configuration + - Pre-defined prompts + - MCP server integration + +- **Dev Experience** + - Assistant gallery integration + - Model provider selection + - Prompt usage + +--- +background: "#eff3fc" +layout: center +--- + +# Continue Integration: Live Demo + +--- +layout: two-cols-header +--- + +# MCP to OASF Example: Agentic App + +Demonstrate the usage of OASF agents by loading them into IDEs and performing an agentic workflow to scan MCP server records from GitHub and create matching OASF records. + +::left:: + +## VSCode Workflow + +
+ +1. Open VSCode chat console +2. Switch to LLM (e.g., Claude) +3. Enable Agent mode +4. Enter conversion generation prompt +5. Check results + +::right:: + +## Continue Workflow + +
+ +1. Open Continue chat console +2. Refresh Assistants tab +3. Select OASF-generated assistant +4. Switch to configured LLM (Azure GPT-4o) +5. Select conversion generation prompt +6. Check results + +
+
+
+ +--- +layout: default +--- + +# MCP to OASF Example: OASF Record + +Connecting everything together to showcase an example application.
+Built on top of OASF and ADS with native IDE support. + +
+ +<<< @/mcp-extractor-agent/extractor.record.json + +
+
+
+ +--- +background: "#eff3fc" +layout: center +--- + +# MCP to OASF Example: Live Demo + +--- +layout: statement +--- + +# Outcomes + +
+ +**Extensible Schema**: OASF can easily support third-party integrations via extensions + +**Minimal Impact**: No significant changes needed to ADS or OASF projects + +**Simple Integration**: Straightforward integration of AGNTCY components + +**Developer Experience**: Leverages familiar tools with minimal configuration + +**Transformation Pattern**: Data extraction and transformation approach works well + +--- +layout: end +--- + +# Thank You + + diff --git a/docs/research/integrations/demo.record.json b/docs/research/integrations/demo.record.json index 02d13837a..4f4d6e985 100644 --- a/docs/research/integrations/demo.record.json +++ b/docs/research/integrations/demo.record.json @@ -1,116 +1,116 @@ -{ - "name": "poc/integrations-agent-example", - "version": "v1.0.0", - "description": "An example agent with IDE integrations support", - "authors": [ - "Ramiz Polic " - ], - "created_at": "2025-06-16T17:06:37Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - } - ], - "locators": [ - { - "type": "docker-image", - "url": "https://ghcr.io/agntcy/dir/integrations-agent-example" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/a2a", - "version": "v1.0.0", - "data": { - "name": "example-agent", - "description": "An agent that performs web searches and extracts information.", - "url": "http://localhost:8000", - "capabilities": { - "streaming": true, - "pushNotifications": false - }, - "defaultInputModes": [ - "text" - ], - "defaultOutputModes": [ - "text" - ], - "skills": [ - { - "id": "browser", - "name": "browser automation", - "description": "Performs web searches to retrieve information." - } - ] - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v1.0.0", - "data": { - "models": [ - { - "model": "deepseek-r1:1.5b", - "provider": "ollama", - "api_base": "http://localhost:11434", - "prompt": "You are an expert software developer. Your task is to assist with software development tasks, including code generation, debugging, and providing explanations for code snippets. Use the provided context to generate relevant and accurate responses." - }, - { - "model": "gpt-4o", - "provider": "azure", - "api_key": "${input:AZURE_OPENAI_API_KEY}", - "api_base": "${input:AZURE_OPENAI_API_BASE}", - "roles": [ - "chat", - "edit", - "apply" - ], - "completion_options": { - "context_length": 128000, - "max_tokens": 16384 - } - } - ] - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "PR Summary", - "description": "Provide an in-depth summary of the pull request.", - "prompt": "Summarize the pull request in detail, including its purpose, changes made, and any relevant context. Focus on the technical aspects and implications of the changes. 
Use the provided link to access the GitHub pull request" - } - ] - } - } - ], - "signature": {} -} +{ + "name": "poc/integrations-agent-example", + "version": "v1.0.0", + "description": "An example agent with IDE integrations support", + "authors": [ + "Ramiz Polic " + ], + "created_at": "2025-06-16T17:06:37Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + } + ], + "locators": [ + { + "type": "docker-image", + "url": "https://ghcr.io/agntcy/dir/integrations-agent-example" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/a2a", + "version": "v1.0.0", + "data": { + "name": "example-agent", + "description": "An agent that performs web searches and extracts information.", + "url": "http://localhost:8000", + "capabilities": { + "streaming": true, + "pushNotifications": false + }, + "defaultInputModes": [ + "text" + ], + "defaultOutputModes": [ + "text" + ], + "skills": [ + { + "id": "browser", + "name": "browser automation", + "description": "Performs web searches to retrieve information." + } + ] + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v1.0.0", + "data": { + "models": [ + { + "model": "deepseek-r1:1.5b", + "provider": "ollama", + "api_base": "http://localhost:11434", + "prompt": "You are an expert software developer. Your task is to assist with software development tasks, including code generation, debugging, and providing explanations for code snippets. Use the provided context to generate relevant and accurate responses." + }, + { + "model": "gpt-4o", + "provider": "azure", + "api_key": "${input:AZURE_OPENAI_API_KEY}", + "api_base": "${input:AZURE_OPENAI_API_BASE}", + "roles": [ + "chat", + "edit", + "apply" + ], + "completion_options": { + "context_length": 128000, + "max_tokens": 16384 + } + } + ] + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "PR Summary", + "description": "Provide an in-depth summary of the pull request.", + "prompt": "Summarize the pull request in detail, including its purpose, changes made, and any relevant context. Focus on the technical aspects and implications of the changes. Use the provided link to access the GitHub pull request" + } + ] + } + } + ], + "signature": {} +} diff --git a/docs/research/integrations/importer.py b/docs/research/integrations/importer.py index 8b021fcc7..517430b44 100755 --- a/docs/research/integrations/importer.py +++ b/docs/research/integrations/importer.py @@ -1,254 +1,254 @@ -#!/usr/bin/env python3 - -############################################################################## -## This script processes an Agent Record JSON file and generates -## output files for VSCode and Continue. The output files enable -## native integration of agentic workflows with VSCode Copilot and -## Continue VSCode Extension. 
-## -## Usage: -## -## ./importer.py -record=./record.json -vscode_path=./.vscode -continue_path=./.continue/assistants -## -############################################################################## - -import argparse -import json -import yaml -import os -from pathlib import Path - -def parse_arguments(): - parser = argparse.ArgumentParser(description='Process record JSON file and generate output files') - parser.add_argument('-record', help='Path to the input JSON file', required=True) - parser.add_argument('-vscode_path', help='Output path for VSCode directory', required=True) - parser.add_argument('-continue_path', help='Output path for Continue directory', required=True) - args = parser.parse_args() - - # Convert all paths to absolute paths - args.record = os.path.abspath(args.record) - args.vscode_path = os.path.abspath(args.vscode_path) - args.continue_path = os.path.abspath(args.continue_path) - - return args - -def read_json_file(file_path): - try: - with open(file_path, 'r') as f: - return json.load(f) - except json.JSONDecodeError as e: - print(f"Error decoding JSON file: {e}") - exit(1) - except FileNotFoundError: - print(f"Input file not found: {file_path}") - exit(1) - -def write_json_file(data, file_path): - # Ensure the directory exists - os.makedirs(os.path.dirname(file_path), exist_ok=True) - - try: - with open(file_path, 'w') as f: - json.dump(data, f, indent=2) - print(f"Successfully wrote to: {file_path}") - except Exception as e: - print(f"Error writing to {file_path}: {e}") - exit(1) - -def write_yaml_file(data, file_path): - # Ensure the directory exists - os.makedirs(os.path.dirname(file_path), exist_ok=True) - - try: - with open(file_path, 'w') as f: - yaml.dump(data, f, default_flow_style=False) - print(f"Successfully wrote to: {file_path}") - except Exception as e: - print(f"Error writing to {file_path}: {e}") - exit(1) - -def extract_vscode_data(record_data): - # Find the MCP extension in the extensions list - mcp_extension = None - for extension in record_data.get('extensions', []): - if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/mcp': - mcp_extension = extension - break - - if not mcp_extension: - print("Warning: No MCP extension found in the record") - return {} - - # Extract servers data from the MCP extension - if 'data' not in mcp_extension or 'servers' not in mcp_extension['data']: - print("Warning: No servers data found in the MCP extension") - return {} - - mcp_server_data = mcp_extension['data']['servers'] - - # Extract inputs data from the MCP servers - server_inputs = {} # Use a set to avoid duplicates - for server_name, server_data in mcp_server_data.items(): - if 'env' in server_data: - for env_key, env_value in server_data['env'].items(): - # Check if the value is a reference to an environment variable - if isinstance(env_value, str) and env_value.startswith('${input:'): - # Extract the env var name from ${env:NAME} - env_name = env_value.replace('${input:', '').replace('}', '') - server_inputs[env_name] = { - 'id': env_name, - 'type': 'promptString', - 'password': True, - 'description': f"Secret value for {env_name}", - } - - # Return MCP data - return { - 'servers': mcp_server_data, - 'inputs': list(server_inputs.values()), - } - -def extract_continue_model_data(record_data): - # Find the model extension - model_extension = None - for extension in record_data.get('extensions', []): - if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/model': - model_extension = extension - break - - if not model_extension or 
'models' not in model_extension['data']: - return [] - - transformed_models = [] - for model in model_extension['data']['models']: - transformed_model = { - 'name': f"{model['provider'].title()} {model['model']}", - 'provider': model['provider'], - 'model': model['model'] - } - - # Add API key or base URL if present - if 'api_key' in model: - transformed_model['apiKey'] = model['api_key']\ - .replace(' ', '')\ - .replace('${input:', '${{secrets.')\ - .replace('}', '}}') - if 'api_base' in model: - transformed_model['apiBase'] = model['api_base']\ - .replace(' ', '')\ - .replace('${input:', '${{secrets.')\ - .replace('}', '}}') - - # Add roles if present - if 'roles' in model: - transformed_model['roles'] = model['roles'] - - # Add completion options if present - if 'completion_options' in model: - transformed_model['defaultCompletionOptions'] = { - 'contextLength': model['completion_options'].get('context_length'), - 'maxTokens': model['completion_options'].get('max_tokens') - } - - transformed_models.append(transformed_model) - - return transformed_models - -def extract_continue_prompt_data(record_data): - # Find the model extension - model_extension = None - for extension in record_data.get('extensions', []): - if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/prompt': - model_extension = extension - break - - if not model_extension or 'prompts' not in model_extension['data']: - return [] - - transformed_prompts = [] - for prompt in model_extension['data']['prompts']: - transformed_prompts.append({ - 'name': prompt['name'], - 'description': prompt['description'], - 'prompt': prompt['prompt'] - }) - - return transformed_prompts - -def extract_continue_mcp_data(record_data): - # Find the MCP extension - mcp_extension = None - for extension in record_data.get('extensions', []): - if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/mcp': - mcp_extension = extension - break - - if not mcp_extension or 'servers' not in mcp_extension['data']: - return [] - - transformed_servers = [] - for server_name, server_data in mcp_extension['data']['servers'].items(): - transformed_server = { - 'name': server_name.title(), - 'command': server_data['command'], - 'args': server_data['args'] - } - - # Transform environment variables to match Continue's format - if 'env' in server_data: - transformed_server['env'] = { - key: value.replace('${input:', '${{secrets.').replace('}', '}}') - for key, value in server_data['env'].items() - } - - transformed_servers.append(transformed_server) - - return transformed_servers - -def extract_continue_data(record_data): - continue_data = {} - - # Get the assistant name that is a valid filename - continue_assistant_name = record_data['name'] + '-' + record_data['version'] - continue_assistant_filename = continue_assistant_name.replace(' ', '-').replace('/', '-') - - # Get basic configuration - continue_data['name'] = continue_assistant_filename - continue_data['version'] = record_data['version'] - continue_data['schema'] = "v1" - - # Get models data - models = extract_continue_model_data(record_data) - if models: - continue_data['models'] = models - - # Get MCP servers data - mcp_servers = extract_continue_mcp_data(record_data) - if mcp_servers: - continue_data['mcpServers'] = mcp_servers - - # Get prompt data - prompt_data = extract_continue_prompt_data(record_data) - if prompt_data: - continue_data['prompts'] = prompt_data - - return continue_data - -def main(): - args = parse_arguments() - - # Read the record JSON file - record_data = 
read_json_file(args.record) - - # Write to VSCode path - vscode_data = extract_vscode_data(record_data) - vscode_output = Path(args.vscode_path) / 'mcp.json' - write_json_file(vscode_data, str(vscode_output)) - - # Write to continue path - continue_data = extract_continue_data(record_data) - continue_output = Path(args.continue_path) / (continue_data['name'] + '.yaml') - write_yaml_file(continue_data, str(continue_output)) - -if __name__ == '__main__': - main() +#!/usr/bin/env python3 + +############################################################################## +## This script processes an Agent Record JSON file and generates +## output files for VSCode and Continue. The output files enable +## native integration of agentic workflows with VSCode Copilot and +## Continue VSCode Extension. +## +## Usage: +## +## ./importer.py -record=./record.json -vscode_path=./.vscode -continue_path=./.continue/assistants +## +############################################################################## + +import argparse +import json +import yaml +import os +from pathlib import Path + +def parse_arguments(): + parser = argparse.ArgumentParser(description='Process record JSON file and generate output files') + parser.add_argument('-record', help='Path to the input JSON file', required=True) + parser.add_argument('-vscode_path', help='Output path for VSCode directory', required=True) + parser.add_argument('-continue_path', help='Output path for Continue directory', required=True) + args = parser.parse_args() + + # Convert all paths to absolute paths + args.record = os.path.abspath(args.record) + args.vscode_path = os.path.abspath(args.vscode_path) + args.continue_path = os.path.abspath(args.continue_path) + + return args + +def read_json_file(file_path): + try: + with open(file_path, 'r') as f: + return json.load(f) + except json.JSONDecodeError as e: + print(f"Error decoding JSON file: {e}") + exit(1) + except FileNotFoundError: + print(f"Input file not found: {file_path}") + exit(1) + +def write_json_file(data, file_path): + # Ensure the directory exists + os.makedirs(os.path.dirname(file_path), exist_ok=True) + + try: + with open(file_path, 'w') as f: + json.dump(data, f, indent=2) + print(f"Successfully wrote to: {file_path}") + except Exception as e: + print(f"Error writing to {file_path}: {e}") + exit(1) + +def write_yaml_file(data, file_path): + # Ensure the directory exists + os.makedirs(os.path.dirname(file_path), exist_ok=True) + + try: + with open(file_path, 'w') as f: + yaml.dump(data, f, default_flow_style=False) + print(f"Successfully wrote to: {file_path}") + except Exception as e: + print(f"Error writing to {file_path}: {e}") + exit(1) + +def extract_vscode_data(record_data): + # Find the MCP extension in the extensions list + mcp_extension = None + for extension in record_data.get('extensions', []): + if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/mcp': + mcp_extension = extension + break + + if not mcp_extension: + print("Warning: No MCP extension found in the record") + return {} + + # Extract servers data from the MCP extension + if 'data' not in mcp_extension or 'servers' not in mcp_extension['data']: + print("Warning: No servers data found in the MCP extension") + return {} + + mcp_server_data = mcp_extension['data']['servers'] + + # Extract inputs data from the MCP servers + server_inputs = {} # Use a set to avoid duplicates + for server_name, server_data in mcp_server_data.items(): + if 'env' in server_data: + for env_key, env_value in 
server_data['env'].items():
+                # Check if the value is a reference to an environment variable
+                if isinstance(env_value, str) and env_value.startswith('${input:'):
+                    # Extract the env var name from ${input:NAME}
+                    env_name = env_value.replace('${input:', '').replace('}', '')
+                    server_inputs[env_name] = {
+                        'id': env_name,
+                        'type': 'promptString',
+                        'password': True,
+                        'description': f"Secret value for {env_name}",
+                    }
+
+    # Return MCP data
+    return {
+        'servers': mcp_server_data,
+        'inputs': list(server_inputs.values()),
+    }
+
+def extract_continue_model_data(record_data):
+    # Find the model extension
+    model_extension = None
+    for extension in record_data.get('extensions', []):
+        if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/model':
+            model_extension = extension
+            break
+
+    if not model_extension or 'models' not in model_extension['data']:
+        return []
+
+    transformed_models = []
+    for model in model_extension['data']['models']:
+        transformed_model = {
+            'name': f"{model['provider'].title()} {model['model']}",
+            'provider': model['provider'],
+            'model': model['model']
+        }
+
+        # Add API key or base URL if present
+        if 'api_key' in model:
+            transformed_model['apiKey'] = model['api_key']\
+                .replace(' ', '')\
+                .replace('${input:', '${{secrets.')\
+                .replace('}', '}}')
+        if 'api_base' in model:
+            transformed_model['apiBase'] = model['api_base']\
+                .replace(' ', '')\
+                .replace('${input:', '${{secrets.')\
+                .replace('}', '}}')
+
+        # Add roles if present
+        if 'roles' in model:
+            transformed_model['roles'] = model['roles']
+
+        # Add completion options if present
+        if 'completion_options' in model:
+            transformed_model['defaultCompletionOptions'] = {
+                'contextLength': model['completion_options'].get('context_length'),
+                'maxTokens': model['completion_options'].get('max_tokens')
+            }
+
+        transformed_models.append(transformed_model)
+
+    return transformed_models
+
+def extract_continue_prompt_data(record_data):
+    # Find the prompt extension
+    model_extension = None
+    for extension in record_data.get('extensions', []):
+        if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/prompt':
+            model_extension = extension
+            break
+
+    if not model_extension or 'prompts' not in model_extension['data']:
+        return []
+
+    transformed_prompts = []
+    for prompt in model_extension['data']['prompts']:
+        transformed_prompts.append({
+            'name': prompt['name'],
+            'description': prompt['description'],
+            'prompt': prompt['prompt']
+        })
+
+    return transformed_prompts
+
+def extract_continue_mcp_data(record_data):
+    # Find the MCP extension
+    mcp_extension = None
+    for extension in record_data.get('extensions', []):
+        if extension['name'] == 'schema.oasf.agntcy.org/features/runtime/mcp':
+            mcp_extension = extension
+            break
+
+    if not mcp_extension or 'servers' not in mcp_extension['data']:
+        return []
+
+    transformed_servers = []
+    for server_name, server_data in mcp_extension['data']['servers'].items():
+        transformed_server = {
+            'name': server_name.title(),
+            'command': server_data['command'],
+            'args': server_data['args']
+        }
+
+        # Transform environment variables to match Continue's format
+        if 'env' in server_data:
+            transformed_server['env'] = {
+                key: value.replace('${input:', '${{secrets.').replace('}', '}}')
+                for key, value in server_data['env'].items()
+            }
+
+        transformed_servers.append(transformed_server)
+
+    return transformed_servers
+
+def extract_continue_data(record_data):
+    continue_data = {}
+
+    # Get the assistant name that is a valid filename
+    continue_assistant_name = 
record_data['name'] + '-' + record_data['version'] + continue_assistant_filename = continue_assistant_name.replace(' ', '-').replace('/', '-') + + # Get basic configuration + continue_data['name'] = continue_assistant_filename + continue_data['version'] = record_data['version'] + continue_data['schema'] = "v1" + + # Get models data + models = extract_continue_model_data(record_data) + if models: + continue_data['models'] = models + + # Get MCP servers data + mcp_servers = extract_continue_mcp_data(record_data) + if mcp_servers: + continue_data['mcpServers'] = mcp_servers + + # Get prompt data + prompt_data = extract_continue_prompt_data(record_data) + if prompt_data: + continue_data['prompts'] = prompt_data + + return continue_data + +def main(): + args = parse_arguments() + + # Read the record JSON file + record_data = read_json_file(args.record) + + # Write to VSCode path + vscode_data = extract_vscode_data(record_data) + vscode_output = Path(args.vscode_path) / 'mcp.json' + write_json_file(vscode_data, str(vscode_output)) + + # Write to continue path + continue_data = extract_continue_data(record_data) + continue_output = Path(args.continue_path) / (continue_data['name'] + '.yaml') + write_yaml_file(continue_data, str(continue_output)) + +if __name__ == '__main__': + main() diff --git a/docs/research/integrations/mcp-to-oasf-agent/context/example.record.json b/docs/research/integrations/mcp-to-oasf-agent/context/example.record.json index 234c418f9..f84b864cc 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/context/example.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/context/example.record.json @@ -1,89 +1,89 @@ -{ - "name": "organization/agent-name", - "version": "version", - "description": "Full description of the MCP agent, less than 200 characters.", - "authors": [ - "Author/Org name " - ], - "created_at": "current timestamp in RFC 3339 format", - "skills": [ - { - "category_name": "Category name, e.g., Natural Language Processing", - "category_uid": 1, - "class_name": "Class name, e.g., Text Completion", - "class_uid": 10201 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://ghcr.io/agntcy/dir/agent-name" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v1.0.0", - "data": { - "models": [ - { - "model": "deepseek-r1:1.5b", - "provider": "ollama", - "api_base": "http://localhost:11434", - "prompt": "You are an expert software developer. You give helpful and concise responses. You use typescript and react with next js 14. You prefer arrow functions and more functional programmer." - }, - { - "model": "gpt-4o", - "provider": "azure", - "api_key": "${input:AZURE_OPENAI_API_KEY}", - "api_base": "${input:AZURE_OPENAI_API_BASE}", - "roles": [ - "chat", - "edit", - "apply" - ], - "completion_options": { - "context_length": 128000, - "max_tokens": 16384 - } - } - ] - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "Prompt name", - "description": "Prompt summary", - "prompt": "Full example prompt, less than 200 characters." 
- } - ] - } - } - ] -} +{ + "name": "organization/agent-name", + "version": "version", + "description": "Full description of the MCP agent, less than 200 characters.", + "authors": [ + "Author/Org name " + ], + "created_at": "current timestamp in RFC 3339 format", + "skills": [ + { + "category_name": "Category name, e.g., Natural Language Processing", + "category_uid": 1, + "class_name": "Class name, e.g., Text Completion", + "class_uid": 10201 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://ghcr.io/agntcy/dir/agent-name" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v1.0.0", + "data": { + "models": [ + { + "model": "deepseek-r1:1.5b", + "provider": "ollama", + "api_base": "http://localhost:11434", + "prompt": "You are an expert software developer. You give helpful and concise responses. You use typescript and react with next js 14. You prefer arrow functions and more functional programmer." + }, + { + "model": "gpt-4o", + "provider": "azure", + "api_key": "${input:AZURE_OPENAI_API_KEY}", + "api_base": "${input:AZURE_OPENAI_API_BASE}", + "roles": [ + "chat", + "edit", + "apply" + ], + "completion_options": { + "context_length": 128000, + "max_tokens": 16384 + } + } + ] + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "Prompt name", + "description": "Prompt summary", + "prompt": "Full example prompt, less than 200 characters." 
+ } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/context/skills.json b/docs/research/integrations/mcp-to-oasf-agent/context/skills.json index 3b64a05ba..481c6279a 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/context/skills.json +++ b/docs/research/integrations/mcp-to-oasf-agent/context/skills.json @@ -1,823 +1,823 @@ -{ - "nlp": { - "name": "nlp", - "description": "Natural Language Processing (NLP) tasks are the application of computational techniques to the analysis and synthesis of natural language and speech.", - "uid": 1, - "classes": { - "entity_recognition": { - "name": "entity_recognition", - "family": "skill", - "description": "Identifying and categorizing key entities within the text, such as names, dates, or locations.", - "uid": 10103, - "extends": "nlu", - "category_uid": 1, - "caption": "Entity Recognition" - }, - "ethical_interaction": { - "name": "ethical_interaction", - "family": "skill", - "description": "Capabilities for ensuring ethical, unbiased, and safe content generation and interaction.", - "uid": 108, - "extends": "nlp", - "category_uid": 1, - "caption": "Ethical and Safe Interaction" - }, - "named_entity_recognition": { - "name": "named_entity_recognition", - "family": "skill", - "description": "Task to recognize names as entity, for example, people, locations, buildings, and so on.", - "uid": 11101, - "extends": "token_classification", - "category_uid": 1, - "caption": "Named Entity Recognition" - }, - "summarization": { - "name": "summarization", - "family": "skill", - "description": "Condensing longer texts into concise summaries while preserving essential information and maintaining coherence.", - "uid": 10202, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Text Summarization" - }, - "token_classification": { - "name": "token_classification", - "family": "skill", - "description": "Capabilities for classifying individual tokens or words within text.", - "uid": 111, - "extends": "nlp", - "category_uid": 1, - "caption": "Token Classification" - }, - "paraphrasing": { - "name": "paraphrasing", - "family": "skill", - "description": "Rewriting text to express the same ideas using different words and structures while maintaining the original meaning.", - "uid": 10203, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Text Paraphrasing" - }, - "pos_tagging": { - "name": "pos_tagging", - "family": "skill", - "description": "Tagging each part of a sentence as nouns, adjectives, verbs, and so on.", - "uid": 11102, - "extends": "token_classification", - "category_uid": 1, - "caption": "Part-of-Speech Tagging" - }, - "dialogue_generation": { - "name": "dialogue_generation", - "family": "skill", - "description": "Producing conversational responses that are contextually relevant and engaging within a dialogue context.", - "uid": 10204, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Dialogue Generation" - }, - "personalization": { - "name": "personalization", - "family": "skill", - "description": "Capabilities for adapting and personalizing content based on user context and preferences.", - "uid": 106, - "extends": "nlp", - "category_uid": 1, - "caption": "Personalisation and Adaptation" - }, - "multilingual_understanding": { - "name": "multilingual_understanding", - "family": "skill", - "description": "Recognizing and processing text in multiple languages.", - "uid": 10502, - "extends": "language_translation", - "category_uid": 1, - 
"caption": "Multilingual Understanding" - }, - "question_generation": { - "name": "question_generation", - "family": "skill", - "description": "Automatically generating relevant and meaningful questions from a given text or context.", - "uid": 10205, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Question Generation" - }, - "nlu": { - "name": "nlu", - "family": "skill", - "description": "Natural Language Understanding (NLU) focuses on the ability to interpret and comprehend human language, including understanding context, semantics, and identifying key entities within text.", - "uid": 101, - "extends": "nlp", - "category_uid": 1, - "caption": "Natural Language Understanding" - }, - "knowledge_synthesis": { - "name": "knowledge_synthesis", - "family": "skill", - "description": "Capability to aggregate and combine information from multiple sources, creating comprehensive and coherent responses while maintaining context and relevance.", - "uid": 10303, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Knowledge Synthesis" - }, - "storytelling": { - "name": "storytelling", - "family": "skill", - "description": "Creating narratives, stories, or fictional content with creativity and coherence.", - "uid": 10401, - "extends": "creative_content", - "category_uid": 1, - "caption": "Storytelling" - }, - "creative_content": { - "name": "creative_content", - "family": "skill", - "description": "Capabilities for generating various forms of creative content, including narratives, poetry, and other creative writing forms.", - "uid": 104, - "extends": "nlp", - "category_uid": 1, - "caption": "Creative Content Generation" - }, - "feature_extraction": { - "name": "feature_extraction", - "family": "skill", - "description": "Capabilities for extracting and representing textual features as vectors for downstream tasks.", - "uid": 110, - "extends": "nlp", - "category_uid": 1, - "caption": "Feature Extraction" - }, - "bias_mitigation": { - "name": "bias_mitigation", - "family": "skill", - "description": "Reducing or eliminating biased language and ensuring fair and unbiased output.", - "uid": 10801, - "extends": "ethical_interaction", - "category_uid": 1, - "caption": "Bias Mitigation" - }, - "natural_language_inference": { - "name": "natural_language_inference", - "family": "skill", - "description": "Classifying the relation between two texts, like a contradiction, entailment, and others.", - "uid": 10903, - "extends": "text_classification", - "category_uid": 1, - "caption": "Natural Language Inference" - }, - "text_completion": { - "name": "text_completion", - "family": "skill", - "description": "Continuing a given text prompt in a coherent and contextually appropriate manner to generate fluent and contextually relevant content.", - "uid": 10201, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Text Completion" - }, - "sentiment_analysis": { - "name": "sentiment_analysis", - "family": "skill", - "description": "Classify the sentiment of a text, that is, a positive movie review.", - "uid": 10902, - "extends": "text_classification", - "category_uid": 1, - "caption": "Sentiment Analysis" - }, - "content_moderation_skill": { - "name": "content_moderation_skill", - "family": "skill", - "description": "Avoiding the generation of harmful, inappropriate, or sensitive content.", - "uid": 10802, - "extends": "ethical_interaction", - "category_uid": 1, - "caption": "Content Moderation" - }, - "inference_deduction": { - "name": 
"inference_deduction", - "family": "skill", - "description": "Making logical inferences based on provided information.", - "uid": 10701, - "extends": "analytical_reasoning", - "category_uid": 1, - "caption": "Inference and Deduction" - }, - "fact_extraction": { - "name": "fact_extraction", - "family": "skill", - "description": "Capability to identify and extract factual information from text documents or knowledge bases, including entities, relationships, and key data points.", - "uid": 10301, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Fact Extraction" - }, - "information_retrieval_synthesis_search": { - "name": "information_retrieval_synthesis_search", - "family": "skill", - "description": "Capability to perform efficient and accurate searches within large textual databases based on various criteria, including keywords, semantic meaning, or complex queries.", - "uid": 10306, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Search" - }, - "style_adjustment": { - "name": "style_adjustment", - "family": "skill", - "description": "Modifying the tone or style of generated text to suit specific audiences or purposes.", - "uid": 10602, - "extends": "personalization", - "category_uid": 1, - "caption": "Tone and Style Adjustment" - }, - "question_answering": { - "name": "question_answering", - "family": "skill", - "description": "System capability to understand questions and provide accurate, relevant answers by analyzing available information sources.", - "uid": 10302, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Question Answering" - }, - "model_feature_extraction": { - "name": "model_feature_extraction", - "family": "skill", - "description": "Representing parts of text with vectors to be used as input to other tasks.", - "uid": 11001, - "extends": "feature_extraction", - "category_uid": 1, - "caption": "Model Feature Extraction" - }, - "story_generation": { - "name": "story_generation", - "family": "skill", - "description": "Generating a piece of text given a description or a first sentence to complete.", - "uid": 10207, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Story Generation" - }, - "language_translation": { - "name": "language_translation", - "family": "skill", - "description": "Capabilities for handling multiple languages, including translation and multilingual text processing.", - "uid": 105, - "extends": "nlp", - "category_uid": 1, - "caption": "Language Translation and Multilingual Support" - }, - "topic_labeling": { - "name": "topic_labeling", - "family": "skill", - "description": "Classifying a text as belong to one of several topics, which can be used to tag a text.", - "uid": 10901, - "extends": "text_classification", - "category_uid": 1, - "caption": "Topic Labelling and Tagging" - }, - "document_passage_retrieval": { - "name": "document_passage_retrieval", - "family": "skill", - "description": "Capability to identify and retrieve relevant documents or text passages based on specific criteria or queries from a larger collection of texts.", - "uid": 10305, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Document and Passage Retrieval" - }, - "problem_solving": { - "name": "problem_solving", - "family": "skill", - "description": "Assisting with solving problems by generating potential solutions or strategies.", - "uid": 10702, - "extends": "analytical_reasoning", - "category_uid": 1, - "caption": 
"Problem Solving" - }, - "poetry_writing": { - "name": "poetry_writing", - "family": "skill", - "description": "Composing poems, prose, or other forms of creative literature.", - "uid": 10402, - "extends": "creative_content", - "category_uid": 1, - "caption": "Poetry and Creative Writing" - }, - "information_retrieval_synthesis": { - "name": "information_retrieval_synthesis", - "family": "skill", - "description": "Capabilities for retrieving relevant information from various sources and synthesizing it into coherent, contextually appropriate responses. This includes searching, extracting, combining, and presenting information in a meaningful way.", - "uid": 103, - "extends": "nlp", - "category_uid": 1, - "caption": "Information Retrieval and Synthesis" - }, - "text_classification": { - "name": "text_classification", - "family": "skill", - "description": "Capabilities for classifying and categorizing text into predefined categories or labels.", - "uid": 109, - "extends": "nlp", - "category_uid": 1, - "caption": "Text Classification" - }, - "translation": { - "name": "translation", - "family": "skill", - "description": "Converting text from one language to another while maintaining meaning and context.", - "uid": 10501, - "extends": "language_translation", - "category_uid": 1, - "caption": "Translation" - }, - "analytical_reasoning": { - "name": "analytical_reasoning", - "family": "skill", - "description": "Capabilities for performing logical analysis, inference, and problem-solving tasks.", - "uid": 107, - "extends": "nlp", - "category_uid": 1, - "caption": "Analytical and Logical Reasoning" - }, - "contextual_comprehension": { - "name": "contextual_comprehension", - "family": "skill", - "description": "Understanding the context and nuances of text input to provide relevant responses.", - "uid": 10101, - "extends": "nlu", - "category_uid": 1, - "caption": "Contextual Comprehension" - }, - "natural_language_generation": { - "name": "natural_language_generation", - "family": "skill", - "description": "Natural Language Generation (NLG) describes the ability to generate human-like text from structured data or other inputs.", - "uid": 102, - "extends": "nlp", - "category_uid": 1, - "caption": "Natural Language Generation" - }, - "user_adaptation": { - "name": "user_adaptation", - "family": "skill", - "description": "Tailoring responses based on user preferences, history, or context.", - "uid": 10601, - "extends": "personalization", - "category_uid": 1, - "caption": "User Adaptation" - }, - "semantic_understanding": { - "name": "semantic_understanding", - "family": "skill", - "description": "Grasping the meaning and intent behind words and phrases.", - "uid": 10102, - "extends": "nlu", - "category_uid": 1, - "caption": "Semantic Understanding" - }, - "style_transfer": { - "name": "style_transfer", - "family": "skill", - "description": "Rewriting text to match the style of a given reference text while preserving the original content.", - "uid": 10206, - "extends": "natural_language_generation", - "category_uid": 1, - "caption": "Text Style Transfer" - }, - "sentence_similarity": { - "name": "sentence_similarity", - "family": "skill", - "description": "Capability to analyze and determine the semantic similarity between sentences, supporting tasks like search, matching, and content comparison.", - "uid": 10304, - "extends": "information_retrieval_synthesis", - "category_uid": 1, - "caption": "Sentence Similarity" - }, - "fact_verification": { - "name": "fact_verification", - "family": "skill", - 
"description": "Verifying facts and claims given a reference text.", - "uid": 10703, - "extends": "analytical_reasoning", - "category_uid": 1, - "caption": "Fact and Claim Verification" - } - }, - "caption": "Natural Language Processing" - }, - "images_computer_vision": { - "name": "images_computer_vision", - "description": "Images / Computer Vision tasks are the application of computational techniques to the analysis and synthesis of images.", - "uid": 2, - "classes": { - "depth_estimation": { - "name": "depth_estimation", - "family": "skill", - "description": "Predicting the distance or depth of objects within a scene from a single image or multiple images.", - "uid": 207, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Depth Estimation" - }, - "image_classification": { - "name": "image_classification", - "family": "skill", - "description": "Assigning labels or categories to images based on their visual content.", - "uid": 203, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image Classification" - }, - "image_feature_extraction": { - "name": "image_feature_extraction", - "family": "skill", - "description": "Identifying and isolating key characteristics or patterns from an image to aid in tasks like classification or recognition.", - "uid": 208, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image Feature Extraction" - }, - "image_generation": { - "name": "image_generation", - "family": "skill", - "description": "Creating new images from learned patterns or data using machine learning models.", - "uid": 206, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image Generation" - }, - "image_segmentation": { - "name": "image_segmentation", - "family": "skill", - "description": "Assigning labels or categories to images based on their visual content.", - "uid": 201, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image Segmentation" - }, - "image_to_3d": { - "name": "image_to_3d", - "family": "skill", - "description": "The process of converting a 2D image into a 3D representation or model, often by inferring depth and spatial relationships.", - "uid": 211, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image-to-3D" - }, - "image_to_image": { - "name": "image_to_image", - "family": "skill", - "description": "Transforming one image into another using a learned mapping, often for tasks like style transfer, colorization, or image enhancement.", - "uid": 210, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Image-to-Image" - }, - "keypoint_detection": { - "name": "keypoint_detection", - "family": "skill", - "description": "Identifying and locating specific points of interest within an image or object.", - "uid": 205, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Keypoint Detection" - }, - "mask_generation": { - "name": "mask_generation", - "family": "skill", - "description": "Producing segmented regions in an image to highlight specific areas or objects, typically represented as separate layers or overlays.", - "uid": 209, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Mask Generation" - }, - "object_detection": { - "name": "object_detection", - "family": "skill", - "description": "Identifying and locating specific objects within an image or video, often by drawing bounding boxes around them.", - "uid": 204, - "extends": "images_computer_vision", - "category_uid": 
2, - "caption": "Object Detection" - }, - "video_classification": { - "name": "video_classification", - "family": "skill", - "description": "Assigning labels or categories to entire videos or segments based on their visual and audio content.", - "uid": 202, - "extends": "images_computer_vision", - "category_uid": 2, - "caption": "Video Classification" - } - }, - "caption": "Images / Computer Vision" - }, - "audio": { - "name": "audio", - "description": "Audio tasks are the application of computational techniques to the analysis and synthesis of audio data.", - "uid": 3, - "classes": { - "audio_classification": { - "name": "audio_classification", - "family": "skill", - "description": "Assigning labels or classes to audio content based on its characteristics.", - "uid": 301, - "extends": "audio", - "category_uid": 3, - "caption": "Audio Classification" - }, - "audio_to_audio": { - "name": "audio_to_audio", - "family": "skill", - "description": "Transforming audio through various manipulations including cutting, filtering, and mixing.", - "uid": 302, - "extends": "audio", - "category_uid": 3, - "caption": "Audio to Audio" - } - }, - "caption": "Audio" - }, - "tabular_text": { - "name": "tabular_text", - "description": "Tabular / Text tasks are the application of computational techniques to the analysis and synthesis of tabular data and text.", - "uid": 4, - "classes": { - "tabular_classification": { - "name": "tabular_classification", - "family": "skill", - "description": "Classifying data based on attributes using classical machine learning approaches.", - "uid": 401, - "extends": "tabular_text", - "category_uid": 4, - "caption": "Tabular Classification" - }, - "tabular_regression": { - "name": "tabular_regression", - "family": "skill", - "description": "Predicting numerical values based on tabular attributes and features.", - "uid": 402, - "extends": "tabular_text", - "category_uid": 4, - "caption": "Tabular Regression" - } - }, - "caption": "Tabular / Text" - }, - "analytical_skills": { - "name": "analytical_skills", - "description": "Analytical skills encompass a range of capabilities that involve logical reasoning, problem-solving, and the ability to process and interpret complex data.", - "uid": 5, - "classes": { - "code_optimization": { - "name": "code_optimization", - "family": "skill", - "description": "Rewriting and optimizing existing code through refactoring techniques.", - "uid": 50204, - "extends": "coding_skills", - "category_uid": 5, - "caption": "Code Refactoring and Optimization" - }, - "code_templates": { - "name": "code_templates", - "family": "skill", - "description": "Automatically filling in code templates with appropriate content.", - "uid": 50203, - "extends": "coding_skills", - "category_uid": 5, - "caption": "Code Template Filling" - }, - "code_to_docstrings": { - "name": "code_to_docstrings", - "family": "skill", - "description": "Generating natural language documentation for code segments.", - "uid": 50202, - "extends": "coding_skills", - "category_uid": 5, - "caption": "Code to Docstrings" - }, - "coding_skills": { - "name": "coding_skills", - "family": "skill", - "description": "Capabilities for code generation, documentation, and optimization.", - "uid": 502, - "extends": "analytical_skills", - "category_uid": 5, - "caption": "Coding Skills" - }, - "text_to_code": { - "name": "text_to_code", - "family": "skill", - "description": "Translating natural language instructions into executable code.", - "uid": 50201, - "extends": "coding_skills", - "category_uid": 5, 
- "caption": "Text to Code" - }, - "geometry": { - "name": "geometry", - "family": "skill", - "description": "Solving geometric problems and spatial reasoning tasks.", - "uid": 50103, - "extends": "mathematical_reasoning", - "category_uid": 5, - "caption": "Geometry" - }, - "math_word_problems": { - "name": "math_word_problems", - "family": "skill", - "description": "Solving mathematical exercises presented in natural language format.", - "uid": 50102, - "extends": "mathematical_reasoning", - "category_uid": 5, - "caption": "Math Word Problems" - }, - "mathematical_reasoning": { - "name": "mathematical_reasoning", - "family": "skill", - "description": "Capabilities for solving mathematical problems and proving theorems.", - "uid": 501, - "extends": "analytical_skills", - "category_uid": 5, - "caption": "Mathematical Reasoning" - }, - "pure_math_operations": { - "name": "pure_math_operations", - "family": "skill", - "description": "Executing pure mathematical operations, such as arithmetic calculations.", - "uid": 50101, - "extends": "mathematical_reasoning", - "category_uid": 5, - "caption": "Pure Mathematical Operations" - }, - "theorem_proving": { - "name": "theorem_proving", - "family": "skill", - "description": "Proving mathematical theorems using computational methods.", - "uid": 50104, - "extends": "mathematical_reasoning", - "category_uid": 5, - "caption": "Automated Theorem Proving" - } - }, - "caption": "Analytical skills" - }, - "retrieval_augmented_generation": { - "name": "retrieval_augmented_generation", - "description": "Retrieval Augmented Generation tasks are the application of computational techniques to the analysis and synthesis of data from multiple modalities.", - "uid": 6, - "classes": { - "document_or_database_question_answering": { - "name": "document_or_database_question_answering", - "family": "skill", - "description": "Document or database question answering is the process of retrieving and using information from a document or database to answer a specific question.", - "uid": 602, - "extends": "retrieval_augmented_generation", - "category_uid": 6, - "caption": "Document or Database Question Answering" - }, - "generation_of_any": { - "name": "generation_of_any", - "family": "skill", - "description": "Generation of any is augmenting the creation of text, images, audio, or other media by incorporating retrieved information to improve or guide the generation process.", - "uid": 603, - "extends": "retrieval_augmented_generation", - "category_uid": 6, - "caption": "Generation of Any" - }, - "document_retrieval": { - "name": "document_retrieval", - "family": "skill", - "description": "Document retrieval is the process of retrieving relevant documents from a collection based on a specific query, typically through indexing and search techniques.", - "uid": 60103, - "extends": "retrieval_of_information", - "category_uid": 6, - "caption": "Document Retrieval" - }, - "indexing": { - "name": "indexing", - "family": "skill", - "description": "Depth estimations the task of predicting the distance or depth of objects within a scene from a single image or multiple images.", - "uid": 60101, - "extends": "retrieval_of_information", - "category_uid": 6, - "caption": "Indexing" - }, - "retrieval_of_information": { - "name": "retrieval_of_information", - "family": "skill", - "description": "Retrieval of information is the process of fetching relevant data or documents from a large dataset or database based on a specific query or input.", - "uid": 601, - "extends": 
"retrieval_augmented_generation", - "category_uid": 6, - "caption": "Retrieval of information" - }, - "retrieval_of_information_search": { - "name": "retrieval_of_information_search", - "family": "skill", - "description": "Search is the process of exploring a dataset or index to find relevant information or results based on a given query.", - "uid": 60102, - "extends": "retrieval_of_information", - "category_uid": 6, - "caption": "Search" - } - }, - "caption": "Retrieval Augmented Generation" - }, - "multi_modal": { - "name": "multi_modal", - "description": "Multi-modal tasks are the application of computational techniques to the analysis and synthesis of data from multiple modalities.", - "uid": 7, - "classes": { - "any_to_any": { - "name": "any_to_any", - "family": "skill", - "description": "Converting between any supported modalities (text, image, audio, video, or 3D).", - "uid": 703, - "extends": "multi_modal", - "category_uid": 7, - "caption": "Any to Any Transformation" - }, - "audio_processing": { - "name": "audio_processing", - "family": "skill", - "description": "Capabilities for processing audio, including speech synthesis and recognition.", - "uid": 702, - "extends": "multi_modal", - "category_uid": 7, - "caption": "Audio Processing" - }, - "speech_recognition": { - "name": "speech_recognition", - "family": "skill", - "description": "Converting spoken language into written text.", - "uid": 70202, - "extends": "audio_processing", - "category_uid": 7, - "caption": "Automatic Speech Recognition" - }, - "text_to_speech": { - "name": "text_to_speech", - "family": "skill", - "description": "Converting text into natural-sounding speech audio.", - "uid": 70201, - "extends": "audio_processing", - "category_uid": 7, - "caption": "Text to Speech" - }, - "image_processing": { - "name": "image_processing", - "family": "skill", - "description": "Capabilities for processing and generating images from various inputs and generating textual descriptions of visual content.", - "uid": 701, - "extends": "multi_modal", - "category_uid": 7, - "caption": "Image Processing" - }, - "image_to_text": { - "name": "image_to_text", - "family": "skill", - "description": "Generating textual descriptions or captions for images.", - "uid": 70101, - "extends": "image_processing", - "category_uid": 7, - "caption": "Image to Text" - }, - "text_to_3d": { - "name": "text_to_3d", - "family": "skill", - "description": "Generating 3D objects or scenes based on textual descriptions.", - "uid": 70104, - "extends": "image_processing", - "category_uid": 7, - "caption": "Text to 3D" - }, - "text_to_image": { - "name": "text_to_image", - "family": "skill", - "description": "Generating images based on textual descriptions or instructions.", - "uid": 70102, - "extends": "image_processing", - "category_uid": 7, - "caption": "Text to Image" - }, - "text_to_video": { - "name": "text_to_video", - "family": "skill", - "description": "Generating video content based on textual descriptions or instructions.", - "uid": 70103, - "extends": "image_processing", - "category_uid": 7, - "caption": "Text to Video" - }, - "visual_qa": { - "name": "visual_qa", - "family": "skill", - "description": "Answering questions about images using natural language.", - "uid": 70105, - "extends": "image_processing", - "category_uid": 7, - "caption": "Visual Question Answering" - } - }, - "caption": "Multi-modal" - } -} +{ + "nlp": { + "name": "nlp", + "description": "Natural Language Processing (NLP) tasks are the application of computational techniques to 
the analysis and synthesis of natural language and speech.", + "uid": 1, + "classes": { + "entity_recognition": { + "name": "entity_recognition", + "family": "skill", + "description": "Identifying and categorizing key entities within the text, such as names, dates, or locations.", + "uid": 10103, + "extends": "nlu", + "category_uid": 1, + "caption": "Entity Recognition" + }, + "ethical_interaction": { + "name": "ethical_interaction", + "family": "skill", + "description": "Capabilities for ensuring ethical, unbiased, and safe content generation and interaction.", + "uid": 108, + "extends": "nlp", + "category_uid": 1, + "caption": "Ethical and Safe Interaction" + }, + "named_entity_recognition": { + "name": "named_entity_recognition", + "family": "skill", + "description": "Recognizing named entities in text, for example, people, locations, buildings, and so on.", + "uid": 11101, + "extends": "token_classification", + "category_uid": 1, + "caption": "Named Entity Recognition" + }, + "summarization": { + "name": "summarization", + "family": "skill", + "description": "Condensing longer texts into concise summaries while preserving essential information and maintaining coherence.", + "uid": 10202, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Text Summarization" + }, + "token_classification": { + "name": "token_classification", + "family": "skill", + "description": "Capabilities for classifying individual tokens or words within text.", + "uid": 111, + "extends": "nlp", + "category_uid": 1, + "caption": "Token Classification" + }, + "paraphrasing": { + "name": "paraphrasing", + "family": "skill", + "description": "Rewriting text to express the same ideas using different words and structures while maintaining the original meaning.", + "uid": 10203, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Text Paraphrasing" + }, + "pos_tagging": { + "name": "pos_tagging", + "family": "skill", + "description": "Tagging each word in a sentence as a noun, adjective, verb, and so on.", + "uid": 11102, + "extends": "token_classification", + "category_uid": 1, + "caption": "Part-of-Speech Tagging" + }, + "dialogue_generation": { + "name": "dialogue_generation", + "family": "skill", + "description": "Producing conversational responses that are contextually relevant and engaging within a dialogue context.", + "uid": 10204, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Dialogue Generation" + }, + "personalization": { + "name": "personalization", + "family": "skill", + "description": "Capabilities for adapting and personalizing content based on user context and preferences.", + "uid": 106, + "extends": "nlp", + "category_uid": 1, + "caption": "Personalisation and Adaptation" + }, + "multilingual_understanding": { + "name": "multilingual_understanding", + "family": "skill", + "description": "Recognizing and processing text in multiple languages.", + "uid": 10502, + "extends": "language_translation", + "category_uid": 1, + "caption": "Multilingual Understanding" + }, + "question_generation": { + "name": "question_generation", + "family": "skill", + "description": "Automatically generating relevant and meaningful questions from a given text or context.", + "uid": 10205, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Question Generation" + }, + "nlu": { + "name": "nlu", + "family": "skill", + "description": "Natural Language Understanding (NLU) focuses on the ability to interpret and comprehend 
human language, including understanding context, semantics, and identifying key entities within text.", + "uid": 101, + "extends": "nlp", + "category_uid": 1, + "caption": "Natural Language Understanding" + }, + "knowledge_synthesis": { + "name": "knowledge_synthesis", + "family": "skill", + "description": "Capability to aggregate and combine information from multiple sources, creating comprehensive and coherent responses while maintaining context and relevance.", + "uid": 10303, + "extends": "information_retrieval_synthesis", + "category_uid": 1, + "caption": "Knowledge Synthesis" + }, + "storytelling": { + "name": "storytelling", + "family": "skill", + "description": "Creating narratives, stories, or fictional content with creativity and coherence.", + "uid": 10401, + "extends": "creative_content", + "category_uid": 1, + "caption": "Storytelling" + }, + "creative_content": { + "name": "creative_content", + "family": "skill", + "description": "Capabilities for generating various forms of creative content, including narratives, poetry, and other creative writing forms.", + "uid": 104, + "extends": "nlp", + "category_uid": 1, + "caption": "Creative Content Generation" + }, + "feature_extraction": { + "name": "feature_extraction", + "family": "skill", + "description": "Capabilities for extracting and representing textual features as vectors for downstream tasks.", + "uid": 110, + "extends": "nlp", + "category_uid": 1, + "caption": "Feature Extraction" + }, + "bias_mitigation": { + "name": "bias_mitigation", + "family": "skill", + "description": "Reducing or eliminating biased language and ensuring fair and unbiased output.", + "uid": 10801, + "extends": "ethical_interaction", + "category_uid": 1, + "caption": "Bias Mitigation" + }, + "natural_language_inference": { + "name": "natural_language_inference", + "family": "skill", + "description": "Classifying the relation between two texts, such as contradiction or entailment.", + "uid": 10903, + "extends": "text_classification", + "category_uid": 1, + "caption": "Natural Language Inference" + }, + "text_completion": { + "name": "text_completion", + "family": "skill", + "description": "Continuing a given text prompt in a coherent and contextually appropriate manner to generate fluent and contextually relevant content.", + "uid": 10201, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Text Completion" + }, + "sentiment_analysis": { + "name": "sentiment_analysis", + "family": "skill", + "description": "Classifying the sentiment of a text, for example, labeling a movie review as positive or negative.", + "uid": 10902, + "extends": "text_classification", + "category_uid": 1, + "caption": "Sentiment Analysis" + }, + "content_moderation_skill": { + "name": "content_moderation_skill", + "family": "skill", + "description": "Avoiding the generation of harmful, inappropriate, or sensitive content.", + "uid": 10802, + "extends": "ethical_interaction", + "category_uid": 1, + "caption": "Content Moderation" + }, + "inference_deduction": { + "name": "inference_deduction", + "family": "skill", + "description": "Making logical inferences based on provided information.", + "uid": 10701, + "extends": "analytical_reasoning", + "category_uid": 1, + "caption": "Inference and Deduction" + }, + "fact_extraction": { + "name": "fact_extraction", + "family": "skill", + "description": "Capability to identify and extract factual information from text documents or knowledge bases, including entities, relationships, and key data points.", + "uid": 10301, + "extends": 
"information_retrieval_synthesis", + "category_uid": 1, + "caption": "Fact Extraction" + }, + "information_retrieval_synthesis_search": { + "name": "information_retrieval_synthesis_search", + "family": "skill", + "description": "Capability to perform efficient and accurate searches within large textual databases based on various criteria, including keywords, semantic meaning, or complex queries.", + "uid": 10306, + "extends": "information_retrieval_synthesis", + "category_uid": 1, + "caption": "Search" + }, + "style_adjustment": { + "name": "style_adjustment", + "family": "skill", + "description": "Modifying the tone or style of generated text to suit specific audiences or purposes.", + "uid": 10602, + "extends": "personalization", + "category_uid": 1, + "caption": "Tone and Style Adjustment" + }, + "question_answering": { + "name": "question_answering", + "family": "skill", + "description": "System capability to understand questions and provide accurate, relevant answers by analyzing available information sources.", + "uid": 10302, + "extends": "information_retrieval_synthesis", + "category_uid": 1, + "caption": "Question Answering" + }, + "model_feature_extraction": { + "name": "model_feature_extraction", + "family": "skill", + "description": "Representing parts of text with vectors to be used as input to other tasks.", + "uid": 11001, + "extends": "feature_extraction", + "category_uid": 1, + "caption": "Model Feature Extraction" + }, + "story_generation": { + "name": "story_generation", + "family": "skill", + "description": "Generating a piece of text given a description or a first sentence to complete.", + "uid": 10207, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Story Generation" + }, + "language_translation": { + "name": "language_translation", + "family": "skill", + "description": "Capabilities for handling multiple languages, including translation and multilingual text processing.", + "uid": 105, + "extends": "nlp", + "category_uid": 1, + "caption": "Language Translation and Multilingual Support" + }, + "topic_labeling": { + "name": "topic_labeling", + "family": "skill", + "description": "Classifying a text as belong to one of several topics, which can be used to tag a text.", + "uid": 10901, + "extends": "text_classification", + "category_uid": 1, + "caption": "Topic Labelling and Tagging" + }, + "document_passage_retrieval": { + "name": "document_passage_retrieval", + "family": "skill", + "description": "Capability to identify and retrieve relevant documents or text passages based on specific criteria or queries from a larger collection of texts.", + "uid": 10305, + "extends": "information_retrieval_synthesis", + "category_uid": 1, + "caption": "Document and Passage Retrieval" + }, + "problem_solving": { + "name": "problem_solving", + "family": "skill", + "description": "Assisting with solving problems by generating potential solutions or strategies.", + "uid": 10702, + "extends": "analytical_reasoning", + "category_uid": 1, + "caption": "Problem Solving" + }, + "poetry_writing": { + "name": "poetry_writing", + "family": "skill", + "description": "Composing poems, prose, or other forms of creative literature.", + "uid": 10402, + "extends": "creative_content", + "category_uid": 1, + "caption": "Poetry and Creative Writing" + }, + "information_retrieval_synthesis": { + "name": "information_retrieval_synthesis", + "family": "skill", + "description": "Capabilities for retrieving relevant information from various sources and synthesizing it into 
coherent, contextually appropriate responses. This includes searching, extracting, combining, and presenting information in a meaningful way.", + "uid": 103, + "extends": "nlp", + "category_uid": 1, + "caption": "Information Retrieval and Synthesis" + }, + "text_classification": { + "name": "text_classification", + "family": "skill", + "description": "Capabilities for classifying and categorizing text into predefined categories or labels.", + "uid": 109, + "extends": "nlp", + "category_uid": 1, + "caption": "Text Classification" + }, + "translation": { + "name": "translation", + "family": "skill", + "description": "Converting text from one language to another while maintaining meaning and context.", + "uid": 10501, + "extends": "language_translation", + "category_uid": 1, + "caption": "Translation" + }, + "analytical_reasoning": { + "name": "analytical_reasoning", + "family": "skill", + "description": "Capabilities for performing logical analysis, inference, and problem-solving tasks.", + "uid": 107, + "extends": "nlp", + "category_uid": 1, + "caption": "Analytical and Logical Reasoning" + }, + "contextual_comprehension": { + "name": "contextual_comprehension", + "family": "skill", + "description": "Understanding the context and nuances of text input to provide relevant responses.", + "uid": 10101, + "extends": "nlu", + "category_uid": 1, + "caption": "Contextual Comprehension" + }, + "natural_language_generation": { + "name": "natural_language_generation", + "family": "skill", + "description": "Natural Language Generation (NLG) describes the ability to generate human-like text from structured data or other inputs.", + "uid": 102, + "extends": "nlp", + "category_uid": 1, + "caption": "Natural Language Generation" + }, + "user_adaptation": { + "name": "user_adaptation", + "family": "skill", + "description": "Tailoring responses based on user preferences, history, or context.", + "uid": 10601, + "extends": "personalization", + "category_uid": 1, + "caption": "User Adaptation" + }, + "semantic_understanding": { + "name": "semantic_understanding", + "family": "skill", + "description": "Grasping the meaning and intent behind words and phrases.", + "uid": 10102, + "extends": "nlu", + "category_uid": 1, + "caption": "Semantic Understanding" + }, + "style_transfer": { + "name": "style_transfer", + "family": "skill", + "description": "Rewriting text to match the style of a given reference text while preserving the original content.", + "uid": 10206, + "extends": "natural_language_generation", + "category_uid": 1, + "caption": "Text Style Transfer" + }, + "sentence_similarity": { + "name": "sentence_similarity", + "family": "skill", + "description": "Capability to analyze and determine the semantic similarity between sentences, supporting tasks like search, matching, and content comparison.", + "uid": 10304, + "extends": "information_retrieval_synthesis", + "category_uid": 1, + "caption": "Sentence Similarity" + }, + "fact_verification": { + "name": "fact_verification", + "family": "skill", + "description": "Verifying facts and claims given a reference text.", + "uid": 10703, + "extends": "analytical_reasoning", + "category_uid": 1, + "caption": "Fact and Claim Verification" + } + }, + "caption": "Natural Language Processing" + }, + "images_computer_vision": { + "name": "images_computer_vision", + "description": "Images / Computer Vision tasks are the application of computational techniques to the analysis and synthesis of images.", + "uid": 2, + "classes": { + "depth_estimation": { + "name": 
"depth_estimation", + "family": "skill", + "description": "Predicting the distance or depth of objects within a scene from a single image or multiple images.", + "uid": 207, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Depth Estimation" + }, + "image_classification": { + "name": "image_classification", + "family": "skill", + "description": "Assigning labels or categories to images based on their visual content.", + "uid": 203, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image Classification" + }, + "image_feature_extraction": { + "name": "image_feature_extraction", + "family": "skill", + "description": "Identifying and isolating key characteristics or patterns from an image to aid in tasks like classification or recognition.", + "uid": 208, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image Feature Extraction" + }, + "image_generation": { + "name": "image_generation", + "family": "skill", + "description": "Creating new images from learned patterns or data using machine learning models.", + "uid": 206, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image Generation" + }, + "image_segmentation": { + "name": "image_segmentation", + "family": "skill", + "description": "Assigning labels or categories to images based on their visual content.", + "uid": 201, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image Segmentation" + }, + "image_to_3d": { + "name": "image_to_3d", + "family": "skill", + "description": "The process of converting a 2D image into a 3D representation or model, often by inferring depth and spatial relationships.", + "uid": 211, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image-to-3D" + }, + "image_to_image": { + "name": "image_to_image", + "family": "skill", + "description": "Transforming one image into another using a learned mapping, often for tasks like style transfer, colorization, or image enhancement.", + "uid": 210, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Image-to-Image" + }, + "keypoint_detection": { + "name": "keypoint_detection", + "family": "skill", + "description": "Identifying and locating specific points of interest within an image or object.", + "uid": 205, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Keypoint Detection" + }, + "mask_generation": { + "name": "mask_generation", + "family": "skill", + "description": "Producing segmented regions in an image to highlight specific areas or objects, typically represented as separate layers or overlays.", + "uid": 209, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Mask Generation" + }, + "object_detection": { + "name": "object_detection", + "family": "skill", + "description": "Identifying and locating specific objects within an image or video, often by drawing bounding boxes around them.", + "uid": 204, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Object Detection" + }, + "video_classification": { + "name": "video_classification", + "family": "skill", + "description": "Assigning labels or categories to entire videos or segments based on their visual and audio content.", + "uid": 202, + "extends": "images_computer_vision", + "category_uid": 2, + "caption": "Video Classification" + } + }, + "caption": "Images / Computer Vision" + }, + "audio": { + "name": "audio", + "description": "Audio tasks are the application of computational 
techniques to the analysis and synthesis of audio data.", + "uid": 3, + "classes": { + "audio_classification": { + "name": "audio_classification", + "family": "skill", + "description": "Assigning labels or classes to audio content based on its characteristics.", + "uid": 301, + "extends": "audio", + "category_uid": 3, + "caption": "Audio Classification" + }, + "audio_to_audio": { + "name": "audio_to_audio", + "family": "skill", + "description": "Transforming audio through various manipulations including cutting, filtering, and mixing.", + "uid": 302, + "extends": "audio", + "category_uid": 3, + "caption": "Audio to Audio" + } + }, + "caption": "Audio" + }, + "tabular_text": { + "name": "tabular_text", + "description": "Tabular / Text tasks are the application of computational techniques to the analysis and synthesis of tabular data and text.", + "uid": 4, + "classes": { + "tabular_classification": { + "name": "tabular_classification", + "family": "skill", + "description": "Classifying data based on attributes using classical machine learning approaches.", + "uid": 401, + "extends": "tabular_text", + "category_uid": 4, + "caption": "Tabular Classification" + }, + "tabular_regression": { + "name": "tabular_regression", + "family": "skill", + "description": "Predicting numerical values based on tabular attributes and features.", + "uid": 402, + "extends": "tabular_text", + "category_uid": 4, + "caption": "Tabular Regression" + } + }, + "caption": "Tabular / Text" + }, + "analytical_skills": { + "name": "analytical_skills", + "description": "Analytical skills encompass a range of capabilities that involve logical reasoning, problem-solving, and the ability to process and interpret complex data.", + "uid": 5, + "classes": { + "code_optimization": { + "name": "code_optimization", + "family": "skill", + "description": "Rewriting and optimizing existing code through refactoring techniques.", + "uid": 50204, + "extends": "coding_skills", + "category_uid": 5, + "caption": "Code Refactoring and Optimization" + }, + "code_templates": { + "name": "code_templates", + "family": "skill", + "description": "Automatically filling in code templates with appropriate content.", + "uid": 50203, + "extends": "coding_skills", + "category_uid": 5, + "caption": "Code Template Filling" + }, + "code_to_docstrings": { + "name": "code_to_docstrings", + "family": "skill", + "description": "Generating natural language documentation for code segments.", + "uid": 50202, + "extends": "coding_skills", + "category_uid": 5, + "caption": "Code to Docstrings" + }, + "coding_skills": { + "name": "coding_skills", + "family": "skill", + "description": "Capabilities for code generation, documentation, and optimization.", + "uid": 502, + "extends": "analytical_skills", + "category_uid": 5, + "caption": "Coding Skills" + }, + "text_to_code": { + "name": "text_to_code", + "family": "skill", + "description": "Translating natural language instructions into executable code.", + "uid": 50201, + "extends": "coding_skills", + "category_uid": 5, + "caption": "Text to Code" + }, + "geometry": { + "name": "geometry", + "family": "skill", + "description": "Solving geometric problems and spatial reasoning tasks.", + "uid": 50103, + "extends": "mathematical_reasoning", + "category_uid": 5, + "caption": "Geometry" + }, + "math_word_problems": { + "name": "math_word_problems", + "family": "skill", + "description": "Solving mathematical exercises presented in natural language format.", + "uid": 50102, + "extends": "mathematical_reasoning", + 
"category_uid": 5, + "caption": "Math Word Problems" + }, + "mathematical_reasoning": { + "name": "mathematical_reasoning", + "family": "skill", + "description": "Capabilities for solving mathematical problems and proving theorems.", + "uid": 501, + "extends": "analytical_skills", + "category_uid": 5, + "caption": "Mathematical Reasoning" + }, + "pure_math_operations": { + "name": "pure_math_operations", + "family": "skill", + "description": "Executing pure mathematical operations, such as arithmetic calculations.", + "uid": 50101, + "extends": "mathematical_reasoning", + "category_uid": 5, + "caption": "Pure Mathematical Operations" + }, + "theorem_proving": { + "name": "theorem_proving", + "family": "skill", + "description": "Proving mathematical theorems using computational methods.", + "uid": 50104, + "extends": "mathematical_reasoning", + "category_uid": 5, + "caption": "Automated Theorem Proving" + } + }, + "caption": "Analytical skills" + }, + "retrieval_augmented_generation": { + "name": "retrieval_augmented_generation", + "description": "Retrieval Augmented Generation tasks are the application of computational techniques to the analysis and synthesis of data from multiple modalities.", + "uid": 6, + "classes": { + "document_or_database_question_answering": { + "name": "document_or_database_question_answering", + "family": "skill", + "description": "Document or database question answering is the process of retrieving and using information from a document or database to answer a specific question.", + "uid": 602, + "extends": "retrieval_augmented_generation", + "category_uid": 6, + "caption": "Document or Database Question Answering" + }, + "generation_of_any": { + "name": "generation_of_any", + "family": "skill", + "description": "Generation of any is augmenting the creation of text, images, audio, or other media by incorporating retrieved information to improve or guide the generation process.", + "uid": 603, + "extends": "retrieval_augmented_generation", + "category_uid": 6, + "caption": "Generation of Any" + }, + "document_retrieval": { + "name": "document_retrieval", + "family": "skill", + "description": "Document retrieval is the process of retrieving relevant documents from a collection based on a specific query, typically through indexing and search techniques.", + "uid": 60103, + "extends": "retrieval_of_information", + "category_uid": 6, + "caption": "Document Retrieval" + }, + "indexing": { + "name": "indexing", + "family": "skill", + "description": "Depth estimations the task of predicting the distance or depth of objects within a scene from a single image or multiple images.", + "uid": 60101, + "extends": "retrieval_of_information", + "category_uid": 6, + "caption": "Indexing" + }, + "retrieval_of_information": { + "name": "retrieval_of_information", + "family": "skill", + "description": "Retrieval of information is the process of fetching relevant data or documents from a large dataset or database based on a specific query or input.", + "uid": 601, + "extends": "retrieval_augmented_generation", + "category_uid": 6, + "caption": "Retrieval of information" + }, + "retrieval_of_information_search": { + "name": "retrieval_of_information_search", + "family": "skill", + "description": "Search is the process of exploring a dataset or index to find relevant information or results based on a given query.", + "uid": 60102, + "extends": "retrieval_of_information", + "category_uid": 6, + "caption": "Search" + } + }, + "caption": "Retrieval Augmented Generation" + }, + 
"multi_modal": { + "name": "multi_modal", + "description": "Multi-modal tasks are the application of computational techniques to the analysis and synthesis of data from multiple modalities.", + "uid": 7, + "classes": { + "any_to_any": { + "name": "any_to_any", + "family": "skill", + "description": "Converting between any supported modalities (text, image, audio, video, or 3D).", + "uid": 703, + "extends": "multi_modal", + "category_uid": 7, + "caption": "Any to Any Transformation" + }, + "audio_processing": { + "name": "audio_processing", + "family": "skill", + "description": "Capabilities for processing audio, including speech synthesis and recognition.", + "uid": 702, + "extends": "multi_modal", + "category_uid": 7, + "caption": "Audio Processing" + }, + "speech_recognition": { + "name": "speech_recognition", + "family": "skill", + "description": "Converting spoken language into written text.", + "uid": 70202, + "extends": "audio_processing", + "category_uid": 7, + "caption": "Automatic Speech Recognition" + }, + "text_to_speech": { + "name": "text_to_speech", + "family": "skill", + "description": "Converting text into natural-sounding speech audio.", + "uid": 70201, + "extends": "audio_processing", + "category_uid": 7, + "caption": "Text to Speech" + }, + "image_processing": { + "name": "image_processing", + "family": "skill", + "description": "Capabilities for processing and generating images from various inputs and generating textual descriptions of visual content.", + "uid": 701, + "extends": "multi_modal", + "category_uid": 7, + "caption": "Image Processing" + }, + "image_to_text": { + "name": "image_to_text", + "family": "skill", + "description": "Generating textual descriptions or captions for images.", + "uid": 70101, + "extends": "image_processing", + "category_uid": 7, + "caption": "Image to Text" + }, + "text_to_3d": { + "name": "text_to_3d", + "family": "skill", + "description": "Generating 3D objects or scenes based on textual descriptions.", + "uid": 70104, + "extends": "image_processing", + "category_uid": 7, + "caption": "Text to 3D" + }, + "text_to_image": { + "name": "text_to_image", + "family": "skill", + "description": "Generating images based on textual descriptions or instructions.", + "uid": 70102, + "extends": "image_processing", + "category_uid": 7, + "caption": "Text to Image" + }, + "text_to_video": { + "name": "text_to_video", + "family": "skill", + "description": "Generating video content based on textual descriptions or instructions.", + "uid": 70103, + "extends": "image_processing", + "category_uid": 7, + "caption": "Text to Video" + }, + "visual_qa": { + "name": "visual_qa", + "family": "skill", + "description": "Answering questions about images using natural language.", + "uid": 70105, + "extends": "image_processing", + "category_uid": 7, + "caption": "Visual Question Answering" + } + }, + "caption": "Multi-modal" + } +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/aws.record.json b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/aws.record.json index 4817cb2af..0bf23515e 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/aws.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/aws.record.json @@ -1,84 +1,84 @@ -{ - "name": "awslabs/aws-mcp-server", - "version": "1.0.0", - "description": "Official AWS MCP server that provides access to Amazon Web Services including EC2, S3, Lambda, and other cloud services.", - "authors": [ - "Amazon Web Services " - ], - 
"created_at": "2023-08-17T09:15:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Question Answering", - "class_uid": 10302 - }, - { - "category_name": "Analytical skills", - "category_uid": 5, - "class_name": "Coding Skills", - "class_uid": 502 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Problem Solving", - "class_uid": 10702 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://github.com/awslabs/aws-mcp-server" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "aws": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "AWS_ACCESS_KEY_ID", - "-e", - "AWS_SECRET_ACCESS_KEY", - "-e", - "AWS_REGION", - "ghcr.io/awslabs/aws-mcp-server" - ], - "env": { - "AWS_ACCESS_KEY_ID": "${input:AWS_ACCESS_KEY_ID}", - "AWS_SECRET_ACCESS_KEY": "${input:AWS_SECRET_ACCESS_KEY}", - "AWS_REGION": "${input:AWS_REGION}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "AWS Cloud Assistant", - "description": "AWS cloud services assistant", - "prompt": "You are an AWS cloud assistant. Help users manage AWS resources, design cloud architectures, and troubleshoot AWS service issues." - } - ] - } - } - ] -} +{ + "name": "awslabs/aws-mcp-server", + "version": "1.0.0", + "description": "Official AWS MCP server that provides access to Amazon Web Services including EC2, S3, Lambda, and other cloud services.", + "authors": [ + "Amazon Web Services " + ], + "created_at": "2023-08-17T09:15:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Question Answering", + "class_uid": 10302 + }, + { + "category_name": "Analytical skills", + "category_uid": 5, + "class_name": "Coding Skills", + "class_uid": 502 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Problem Solving", + "class_uid": 10702 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://github.com/awslabs/aws-mcp-server" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "aws": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "AWS_ACCESS_KEY_ID", + "-e", + "AWS_SECRET_ACCESS_KEY", + "-e", + "AWS_REGION", + "ghcr.io/awslabs/aws-mcp-server" + ], + "env": { + "AWS_ACCESS_KEY_ID": "${input:AWS_ACCESS_KEY_ID}", + "AWS_SECRET_ACCESS_KEY": "${input:AWS_SECRET_ACCESS_KEY}", + "AWS_REGION": "${input:AWS_REGION}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "AWS Cloud Assistant", + "description": "AWS cloud services assistant", + "prompt": "You are an AWS cloud assistant. Help users manage AWS resources, design cloud architectures, and troubleshoot AWS service issues." 
+ } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/azure.record.json b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/azure.record.json index f7c70373e..86b566522 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/azure.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/azure.record.json @@ -1,87 +1,87 @@ -{ - "name": "Azure/azure-mcp-server", - "version": "1.0.0", - "description": "Official Azure MCP server that provides access to Microsoft Azure cloud services, including Azure VMs, Storage, Functions, and other cloud resources.", - "authors": [ - "Microsoft Corporation " - ], - "created_at": "2023-09-25T13:40:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Question Answering", - "class_uid": 10302 - }, - { - "category_name": "Analytical skills", - "category_uid": 5, - "class_name": "Coding Skills", - "class_uid": 502 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Problem Solving", - "class_uid": 10702 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://github.com/Azure/azure-mcp-server" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "azure": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "AZURE_SUBSCRIPTION_ID", - "-e", - "AZURE_TENANT_ID", - "-e", - "AZURE_CLIENT_ID", - "-e", - "AZURE_CLIENT_SECRET", - "ghcr.io/Azure/azure-mcp-server" - ], - "env": { - "AZURE_SUBSCRIPTION_ID": "${input:AZURE_SUBSCRIPTION_ID}", - "AZURE_TENANT_ID": "${input:AZURE_TENANT_ID}", - "AZURE_CLIENT_ID": "${input:AZURE_CLIENT_ID}", - "AZURE_CLIENT_SECRET": "${input:AZURE_CLIENT_SECRET}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "Azure Cloud Assistant", - "description": "Azure cloud services assistant", - "prompt": "You are an Azure cloud assistant. Help users deploy and manage Azure resources, troubleshoot cloud services, and implement Azure solutions." 
- } - ] - } - } - ] -} +{ + "name": "Azure/azure-mcp-server", + "version": "1.0.0", + "description": "Official Azure MCP server that provides access to Microsoft Azure cloud services, including Azure VMs, Storage, Functions, and other cloud resources.", + "authors": [ + "Microsoft Corporation " + ], + "created_at": "2023-09-25T13:40:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Question Answering", + "class_uid": 10302 + }, + { + "category_name": "Analytical skills", + "category_uid": 5, + "class_name": "Coding Skills", + "class_uid": 502 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Problem Solving", + "class_uid": 10702 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://github.com/Azure/azure-mcp-server" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "azure": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "AZURE_SUBSCRIPTION_ID", + "-e", + "AZURE_TENANT_ID", + "-e", + "AZURE_CLIENT_ID", + "-e", + "AZURE_CLIENT_SECRET", + "ghcr.io/Azure/azure-mcp-server" + ], + "env": { + "AZURE_SUBSCRIPTION_ID": "${input:AZURE_SUBSCRIPTION_ID}", + "AZURE_TENANT_ID": "${input:AZURE_TENANT_ID}", + "AZURE_CLIENT_ID": "${input:AZURE_CLIENT_ID}", + "AZURE_CLIENT_SECRET": "${input:AZURE_CLIENT_SECRET}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "Azure Cloud Assistant", + "description": "Azure cloud services assistant", + "prompt": "You are an Azure cloud assistant. Help users deploy and manage Azure resources, troubleshoot cloud services, and implement Azure solutions." 
+ } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/gcp.record.json b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/gcp.record.json index 5bdfd137b..180f16900 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/gcp.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/gcp.record.json @@ -1,81 +1,81 @@ -{ - "name": "googlecloud/gcp-mcp-server", - "version": "1.0.0", - "description": "Official Google Cloud MCP server that provides access to Google Cloud Platform services, including GCE, GCS, Cloud Functions, and other GCP resources.", - "authors": [ - "Google LLC " - ], - "created_at": "2023-10-10T11:20:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Question Answering", - "class_uid": 10302 - }, - { - "category_name": "Analytical skills", - "category_uid": 5, - "class_name": "Coding Skills", - "class_uid": 502 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Problem Solving", - "class_uid": 10702 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://github.com/googlecloud/gcp-mcp-server" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "gcp": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GOOGLE_APPLICATION_CREDENTIALS", - "-e", - "GCP_PROJECT_ID", - "ghcr.io/googlecloud/gcp-mcp-server" - ], - "env": { - "GOOGLE_APPLICATION_CREDENTIALS": "${input:GOOGLE_APPLICATION_CREDENTIALS}", - "GCP_PROJECT_ID": "${input:GCP_PROJECT_ID}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "GCP Cloud Assistant", - "description": "Google Cloud Platform assistant", - "prompt": "You are a Google Cloud Platform assistant. Help users deploy and manage GCP resources, troubleshoot cloud services, and implement GCP solutions." 
- } - ] - } - } - ] -} +{ + "name": "googlecloud/gcp-mcp-server", + "version": "1.0.0", + "description": "Official Google Cloud MCP server that provides access to Google Cloud Platform services, including GCE, GCS, Cloud Functions, and other GCP resources.", + "authors": [ + "Google LLC " + ], + "created_at": "2023-10-10T11:20:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Question Answering", + "class_uid": 10302 + }, + { + "category_name": "Analytical skills", + "category_uid": 5, + "class_name": "Coding Skills", + "class_uid": 502 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Problem Solving", + "class_uid": 10702 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://github.com/googlecloud/gcp-mcp-server" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "gcp": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GOOGLE_APPLICATION_CREDENTIALS", + "-e", + "GCP_PROJECT_ID", + "ghcr.io/googlecloud/gcp-mcp-server" + ], + "env": { + "GOOGLE_APPLICATION_CREDENTIALS": "${input:GOOGLE_APPLICATION_CREDENTIALS}", + "GCP_PROJECT_ID": "${input:GCP_PROJECT_ID}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "GCP Cloud Assistant", + "description": "Google Cloud Platform assistant", + "prompt": "You are a Google Cloud Platform assistant. Help users deploy and manage GCP resources, troubleshoot cloud services, and implement GCP solutions." + } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/github.record.json b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/github.record.json index 4132dc156..d9ceb003d 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/github.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/github.record.json @@ -1,72 +1,72 @@ -{ - "name": "github/github-mcp-server", - "version": "1.0.0", - "description": "Official GitHub MCP server that provides access to GitHub repositories, issues, pull requests, and other GitHub API functionality.", - "authors": [ - "GitHub Inc. 
" - ], - "created_at": "2023-09-12T14:30:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Question Answering", - "class_uid": 10302 - }, - { - "category_name": "Analytical skills", - "category_uid": 5, - "class_name": "Coding Skills", - "class_uid": 502 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://github.com/github/github-mcp-server" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "GitHub Assistant", - "description": "GitHub operations assistant", - "prompt": "You are a GitHub assistant. Use the GitHub MCP Server to interact with repositories, issues, pull requests, and other GitHub features." - } - ] - } - } - ] -} +{ + "name": "github/github-mcp-server", + "version": "1.0.0", + "description": "Official GitHub MCP server that provides access to GitHub repositories, issues, pull requests, and other GitHub API functionality.", + "authors": [ + "GitHub Inc. " + ], + "created_at": "2023-09-12T14:30:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Question Answering", + "class_uid": 10302 + }, + { + "category_name": "Analytical skills", + "category_uid": 5, + "class_name": "Coding Skills", + "class_uid": 502 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://github.com/github/github-mcp-server" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "GitHub Assistant", + "description": "GitHub operations assistant", + "prompt": "You are a GitHub assistant. Use the GitHub MCP Server to interact with repositories, issues, pull requests, and other GitHub features." 
+ } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/slack.record.json b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/slack.record.json index c9713dd89..562750289 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extracted-records/slack.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extracted-records/slack.record.json @@ -1,78 +1,78 @@ -{ - "name": "slackapi/slack-mcp-server", - "version": "1.0.0", - "description": "Official Slack MCP server that provides access to Slack messaging, channels, workspaces, and other Slack platform features.", - "authors": [ - "Slack Technologies, LLC " - ], - "created_at": "2023-10-20T15:25:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Dialogue Generation", - "class_uid": 10204 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Summarization", - "class_uid": 10202 - } - ], - "locators": [ - { - "type": "source-code", - "url": "https://github.com/slackapi/slack-mcp-server" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "slack": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "SLACK_BOT_TOKEN", - "-e", - "SLACK_APP_TOKEN", - "-e", - "SLACK_SIGNING_SECRET", - "ghcr.io/slackapi/slack-mcp-server" - ], - "env": { - "SLACK_BOT_TOKEN": "${input:SLACK_BOT_TOKEN}", - "SLACK_APP_TOKEN": "${input:SLACK_APP_TOKEN}", - "SLACK_SIGNING_SECRET": "${input:SLACK_SIGNING_SECRET}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "Slack Messaging Assistant", - "description": "Slack messaging platform assistant", - "prompt": "You are a Slack assistant. Help users send messages, manage channels, integrate with workflows, and use Slack platform features." 
- } - ] - } - } - ] -} +{ + "name": "slackapi/slack-mcp-server", + "version": "1.0.0", + "description": "Official Slack MCP server that provides access to Slack messaging, channels, workspaces, and other Slack platform features.", + "authors": [ + "Slack Technologies, LLC " + ], + "created_at": "2023-10-20T15:25:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Dialogue Generation", + "class_uid": 10204 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Summarization", + "class_uid": 10202 + } + ], + "locators": [ + { + "type": "source-code", + "url": "https://github.com/slackapi/slack-mcp-server" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "slack": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "SLACK_BOT_TOKEN", + "-e", + "SLACK_APP_TOKEN", + "-e", + "SLACK_SIGNING_SECRET", + "ghcr.io/slackapi/slack-mcp-server" + ], + "env": { + "SLACK_BOT_TOKEN": "${input:SLACK_BOT_TOKEN}", + "SLACK_APP_TOKEN": "${input:SLACK_APP_TOKEN}", + "SLACK_SIGNING_SECRET": "${input:SLACK_SIGNING_SECRET}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "Slack Messaging Assistant", + "description": "Slack messaging platform assistant", + "prompt": "You are a Slack assistant. Help users send messages, manage channels, integrate with workflows, and use Slack platform features." + } + ] + } + } + ] +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/extractor.record.json b/docs/research/integrations/mcp-to-oasf-agent/extractor.record.json index 6e12415b4..ce1d745c5 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/extractor.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/extractor.record.json @@ -1,84 +1,84 @@ -{ - "name": "poc/mcp-oasf-extractor-agent", - "version": "v1.0.0", - "description": "An agent that scans MCP server registries and generates OASF records for each MCP server.", - "authors": [ - "Ramiz Polic " - ], - "created_at": "2025-06-16T17:06:37Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - } - ], - "locators": [ - { - "type": "docker-image", - "url": "https://ghcr.io/agntcy/dir/mcp-oasf-extractor-agent" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v1.0.0", - "data": { - "models": [ - { - "model": "gpt-4o", - "provider": "azure", - "api_key": "${input:AZURE_OPENAI_API_KEY}", - "api_base": "${input:AZURE_OPENAI_API_BASE}", - "roles": [ - "chat", - "edit", - "apply" - ], - "completion_options": { - "context_length": 500000, - "max_tokens": 16384 - } - } - ] - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { 
- "name": "MCP to OASF extractor", - "description": "Scan MCP registry and extract OASF records", - "prompt": "You are an scanner and extractor agent. You use GitHub repo https://github.com/modelcontextprotocol/servers to fetch the MCP server data once. Only focus on Official Integrations and ignore all other servers. You must convert fetched MCP data into appropriate OASF records. The schema for the OASF record is defined as an example record at @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/example.record.json. You must follow this schema and should not add any new property names to it. You return records in JSON format. You must fill each field in the record with the appropriate data extracted about the MCP server. You should pick between 2 and 5 best skills that describe the record. Make sure that all selected skills defined in @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/skills.json always match valid names and IDs. You should only include source-code locator for each agent that matches the extracted name. You should include both extensions regarding prompt and MCP server as part of the OASF record. Ignore all inputs for each MCP server. The environment variables for the MCP server extension should always follow ${input:ENV_VARIABLE_NAME naming convention. Store all the records with appropriate names using {mcp-server-name}.record.json under the `./extracted-records` directory. Extract OASF schema for top 10 MCP servers you think are popular." - } - ] - } - } - ], - "signature": {} -} +{ + "name": "poc/mcp-oasf-extractor-agent", + "version": "v1.0.0", + "description": "An agent that scans MCP server registries and generates OASF records for each MCP server.", + "authors": [ + "Ramiz Polic " + ], + "created_at": "2025-06-16T17:06:37Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + } + ], + "locators": [ + { + "type": "docker-image", + "url": "https://ghcr.io/agntcy/dir/mcp-oasf-extractor-agent" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v1.0.0", + "data": { + "models": [ + { + "model": "gpt-4o", + "provider": "azure", + "api_key": "${input:AZURE_OPENAI_API_KEY}", + "api_base": "${input:AZURE_OPENAI_API_BASE}", + "roles": [ + "chat", + "edit", + "apply" + ], + "completion_options": { + "context_length": 500000, + "max_tokens": 16384 + } + } + ] + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "MCP to OASF extractor", + "description": "Scan MCP registry and extract OASF records", + "prompt": "You are an scanner and extractor agent. You use GitHub repo https://github.com/modelcontextprotocol/servers to fetch the MCP server data once. Only focus on Official Integrations and ignore all other servers. You must convert fetched MCP data into appropriate OASF records. 
The schema for the OASF record is defined as an example record at @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/example.record.json. You must follow this schema and should not add any new property names to it. You return records in JSON format. You must fill each field in the record with the appropriate data extracted about the MCP server. You should pick between 2 and 5 best skills that describe the record. Make sure that all selected skills defined in @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/skills.json always match valid names and IDs. You should only include source-code locator for each agent that matches the extracted name. You should include both extensions regarding prompt and MCP server as part of the OASF record. Ignore all inputs for each MCP server. The environment variables for the MCP server extension should always follow ${input:ENV_VARIABLE_NAME naming convention. Store all the records with appropriate names using {mcp-server-name}.record.json under the `./extracted-records` directory. Extract OASF schema for top 10 MCP servers you think are popular." + } + ] + } + } + ], + "signature": {} +} diff --git a/docs/research/integrations/mcp-to-oasf-agent/validator.record.json b/docs/research/integrations/mcp-to-oasf-agent/validator.record.json index e2cd3222f..4fd13db10 100644 --- a/docs/research/integrations/mcp-to-oasf-agent/validator.record.json +++ b/docs/research/integrations/mcp-to-oasf-agent/validator.record.json @@ -1,84 +1,84 @@ -{ - "name": "poc/mcp-oasf-validator", - "version": "v1.0.0", - "description": "An agent that reads OASF MCP records and fixes their schema based on requirements.", - "authors": [ - "Ramiz Polic " - ], - "created_at": "2025-06-16T17:06:37Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - } - ], - "locators": [ - { - "type": "docker-image", - "url": "https://ghcr.io/agntcy/dir/mcp-oasf-validator" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/mcp", - "version": "v1.0.0", - "data": { - "servers": { - "github": { - "command": "docker", - "args": [ - "run", - "-i", - "--rm", - "-e", - "GITHUB_PERSONAL_ACCESS_TOKEN", - "ghcr.io/github/github-mcp-server" - ], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" - } - } - } - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v1.0.0", - "data": { - "models": [ - { - "model": "gpt-4o", - "provider": "azure", - "api_key": "${input:AZURE_OPENAI_API_KEY}", - "api_base": "${input:AZURE_OPENAI_API_BASE}", - "roles": [ - "chat", - "edit", - "apply" - ], - "completion_options": { - "context_length": 500000, - "max_tokens": 16384 - } - } - ] - } - }, - { - "name": "schema.oasf.agntcy.org/features/runtime/prompt", - "version": "v1.0.0", - "data": { - "prompts": [ - { - "name": "OAFS MCP Record Validator", - "description": "An agent that validates and fixes OASF MCP records.", - "prompt": "You are an validator agent. You must validate all skills for each record from valid skills file @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/skills.json. You must ensure that all the IDs and names are valid as specified in the full skills list. You perform no additinal changes apart from skills. 
You fix all the files available in `./extracted-records` directory." - } - ] - } - } - ], - "signature": {} -} +{ + "name": "poc/mcp-oasf-validator", + "version": "v1.0.0", + "description": "An agent that reads OASF MCP records and fixes their schema based on requirements.", + "authors": [ + "Ramiz Polic " + ], + "created_at": "2025-06-16T17:06:37Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + } + ], + "locators": [ + { + "type": "docker-image", + "url": "https://ghcr.io/agntcy/dir/mcp-oasf-validator" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/mcp", + "version": "v1.0.0", + "data": { + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:GITHUB_PERSONAL_ACCESS_TOKEN}" + } + } + } + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v1.0.0", + "data": { + "models": [ + { + "model": "gpt-4o", + "provider": "azure", + "api_key": "${input:AZURE_OPENAI_API_KEY}", + "api_base": "${input:AZURE_OPENAI_API_BASE}", + "roles": [ + "chat", + "edit", + "apply" + ], + "completion_options": { + "context_length": 500000, + "max_tokens": 16384 + } + } + ] + } + }, + { + "name": "schema.oasf.agntcy.org/features/runtime/prompt", + "version": "v1.0.0", + "data": { + "prompts": [ + { + "name": "OAFS MCP Record Validator", + "description": "An agent that validates and fixes OASF MCP records.", + "prompt": "You are an validator agent. You must validate all skills for each record from valid skills file @https://raw.githubusercontent.com/agntcy/dir/refs/heads/poc/mcp-support/docs/poc/mcp-extractor-agent/context/skills.json. You must ensure that all the IDs and names are valid as specified in the full skills list. You perform no additinal changes apart from skills. You fix all the files available in `./extracted-records` directory." + } + ] + } + } + ], + "signature": {} +} diff --git a/docs/research/signing/01-deploy-sigstore.sh b/docs/research/signing/01-deploy-sigstore.sh index 2783ac575..c9ce05a89 100755 --- a/docs/research/signing/01-deploy-sigstore.sh +++ b/docs/research/signing/01-deploy-sigstore.sh @@ -1,58 +1,58 @@ -#!/bin/bash - -# This script deploys the Sigstore components to a Kubernetes cluster using Kind. -# Requirements: -# - kind -# - helm -# - kubectl -# - cosign - -## KIND: Deploy cluster -cat < agent.json.tmp -cat agent.json.tmp | jq 'del(.signature)' > agent.json -rm -rf agent.json.tmp - -## 1. Sign agent -cosign sign-blob \ - --fulcio-url=$FULCIO_URL \ - --rekor-url=$REKOR_URL \ - --yes \ - --b64=false \ - --bundle='agent.sig' \ - ./agent.json - -# Append signature to agent model -cat agent.json | jq ".signature += $(cat agent.sig | jq .)" > pushed.agent.json - -## 2. Push signed agent -# DIGEST=$(dirctl push pushed.agent.json) - -## 3. Pull signed agent -# dirctl pull $DIGEST - -## 4. Extract signature -cat pushed.agent.json | jq '.signature' > pulled.agent.sig.json -cat pushed.agent.json | jq 'del(.signature)' > pulled.agent.json - -## 5. Verify agent -echo -e "\n\nVerifying blob signature..." -cosign verify-blob \ - --rekor-url=$REKOR_URL \ - --bundle 'pulled.agent.sig.json' \ - --certificate-identity=".*" \ - --certificate-oidc-issuer=https://github.com/login/oauth \ - ./pulled.agent.json - -## 6. 
-rm -rf pulled.agent.*
+#!/bin/bash
+
+# This script configures Cosign to be used for signing and verification.
+# Requirements:
+# - cosign
+
+## Initialize cosign (required to set up the trust chain)
+# TODO: you need to add "ca.cert.pem" to your OS trust store.
+# cosign initialize \
+#   --root https://tuf.sigstore.local/root.json \
+#   --mirror https://tuf.sigstore.local
+
+## Prepare the environment
+REKOR_URL=https://rekor.sigstore.dev
+FULCIO_URL=https://fulcio.sigstore.dev
+export COSIGN_EXPERIMENTAL=1
+
+## Fix model by stripping the signature and applying proper JSON formatting
+cat agent.json | jq . > agent.json.tmp
+cat agent.json.tmp | jq 'del(.signature)' > agent.json
+rm -rf agent.json.tmp
+
+## 1. Sign agent
+cosign sign-blob \
+  --fulcio-url=$FULCIO_URL \
+  --rekor-url=$REKOR_URL \
+  --yes \
+  --b64=false \
+  --bundle='agent.sig' \
+  ./agent.json
+
+# Append signature to agent model
+cat agent.json | jq ".signature += $(cat agent.sig | jq .)" > pushed.agent.json
+
+## 2. Push signed agent
+# DIGEST=$(dirctl push pushed.agent.json)
+
+## 3. Pull signed agent
+# dirctl pull $DIGEST
+
+## 4. Extract signature
+cat pushed.agent.json | jq '.signature' > pulled.agent.sig.json
+cat pushed.agent.json | jq 'del(.signature)' > pulled.agent.json
+
+## 5. Verify agent
+echo -e "\n\nVerifying blob signature..."
+cosign verify-blob \
+  --rekor-url=$REKOR_URL \
+  --bundle 'pulled.agent.sig.json' \
+  --certificate-identity=".*" \
+  --certificate-oidc-issuer=https://github.com/login/oauth \
+  ./pulled.agent.json
+
+## 6. CLEANUP
+rm -rf pulled.agent.*
diff --git a/docs/research/signing/agent.json b/docs/research/signing/agent.json
index 4ed43db88..c87edcbac 100644
--- a/docs/research/signing/agent.json
+++ b/docs/research/signing/agent.json
@@ -1,33 +1,33 @@
-{
-  "name": "directory.agntcy.org/cisco/marketing-strategy",
-  "version": "v1.0.0",
-  "schema_version": "v0.3.1",
-  "description": "Research agent for Cisco's marketing strategy.",
-  "authors": [
-    "Cisco Systems"
-  ],
-  "created_at": "2025-03-19T17:06:37Z",
-  "annotations": {
-    "key": "value"
-  },
-  "skills": [
-    {
-      "category_name": "Natural Language Processing",
-      "category_uid": 1,
-      "class_name": "Text Completion",
-      "class_uid": 10201
-    },
-    {
-      "category_name": "Natural Language Processing",
-      "category_uid": 1,
-      "class_name": "Problem Solving",
-      "class_uid": 10702
-    }
-  ],
-  "locators": [
-    {
-      "type": "docker-image",
-      "url": "https://ghcr.io/agntcy/marketing-strategy"
-    }
-  ]
-}
+{
+  "name": "directory.agntcy.org/cisco/marketing-strategy",
+  "version": "v1.0.0",
+  "schema_version": "v0.3.1",
+  "description": "Research agent for Cisco's marketing strategy.",
+  "authors": [
+    "Cisco Systems"
+  ],
+  "created_at": "2025-03-19T17:06:37Z",
+  "annotations": {
+    "key": "value"
+  },
+  "skills": [
+    {
+      "category_name": "Natural Language Processing",
+      "category_uid": 1,
+      "class_name": "Text Completion",
+      "class_uid": 10201
+    },
+    {
+      "category_name": "Natural Language Processing",
+      "category_uid": 1,
+      "class_name": "Problem Solving",
+      "class_uid": 10702
+    }
+  ],
+  "locators": [
+    {
+      "type": "docker-image",
+      "url": "https://ghcr.io/agntcy/marketing-strategy"
+    }
+  ]
+}
diff --git a/docs/research/signing/scaffold.values.yaml b/docs/research/signing/scaffold.values.yaml
index ec23205b4..e6de6c6d2 100644
--- a/docs/research/signing/scaffold.values.yaml
+++ b/docs/research/signing/scaffold.values.yaml
@@ -1,53 +1,53 @@
-copySecretJob:
-  enabled: true
-
-rekor:
-  server:
-    attestation_storage:
-      enabled: false
-    persistence:
enabled: false - ingress: - className: nginx - hosts: - - host: rekor.sigstore.local - path: / - tls: - - secretName: rekor-tls - hosts: - - rekor.sigstore.local -fulcio: - createcerts: - enabled: true - server: - ingress: - className: nginx - http: - hosts: - - host: fulcio.sigstore.local - path: / - tls: - - secretName: fulcio-tls - hosts: - - fulcio.sigstore.local - config: - contents: - OIDCIssuers: - https://oauth2.sigstore.dev/auth: - IssuerURL: https://oauth2.sigstore.dev/auth - ClientID: sigstore - Type: email - IssuerClaim: $.federated_claims.connector_id - -tuf: - enabled: true - ingress: - className: nginx - http: - hosts: - - host: tuf.sigstore.local - path: / - tls: - - secretName: tuf-tls - hosts: - - "tuf.sigstore.local" +copySecretJob: + enabled: true + +rekor: + server: + attestation_storage: + enabled: false + persistence: + enabled: false + ingress: + className: nginx + hosts: + - host: rekor.sigstore.local + path: / + tls: + - secretName: rekor-tls + hosts: + - rekor.sigstore.local +fulcio: + createcerts: + enabled: true + server: + ingress: + className: nginx + http: + hosts: + - host: fulcio.sigstore.local + path: / + tls: + - secretName: fulcio-tls + hosts: + - fulcio.sigstore.local + config: + contents: + OIDCIssuers: + https://oauth2.sigstore.dev/auth: + IssuerURL: https://oauth2.sigstore.dev/auth + ClientID: sigstore + Type: email + IssuerClaim: $.federated_claims.connector_id + +tuf: + enabled: true + ingress: + className: nginx + http: + hosts: + - host: tuf.sigstore.local + path: / + tls: + - secretName: tuf-tls + hosts: + - "tuf.sigstore.local" diff --git a/docs/research/token/fetcher.sh b/docs/research/token/fetcher.sh index 31fe85c83..e5bd25d82 100644 --- a/docs/research/token/fetcher.sh +++ b/docs/research/token/fetcher.sh @@ -1,17 +1,17 @@ -#!/bin/bash - -# Deploy example fetcher -kubectl apply -f fetcher.yaml - -# Fetch SVID data -kubectl exec alice -c client -- cat /svids/tls.crt > svids.tls.crt -kubectl exec alice -c client -- cat /svids/tls.key > svids.tls.key -kubectl exec alice -c client -- cat /svids/svid_bundle.pem > svids.bundle.pem - -# Set ENV variables for the client -export DIRECTORY_CLIENT_SERVER_ADDRESS=127.0.0.1:8888 -export DIRECTORY_CLIENT_AUTH_MODE=tls -export DIRECTORY_CLIENT_TLS_SKIP_VERIFY=true -export DIRECTORY_CLIENT_TLS_CERT_FILE=$(pwd)/svids.tls.crt -export DIRECTORY_CLIENT_TLS_CA_FILE=$(pwd)/svids.bundle.pem -export DIRECTORY_CLIENT_TLS_KEY_FILE=$(pwd)/svids.tls.key +#!/bin/bash + +# Deploy example fetcher +kubectl apply -f fetcher.yaml + +# Fetch SVID data +kubectl exec alice -c client -- cat /svids/tls.crt > svids.tls.crt +kubectl exec alice -c client -- cat /svids/tls.key > svids.tls.key +kubectl exec alice -c client -- cat /svids/svid_bundle.pem > svids.bundle.pem + +# Set ENV variables for the client +export DIRECTORY_CLIENT_SERVER_ADDRESS=127.0.0.1:8888 +export DIRECTORY_CLIENT_AUTH_MODE=tls +export DIRECTORY_CLIENT_TLS_SKIP_VERIFY=true +export DIRECTORY_CLIENT_TLS_CERT_FILE=$(pwd)/svids.tls.crt +export DIRECTORY_CLIENT_TLS_CA_FILE=$(pwd)/svids.bundle.pem +export DIRECTORY_CLIENT_TLS_KEY_FILE=$(pwd)/svids.tls.key diff --git a/docs/research/token/fetcher.yaml b/docs/research/token/fetcher.yaml index 9c47923a6..ec6d3304a 100644 --- a/docs/research/token/fetcher.yaml +++ b/docs/research/token/fetcher.yaml @@ -1,99 +1,99 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: alice - labels: - app.kubernetes.io/name: dir-client - app.kubernetes.io/component: alice ---- -apiVersion: spire.spiffe.io/v1alpha1 -kind: 
ClusterSPIFFEID -metadata: - name: alice - labels: - app.kubernetes.io/name: dir-client - app.kubernetes.io/component: alice -spec: - className: spire-spire - podSelector: - matchExpressions: - - key: app.kubernetes.io/component - operator: In - values: - - alice - workloadSelectorTemplates: - - k8s:sa:alice - spiffeIDTemplate: "spiffe://{{ .TrustDomain }}/ns/default/sa/alice" - dnsNameTemplates: - - "alice.dir.svc.cluster.local" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: alice-spiffe-config - labels: - app.kubernetes.io/name: dir-client - app.kubernetes.io/component: alice -data: - helper.conf: |- - agent_address = "/run/spire/agent-sockets/api.sock" - cmd = "" - cmd_args = "" - cert_dir = "/svids" - renew_signal = "" - svid_file_name = "tls.crt" - svid_key_file_name = "tls.key" - svid_bundle_file_name = "svid_bundle.pem" - jwt_bundle_file_name = "key.jwt" - cert_file_mode = 0644 - key_file_mode = 0644 - jwt_svid_file_mode = 0644 - jwt_bundle_file_mode = 0644 - jwt_svids = [{jwt_audience="dir-demo", jwt_svid_file_name="jwt_svid.token"}] - daemon_mode = true ---- -apiVersion: v1 -kind: Pod -metadata: - name: alice - labels: - app.kubernetes.io/name: dir-client - app.kubernetes.io/component: alice -spec: - containers: - - args: - - -config - - config/helper.conf - image: ghcr.io/spiffe/spiffe-helper:0.10.0 - imagePullPolicy: IfNotPresent - name: spiffe-helper - resources: {} - volumeMounts: - - mountPath: /config/helper.conf - name: config-volume - subPath: helper.conf - - mountPath: /run/spire/agent-sockets - name: spire-agent-socket - - mountPath: /svids - name: svids-volume - - command: [ "/bin/bash", "-c", "--" ] - args: [ "while true; do sleep 30; done;" ] - image: ubuntu:latest - imagePullPolicy: Always - name: client - volumeMounts: - - mountPath: /svids - name: svids-volume - restartPolicy: OnFailure - serviceAccountName: alice - volumes: - - hostPath: - path: /run/spire/agent-sockets - type: Directory - name: spire-agent-socket - - emptyDir: {} - name: svids-volume - - configMap: - defaultMode: 420 - name: alice-spiffe-config - name: config-volume +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alice + labels: + app.kubernetes.io/name: dir-client + app.kubernetes.io/component: alice +--- +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterSPIFFEID +metadata: + name: alice + labels: + app.kubernetes.io/name: dir-client + app.kubernetes.io/component: alice +spec: + className: spire-spire + podSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - alice + workloadSelectorTemplates: + - k8s:sa:alice + spiffeIDTemplate: "spiffe://{{ .TrustDomain }}/ns/default/sa/alice" + dnsNameTemplates: + - "alice.dir.svc.cluster.local" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: alice-spiffe-config + labels: + app.kubernetes.io/name: dir-client + app.kubernetes.io/component: alice +data: + helper.conf: |- + agent_address = "/run/spire/agent-sockets/api.sock" + cmd = "" + cmd_args = "" + cert_dir = "/svids" + renew_signal = "" + svid_file_name = "tls.crt" + svid_key_file_name = "tls.key" + svid_bundle_file_name = "svid_bundle.pem" + jwt_bundle_file_name = "key.jwt" + cert_file_mode = 0644 + key_file_mode = 0644 + jwt_svid_file_mode = 0644 + jwt_bundle_file_mode = 0644 + jwt_svids = [{jwt_audience="dir-demo", jwt_svid_file_name="jwt_svid.token"}] + daemon_mode = true +--- +apiVersion: v1 +kind: Pod +metadata: + name: alice + labels: + app.kubernetes.io/name: dir-client + app.kubernetes.io/component: alice +spec: + containers: 
+ - args: + - -config + - config/helper.conf + image: ghcr.io/spiffe/spiffe-helper:0.10.0 + imagePullPolicy: IfNotPresent + name: spiffe-helper + resources: {} + volumeMounts: + - mountPath: /config/helper.conf + name: config-volume + subPath: helper.conf + - mountPath: /run/spire/agent-sockets + name: spire-agent-socket + - mountPath: /svids + name: svids-volume + - command: [ "/bin/bash", "-c", "--" ] + args: [ "while true; do sleep 30; done;" ] + image: ubuntu:latest + imagePullPolicy: Always + name: client + volumeMounts: + - mountPath: /svids + name: svids-volume + restartPolicy: OnFailure + serviceAccountName: alice + volumes: + - hostPath: + path: /run/spire/agent-sockets + type: Directory + name: spire-agent-socket + - emptyDir: {} + name: svids-volume + - configMap: + defaultMode: 420 + name: alice-spiffe-config + name: config-volume diff --git a/docs/rfc/01-security-schema.md b/docs/rfc/01-security-schema.md index 242ee02ef..972afd3a6 100644 --- a/docs/rfc/01-security-schema.md +++ b/docs/rfc/01-security-schema.md @@ -1,269 +1,269 @@ -# [RFC] SPIFFE Protocol Integration for Directory Security Schema - -*Note: this RFC template follows HashiCrop RFC format described [here](https://works.hashicorp.com/articles/rfc-template)* - -| | | -| ------------- | ------------------------------------------- | -| **Created** | 2025-08-12 | -| **Status** | **WIP** \| InReview \| Approved \| Obsolete | -| **Owner** | Ramiz Polic (@ramizpolic) | -| **Approvers** | AGNTCY Core WG | - ---- - -This RFC proposes the integration of SPIFFE (Secure Production Identity Framework for Everyone) protocol for establishing a comprehensive security framework in the Directory project. SPIFFE provides a standardized approach to workload identity management in distributed systems through cryptographic identities and automatic credential lifecycle management. This enables production-ready zero-trust architecture with support for both X.509 and JWT-based authentication methods, enabling secure federation across trust domains and comprehensive authorization for all client interactions. - -## Background - -The Directory project currently operates in an insecure mode, lacking any authentication or authorization mechanisms. This architectural gap presents significant security risks as the project transitions toward production deployment and broader adoption. Namely, the system's current state presents several critical vulnerabilities: - -1. **Open Access Architecture**: All Directory services accept connections without identity verification, creating unrestricted access to system resources -2. **Unencrypted Communication**: Inter-service and client-service communication occurs over plain gRPC without transport security -3. **Absence of Access Controls**: No authorization policies exist to govern resource access or operation permissions -4. 
**Compliance and Audit Gaps**: Lack of identity tracking prevents audit trail generation and regulatory compliance - -### Production Security Requirements - -Enterprise deployment demands a comprehensive security framework addressing: - -- **Identity Management**: Cryptographic identity for all workloads and users with automatic credential lifecycle management -- **Authentication Framework**: Support for both programmatic access (X.509 certificates) and human interaction (JWT tokens) with seamless integration -- **Authorization System**: Fine-grained access control based on cryptographic identity verification and policy enforcement -- **Federation Capabilities**: Cross-domain authentication enabling multi-tenant deployments, partner integrations, and environment isolation -- **Operational Security**: Automatic credential rotation, comprehensive audit logging, and zero-trust architecture principles - -## Proposal - -This RFC proposes implementing SPIFFE as the foundational security framework for the Directory project, establishing comprehensive authentication and authorization capabilities while enabling secure federation across organizational and environmental boundaries. - -### SPIFFE Protocol Advantages - -SPIFFE addresses the Directory project's security requirements through several key capabilities, with particular strength in distributed system environments: - -- **Standardized Workload Identity**: Provides cryptographically verifiable identity for every workload without manual certificate management, ensuring consistent authentication across distributed service instances -- **Automatic Credential Lifecycle**: Eliminates operational overhead through automated certificate issuance, renewal, and revocation, critical for managing authentication at scale in distributed architectures -- **Distributed Trust Architecture**: Agent-based design provides local authentication caching and validation, ensuring continued operation during network partitions common in distributed systems -- **Federation Architecture**: Enables secure cross-domain communication through standardized trust establishment mechanisms, supporting multi-region and partner organization integration -- **Network Partition Tolerance**: Services maintain authentication capabilities through local SPIRE agents even when connectivity to central authority is lost, essential for distributed system resilience -- **Geographic Distribution Support**: Hierarchical SPIRE server deployment optimizes authentication latency across multiple data centers and cloud regions -- **Zero-Trust Foundation**: Establishes cryptographic verification for every transaction, supporting modern security architecture principles in complex distributed topologies -- **Ecosystem Integration**: Seamless compatibility with Kubernetes, service mesh technologies, and cloud-native infrastructure across distributed environments - - -### Implementation Strategy - -The security framework will provide universal authentication coverage across all Directory components, eliminating the current insecure communication patterns while addressing the unique challenges of distributed system architectures. - -- **Distributed Authentication**: SPIRE agents deployed alongside each service instance provide local certificate caching and validation, ensuring authentication operations continue during network partitions and reducing latency in geographically distributed deployments. 
-- **Unified Authentication Layer**: Implement support for both X.509-SVID and JWT-SVID authentication methods, enabling optimal security approaches for different interaction patterns. -- **Policy-Based Authorization**: Deploy fine-grained authorization controls based on cryptographic identity verification and configurable access policies. -- **Hierarchical Trust Management**: Regional SPIRE servers establish trust hierarchies that optimize for network topology while maintaining centralized policy control, enabling efficient authentication across multiple data centers and cloud regions. -- **Resilient Operation**: The agent-based architecture ensures that services maintain authentication capabilities even when connectivity to central authority is temporarily lost, critical for the Directory project's distributed service mesh. -- **Federation Framework**: Enable secure cross-domain authentication supporting multi-tenant deployments, partner integrations, and environment isolation. - -Authorization policies will be enforced based on cryptographic identity verification, supporting both service-to-service and user-initiated operations. The federation capability will enable secure communication across different trust domains, supporting complex deployment scenarios including multi-cloud and partner organization integration. - ---- - -## Implementation - -### Architecture Overview - -The SPIFFE integration establishes a comprehensive security framework supporting federation across multiple trust domains while providing unified authentication for all client types. - -```mermaid -architecture-beta - group prod_domain(cloud)[Production Trust Domain] - - service spire_server_prod(server)[SPIRE Server] in prod_domain - service store_service_prod(database)[Store Service] in prod_domain - service routing_service_prod(internet)[Routing Service] in prod_domain - service search_service_prod(disk)[Search Service] in prod_domain - - group dev_domain(cloud)[Development Trust Domain] - - service spire_server_dev(server)[SPIRE Server] in dev_domain - service cli_client(server)[CLI Client] in dev_domain - service sdk_client(server)[SDK Client] in dev_domain - - group partner_domain(cloud)[Partner Trust Domain] - - service spire_server_partner(server)[SPIRE Server] in partner_domain - service external_service(internet)[External Service] in partner_domain - - spire_server_prod:R -- L:spire_server_dev - spire_server_prod:L -- R:spire_server_partner - - store_service_prod:R -- L:routing_service_prod - store_service_prod:R -- L:search_service_prod - cli_client:R -- L:store_service_prod - sdk_client:R -- L:store_service_prod - external_service:R -- L:store_service_prod -``` - -### Technical Integration - -- **Server Enhancement**: Directory services will integrate the SPIFFE Workload API to automatically obtain and manage cryptographic identities. Authentication interceptors will validate both X.509 certificates and JWT tokens, extracting SPIFFE IDs for authorization decisions. -- **Client Integration**: All clients will be configured to obtain appropriate SPIFFE credentials through the local SPIRE agent. The authentication method (X.509 or JWT) will be determined by operational requirements rather than client type restrictions. -- **Policy Framework**: Authorization decisions will be based on SPIFFE ID verification and configurable access policies, supporting both local and federated trust relationships. 
- -### Federation Architecture - -The multi-domain architecture enables secure communication across organizational boundaries through SPIRE server federation. Trust relationships are established through cryptographic verification, allowing controlled access between different environments and partner organizations while maintaining security isolation. - -## UX - -## User Experience - -### Operational Simplicity - -The SPIFFE integration maintains operational simplicity while introducing enterprise-grade security. All authentication mechanisms operate transparently through local SPIRE agent connections, requiring minimal configuration changes to existing workflows. - -Users and applications receive automatic identity provisioning without manual certificate management. The authentication process becomes seamless through standardized SPIFFE protocols, ensuring security without operational complexity. - -### Infrastructure Requirements - -- **SPIRE Server Deployment**: Central identity authority managing certificate issuance and trust relationships. Deployed with high availability configuration and persistent storage for production environments. -- **SPIRE Agent Distribution**: Lightweight agents deployed alongside Directory services and client applications, providing seamless identity management without application code changes. -- **Trust Domain Configuration**: Establishes the security boundary and identity namespace for the Directory ecosystem, with support for federation with external partners and environments. - -### Operational Integration - -- **Service Enhancement**: Directory services integrate with the local SPIRE agent through standard Unix socket connections, requiring minimal configuration changes to existing deployment manifests. -- **Client Configuration**: All client types receive automatic identity provisioning through environment-specific SPIRE agent connections, eliminating manual certificate management. -- **Federation Enablement**: Cross-environment access is established through SPIRE server federation, allowing secure communication between development, staging, and production environments without compromising security isolation. - ---- - -## Alternative Solutions - -### 1. Service Mesh (Istio/Linkerd) with mTLS - -**Pros:** -- Automatic mTLS between services -- Rich traffic management features -- Mature ecosystem and tooling - -**Cons:** -- Significant operational overhead -- Requires complete service mesh deployment -- Less granular identity control -- Vendor lock-in considerations - -**Decision:** SPIFFE provides identity without requiring full service mesh adoption, allowing incremental deployment. - -### 2. HashiCorp Vault with PKI - -**Pros:** -- Comprehensive secret management -- Established enterprise adoption -- Rich policy engine - -**Cons:** -- More complex than needed for identity only -- Requires Vault infrastructure management -- Less standardized workload identity -- Additional licensing considerations - -**Decision:** SPIFFE is purpose-built for workload identity and offers better standardization. - -### 3. Custom JWT-based System - -**Pros:** -- Simpler implementation -- No external dependencies -- HTTP-native - -**Cons:** -- Lack of automatic rotation -- No cryptographic verification -- Custom security implementation risks -- Limited ecosystem integration - -**Decision:** SPIFFE provides proven, standardized security with automatic rotation. - -### 4. 
Cloud Provider IAM (AWS IAM, GCP IAM, Azure AD) - -**Pros:** -- Native cloud integration -- Comprehensive permission systems -- Managed service (no infrastructure) - -**Cons:** -- Cloud vendor lock-in -- Limited cross-cloud functionality -- Less granular workload identity -- Complex configuration for service-to-service auth - -**Decision:** SPIFFE provides cloud-agnostic solution with better workload-focused identity. - -### 5. Decentralized Identity Systems (DID/Verifiable Credentials) - -**Pros:** -- Self-sovereign identity principles -- No central authority dependency -- Cryptographic verifiability -- Standards-based (W3C DID specification) - -**Cons:** -- Immature ecosystem for service authentication -- Complex key management and recovery -- Limited enterprise tooling and support -- Performance concerns for high-frequency operations -- Blockchain dependency in many implementations - -**Decision:** While promising for user identity, decentralized systems lack maturity for service-to-service authentication in enterprise environments. - -### 6. Distributed Certificate Authorities (Gossip-based PKI) - -**Pros:** -- No single point of failure -- Distributed trust establishment -- Resilient to network partitions -- Self-healing infrastructure - -**Cons:** -- Complex consensus mechanisms -- Difficult operational troubleshooting -- Potential for certificate conflicts -- Limited standardization and tooling -- Network overhead from gossip protocols - -**Decision:** SPIFFE provides proven distributed trust through federation without requiring complex consensus protocols. - -### Decentralized and Distributed Systems Considerations - -The Directory project's distributed architecture presents unique challenges that influence authentication system selection: - -**Network Partitions**: Directory services must continue operating during network splits. SPIFFE's cached certificates and local validation enable continued operation, while decentralized consensus systems may become unavailable during partitions. - -**Geographic Distribution**: Services deployed across multiple regions require efficient identity verification. SPIFFE's hierarchical trust model with regional SPIRE servers provides low-latency authentication, whereas blockchain-based systems may introduce unacceptable latency. - -**Scalability Requirements**: The Directory project anticipates significant growth in service instances and client connections. SPIFFE's agent-based architecture scales horizontally, while centralized systems create bottlenecks and fully decentralized systems may face performance degradation. - -**Operational Complexity**: Distributed systems require sophisticated monitoring and debugging capabilities. SPIFFE provides centralized logging and metrics through SPIRE servers, while maintaining distributed operation through agents. - -**Trust Establishment**: In distributed environments, trust bootstrapping becomes critical. SPIFFE's attestation mechanisms provide secure, automated identity verification, while decentralized systems often require complex out-of-band trust establishment. 
-
-### Comparison Matrix
-
-| Solution | Implementation Complexity | Operational Overhead | Security Strength | Vendor Lock-in | Standards Compliance | Distributed System Fit | Network Partition Tolerance |
-|----------|-------------------------|---------------------|------------------|----------------|---------------------|------------------------|----------------------------|
-| SPIFFE/SPIRE | Medium | Medium | High | None | High (CNCF) | Excellent | High |
-| Service Mesh | High | High | High | Medium | Medium | Good | Medium |
-| HashiCorp Vault | High | High | High | Medium | Low | Good | Low |
-| Custom JWT | Low | Low | Medium | None | None | Poor | Medium |
-| Cloud IAM | Medium | Low | Medium | High | Low | Poor | Low |
-| Decentralized Identity | High | High | High | None | High (W3C) | Fair | High |
-| Distributed CA | Very High | Very High | High | None | Low | Good | Very High |
-
-### Summary
-
-1. **Standardization**: CNCF project with industry-wide adoption and proven distributed system patterns
-2. **Flexibility**: Works across clouds, on-premises, and hybrid environments with geographic distribution support
-3. **Automatic Rotation**: Built-in certificate and token lifecycle management without consensus overhead
-4. **Zero-Trust Ready**: Cryptographic identity verification for every workload in distributed architectures
-5. **Federation**: Native support for cross-domain authentication enabling multi-region and partner integration
-6. **Ecosystem Integration**: Works with Kubernetes, service meshes, and CI/CD systems across distributed environments
-7. **Distributed System Optimization**: Agent-based architecture provides network partition tolerance and local caching
-8. **Operational Simplicity**: Centralized management with distributed operation, avoiding complex consensus mechanisms
+# [RFC] SPIFFE Protocol Integration for Directory Security Schema
+
+*Note: this RFC template follows the HashiCorp RFC format described [here](https://works.hashicorp.com/articles/rfc-template)*
+
+|               |                                             |
+| ------------- | ------------------------------------------- |
+| **Created**   | 2025-08-12                                  |
+| **Status**    | **WIP** \| InReview \| Approved \| Obsolete |
+| **Owner**     | Ramiz Polic (@ramizpolic)                   |
+| **Approvers** | AGNTCY Core WG                              |
+
+---
+
+This RFC proposes integrating the SPIFFE (Secure Production Identity Framework for Everyone) protocol to establish a comprehensive security framework for the Directory project. SPIFFE provides a standardized approach to workload identity management in distributed systems through cryptographic identities and automatic credential lifecycle management. This enables a production-ready zero-trust architecture that supports both X.509 and JWT-based authentication methods, secure federation across trust domains, and comprehensive authorization for all client interactions.
+
+## Background
+
+The Directory project currently operates in an insecure mode, lacking any authentication or authorization mechanisms. This architectural gap presents significant security risks as the project transitions toward production deployment and broader adoption. Namely, the system's current state presents several critical vulnerabilities:
+
+1. **Open Access Architecture**: All Directory services accept connections without identity verification, creating unrestricted access to system resources
+2. **Unencrypted Communication**: Inter-service and client-service communication occurs over plain gRPC without transport security
+3. 
**Absence of Access Controls**: No authorization policies exist to govern resource access or operation permissions +4. **Compliance and Audit Gaps**: Lack of identity tracking prevents audit trail generation and regulatory compliance + +### Production Security Requirements + +Enterprise deployment demands a comprehensive security framework addressing: + +- **Identity Management**: Cryptographic identity for all workloads and users with automatic credential lifecycle management +- **Authentication Framework**: Support for both programmatic access (X.509 certificates) and human interaction (JWT tokens) with seamless integration +- **Authorization System**: Fine-grained access control based on cryptographic identity verification and policy enforcement +- **Federation Capabilities**: Cross-domain authentication enabling multi-tenant deployments, partner integrations, and environment isolation +- **Operational Security**: Automatic credential rotation, comprehensive audit logging, and zero-trust architecture principles + +## Proposal + +This RFC proposes implementing SPIFFE as the foundational security framework for the Directory project, establishing comprehensive authentication and authorization capabilities while enabling secure federation across organizational and environmental boundaries. + +### SPIFFE Protocol Advantages + +SPIFFE addresses the Directory project's security requirements through several key capabilities, with particular strength in distributed system environments: + +- **Standardized Workload Identity**: Provides cryptographically verifiable identity for every workload without manual certificate management, ensuring consistent authentication across distributed service instances +- **Automatic Credential Lifecycle**: Eliminates operational overhead through automated certificate issuance, renewal, and revocation, critical for managing authentication at scale in distributed architectures +- **Distributed Trust Architecture**: Agent-based design provides local authentication caching and validation, ensuring continued operation during network partitions common in distributed systems +- **Federation Architecture**: Enables secure cross-domain communication through standardized trust establishment mechanisms, supporting multi-region and partner organization integration +- **Network Partition Tolerance**: Services maintain authentication capabilities through local SPIRE agents even when connectivity to central authority is lost, essential for distributed system resilience +- **Geographic Distribution Support**: Hierarchical SPIRE server deployment optimizes authentication latency across multiple data centers and cloud regions +- **Zero-Trust Foundation**: Establishes cryptographic verification for every transaction, supporting modern security architecture principles in complex distributed topologies +- **Ecosystem Integration**: Seamless compatibility with Kubernetes, service mesh technologies, and cloud-native infrastructure across distributed environments + + +### Implementation Strategy + +The security framework will provide universal authentication coverage across all Directory components, eliminating the current insecure communication patterns while addressing the unique challenges of distributed system architectures. 
+
+- **Distributed Authentication**: SPIRE agents deployed alongside each service instance provide local certificate caching and validation, ensuring authentication operations continue during network partitions and reducing latency in geographically distributed deployments.
+- **Unified Authentication Layer**: Implement support for both X.509-SVID and JWT-SVID authentication methods, enabling optimal security approaches for different interaction patterns.
+- **Policy-Based Authorization**: Deploy fine-grained authorization controls based on cryptographic identity verification and configurable access policies.
+- **Hierarchical Trust Management**: Regional SPIRE servers establish trust hierarchies that optimize for network topology while maintaining centralized policy control, enabling efficient authentication across multiple data centers and cloud regions.
+- **Resilient Operation**: The agent-based architecture ensures that services maintain authentication capabilities even when connectivity to the central authority is temporarily lost, critical for the Directory project's distributed service mesh.
+- **Federation Framework**: Enable secure cross-domain authentication supporting multi-tenant deployments, partner integrations, and environment isolation.
+
+Authorization policies will be enforced based on cryptographic identity verification, supporting both service-to-service and user-initiated operations. The federation capability will enable secure communication across different trust domains, supporting complex deployment scenarios including multi-cloud and partner organization integration.
+
+---
+
+## Implementation
+
+### Architecture Overview
+
+The SPIFFE integration establishes a comprehensive security framework supporting federation across multiple trust domains while providing unified authentication for all client types.
+
+```mermaid
+architecture-beta
+    group prod_domain(cloud)[Production Trust Domain]
+
+    service spire_server_prod(server)[SPIRE Server] in prod_domain
+    service store_service_prod(database)[Store Service] in prod_domain
+    service routing_service_prod(internet)[Routing Service] in prod_domain
+    service search_service_prod(disk)[Search Service] in prod_domain
+
+    group dev_domain(cloud)[Development Trust Domain]
+
+    service spire_server_dev(server)[SPIRE Server] in dev_domain
+    service cli_client(server)[CLI Client] in dev_domain
+    service sdk_client(server)[SDK Client] in dev_domain
+
+    group partner_domain(cloud)[Partner Trust Domain]
+
+    service spire_server_partner(server)[SPIRE Server] in partner_domain
+    service external_service(internet)[External Service] in partner_domain
+
+    spire_server_prod:R -- L:spire_server_dev
+    spire_server_prod:L -- R:spire_server_partner
+
+    store_service_prod:R -- L:routing_service_prod
+    store_service_prod:R -- L:search_service_prod
+    cli_client:R -- L:store_service_prod
+    sdk_client:R -- L:store_service_prod
+    external_service:R -- L:store_service_prod
+```
+
+### Technical Integration
+
+- **Server Enhancement**: Directory services will integrate the SPIFFE Workload API to automatically obtain and manage cryptographic identities. Authentication interceptors will validate both X.509 certificates and JWT tokens, extracting SPIFFE IDs for authorization decisions (see the sketch after this list).
+- **Client Integration**: All clients will be configured to obtain appropriate SPIFFE credentials through the local SPIRE agent. The authentication method (X.509 or JWT) will be determined by operational requirements rather than client type restrictions.
+- **Policy Framework**: Authorization decisions will be based on SPIFFE ID verification and configurable access policies, supporting both local and federated trust relationships.
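+
+To make the integration concrete, the following Go sketch shows how a Directory service could wire mutual TLS through the SPIFFE Workload API using the go-spiffe v2 library. It is a minimal illustration, not the actual Directory implementation: the `example.org` trust domain and the `:8888` listen address are assumptions, and the agent socket path is borrowed from the SPIFFE helper configuration used in the token research manifests.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"net"
+
+	"github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials"
+	"github.com/spiffe/go-spiffe/v2/spiffeid"
+	"github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
+	"github.com/spiffe/go-spiffe/v2/workloadapi"
+	"google.golang.org/grpc"
+)
+
+// agentSocket is an assumed SPIRE agent socket path.
+const agentSocket = "unix:///run/spire/agent-sockets/api.sock"
+
+func main() {
+	ctx := context.Background()
+
+	// Obtain an automatically rotated X.509-SVID from the local SPIRE agent.
+	source, err := workloadapi.NewX509Source(ctx,
+		workloadapi.WithClientOptions(workloadapi.WithAddr(agentSocket)))
+	if err != nil {
+		log.Fatalf("unable to create X509Source: %v", err)
+	}
+	defer source.Close()
+
+	// Accept only peers that belong to the (assumed) Directory trust domain.
+	td := spiffeid.RequireTrustDomainFromString("example.org")
+	creds := grpccredentials.MTLSServerCredentials(source, source, tlsconfig.AuthorizeMemberOf(td))
+
+	server := grpc.NewServer(grpc.Creds(creds))
+	// ... register the Store, Routing, and Search services here ...
+
+	lis, err := net.Listen("tcp", ":8888")
+	if err != nil {
+		log.Fatalf("unable to listen: %v", err)
+	}
+	log.Fatal(server.Serve(lis))
+}
+```
+
+A client mirrors this by passing `grpccredentials.MTLSClientCredentials(source, source, tlsconfig.AuthorizeMemberOf(td))` to `grpc.WithTransportCredentials` when dialing, and an authorization interceptor can recover the caller's SPIFFE ID from the verified peer certificate (for example with `x509svid.IDFromCert`) before evaluating access policies.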
+
+### Federation Architecture
+
+The multi-domain architecture enables secure communication across organizational boundaries through SPIRE server federation. Trust relationships are established through cryptographic verification, allowing controlled access between different environments and partner organizations while maintaining security isolation.
+
+## User Experience
+
+### Operational Simplicity
+
+The SPIFFE integration maintains operational simplicity while introducing enterprise-grade security. All authentication mechanisms operate transparently through local SPIRE agent connections, requiring minimal configuration changes to existing workflows.
+
+Users and applications receive automatic identity provisioning without manual certificate management. The authentication process becomes seamless through standardized SPIFFE protocols, ensuring security without operational complexity.
+
+### Infrastructure Requirements
+
+- **SPIRE Server Deployment**: Central identity authority managing certificate issuance and trust relationships. Deployed with a high-availability configuration and persistent storage for production environments.
+- **SPIRE Agent Distribution**: Lightweight agents deployed alongside Directory services and client applications, providing seamless identity management without application code changes.
+- **Trust Domain Configuration**: Establishes the security boundary and identity namespace for the Directory ecosystem, with support for federation with external partners and environments.
+
+### Operational Integration
+
+- **Service Enhancement**: Directory services integrate with the local SPIRE agent through standard Unix socket connections, requiring minimal configuration changes to existing deployment manifests.
+- **Client Configuration**: All client types receive automatic identity provisioning through environment-specific SPIRE agent connections, eliminating manual certificate management; a JWT-oriented sketch follows this list.
+- **Federation Enablement**: Cross-environment access is established through SPIRE server federation, allowing secure communication between development, staging, and production environments without compromising security isolation.
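+
+For human-facing flows such as the CLI, the same Workload API can mint and validate short-lived JWT-SVIDs instead of X.509 certificates. The sketch below is illustrative only: the `dir-demo` audience and agent socket path are taken from the token research manifests in this repository, while the end-to-end flow is an assumption about how Directory could use them.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
+	"github.com/spiffe/go-spiffe/v2/workloadapi"
+)
+
+// agentSocket is an assumed SPIRE agent socket path.
+const agentSocket = "unix:///run/spire/agent-sockets/api.sock"
+
+func main() {
+	ctx := context.Background()
+
+	// Client side: fetch a short-lived JWT-SVID scoped to the Directory audience.
+	client, err := workloadapi.New(ctx, workloadapi.WithAddr(agentSocket))
+	if err != nil {
+		log.Fatalf("unable to connect to the Workload API: %v", err)
+	}
+	defer client.Close()
+
+	svid, err := client.FetchJWTSVID(ctx, jwtsvid.Params{Audience: "dir-demo"})
+	if err != nil {
+		log.Fatalf("unable to fetch JWT-SVID: %v", err)
+	}
+
+	// Server side: validate the presented token against the trust bundles
+	// delivered by the local SPIRE agent.
+	bundles, err := workloadapi.NewJWTSource(ctx,
+		workloadapi.WithClientOptions(workloadapi.WithAddr(agentSocket)))
+	if err != nil {
+		log.Fatalf("unable to create JWTSource: %v", err)
+	}
+	defer bundles.Close()
+
+	verified, err := jwtsvid.ParseAndValidate(svid.Marshal(), bundles, []string{"dir-demo"})
+	if err != nil {
+		log.Fatalf("token rejected: %v", err)
+	}
+	log.Printf("authenticated SPIFFE ID: %s", verified.ID)
+}
+```
+
+Because the token is audience-scoped and expires quickly, a leaked JWT-SVID is far less useful to an attacker than a long-lived static credential.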
+### 3. Custom JWT-based System
+
+**Pros:**
+- Simpler implementation
+- No external dependencies
+- HTTP-native
+
+**Cons:**
+- No automatic credential rotation
+- No cryptographic workload attestation
+- Custom security implementation risks
+- Limited ecosystem integration
+
+**Decision:** SPIFFE provides proven, standardized security with automatic rotation.
+
+### 4. Cloud Provider IAM (AWS IAM, GCP IAM, Azure AD)
+
+**Pros:**
+- Native cloud integration
+- Comprehensive permission systems
+- Managed service (no infrastructure)
+
+**Cons:**
+- Cloud vendor lock-in
+- Limited cross-cloud functionality
+- Less granular workload identity
+- Complex configuration for service-to-service auth
+
+**Decision:** SPIFFE provides a cloud-agnostic solution with stronger workload-focused identity.
+
+### 5. Decentralized Identity Systems (DID/Verifiable Credentials)
+
+**Pros:**
+- Self-sovereign identity principles
+- No central authority dependency
+- Cryptographic verifiability
+- Standards-based (W3C DID specification)
+
+**Cons:**
+- Immature ecosystem for service authentication
+- Complex key management and recovery
+- Limited enterprise tooling and support
+- Performance concerns for high-frequency operations
+- Blockchain dependency in many implementations
+
+**Decision:** While promising for user identity, decentralized systems lack maturity for service-to-service authentication in enterprise environments.
+
+### 6. Distributed Certificate Authorities (Gossip-based PKI)
+
+**Pros:**
+- No single point of failure
+- Distributed trust establishment
+- Resilient to network partitions
+- Self-healing infrastructure
+
+**Cons:**
+- Complex consensus mechanisms
+- Difficult operational troubleshooting
+- Potential for certificate conflicts
+- Limited standardization and tooling
+- Network overhead from gossip protocols
+
+**Decision:** SPIFFE provides proven distributed trust through federation without requiring complex consensus protocols.
+
+### Decentralized and Distributed Systems Considerations
+
+The Directory project's distributed architecture presents unique challenges that influence authentication system selection:
+
+**Network Partitions**: Directory services must continue operating during network splits. SPIFFE's cached certificates and local validation enable continued operation, while decentralized consensus systems may become unavailable during partitions.
+
+**Geographic Distribution**: Services deployed across multiple regions require efficient identity verification. SPIFFE's hierarchical trust model with regional SPIRE servers provides low-latency authentication, whereas blockchain-based systems may introduce unacceptable latency.
+
+**Scalability Requirements**: The Directory project anticipates significant growth in service instances and client connections. SPIFFE's agent-based architecture scales horizontally, while centralized systems create bottlenecks and fully decentralized systems may face performance degradation.
+
+**Operational Complexity**: Distributed systems require sophisticated monitoring and debugging capabilities. SPIFFE provides centralized logging and metrics through SPIRE servers, while maintaining distributed operation through agents.
+
+**Trust Establishment**: In distributed environments, trust bootstrapping becomes critical. SPIFFE's attestation mechanisms provide secure, automated identity verification, while decentralized systems often require complex out-of-band trust establishment.
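+
+To ground the partition-tolerance and caching claims above, the following is a minimal sketch of how a Directory service could terminate mTLS directly from the agent-backed SVID cache. It assumes the go-spiffe v2 SDK; the socket path, trust domain, and port are illustrative placeholders rather than settled Directory configuration.
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+    "net/http"
+
+    "github.com/spiffe/go-spiffe/v2/spiffeid"
+    "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
+    "github.com/spiffe/go-spiffe/v2/workloadapi"
+)
+
+func main() {
+    ctx := context.Background()
+
+    // X509Source watches the local SPIRE agent and keeps the SVID cached
+    // and rotated in memory, so TLS handshakes keep succeeding even if the
+    // SPIRE server is briefly unreachable.
+    source, err := workloadapi.NewX509Source(ctx,
+        workloadapi.WithClientOptions(
+            workloadapi.WithAddr("unix:///run/spire/agent.sock"))) // illustrative socket path
+    if err != nil {
+        log.Fatalf("creating X509Source: %v", err)
+    }
+    defer source.Close()
+
+    // Accept only peers from our own trust domain (illustrative name).
+    td := spiffeid.RequireTrustDomainFromString("dir.example")
+    cfg := tlsconfig.MTLSServerConfig(source, source, tlsconfig.AuthorizeMemberOf(td))
+
+    srv := &http.Server{Addr: ":8443", TLSConfig: cfg}
+    log.Fatal(srv.ListenAndServeTLS("", "")) // cert and key come from the Workload API
+}
+```
+
+Swapping `AuthorizeMemberOf` for `tlsconfig.AuthorizeID` (a single allowed SPIFFE ID) or a custom authorizer is how the policy framework described earlier would tighten access per service.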
+
+### Comparison Matrix
+
+| Solution | Implementation Complexity | Operational Overhead | Security Strength | Vendor Lock-in | Standards Compliance | Distributed System Fit | Network Partition Tolerance |
+|----------|-------------------------|---------------------|------------------|----------------|---------------------|------------------------|----------------------------|
+| SPIFFE/SPIRE | Medium | Medium | High | None | High (CNCF) | Excellent | High |
+| Service Mesh | High | High | High | Medium | Medium | Good | Medium |
+| HashiCorp Vault | High | High | High | Medium | Low | Good | Low |
+| Custom JWT | Low | Low | Medium | None | None | Poor | Medium |
+| Cloud IAM | Medium | Low | Medium | High | Low | Poor | Low |
+| Decentralized Identity | High | High | High | None | High (W3C) | Fair | High |
+| Distributed CA | Very High | Very High | High | None | Low | Good | Very High |
+
+### Summary
+
+SPIFFE/SPIRE was selected because it offers:
+
+1. **Standardization**: CNCF project with industry-wide adoption and proven distributed system patterns
+2. **Flexibility**: Works across clouds, on-premises, and hybrid environments with geographic distribution support
+3. **Automatic Rotation**: Built-in certificate and token lifecycle management without consensus overhead
+4. **Zero-Trust Ready**: Cryptographic identity verification for every workload in distributed architectures
+5. **Federation**: Native support for cross-domain authentication enabling multi-region and partner integration
+6. **Ecosystem Integration**: Works with Kubernetes, service meshes, and CI/CD systems across distributed environments
+7. **Distributed System Optimization**: Agent-based architecture provides network partition tolerance and local caching
+8. **Operational Simplicity**: Centralized management with distributed operation, avoiding complex consensus mechanisms
diff --git a/docs/rfc/template.md b/docs/rfc/template.md
index 357cc1b7d..6f1a41997 100644
--- a/docs/rfc/template.md
+++ b/docs/rfc/template.md
@@ -1,50 +1,50 @@
-# [RFC] __TITLE__
-
-*Note: this RFC template follows HashiCrop RFC format described [here](https://works.hashicorp.com/articles/rfc-template)*
-
-| | |
-| ------------- | ------------------------------------------- |
-| **Created** | 2025-03-28 |
-| **Status** | **WIP** \| InReview \| Approved \| Obsolete |
-| **Owner** | *Github handler for the author* |
-| **Approvers** | *Github handler for the approvers* |
-
----
-
-*The RFC begins with a brief overview. This section should be one or two paragraphs that just explains what the goal of this RFC is going to be, but without diving too deeply into the "why", "why now", "how", etc. Ensure anyone opening the document will form a clear understanding of the RFCs intent from reading this paragraph(s).*
-
-## Background
-
-*The next section is the "Background" section. This section should be at least two paragraphs and can take up to a whole page in some cases. The **guiding goal of the background section** is: as a newcomer to this project (new employee, team transfer), can I read the background section and follow any links to get the full context of "why" this change is necessary?*
-
-## Proposal
-
-*The next required section is "Proposal" or "Goal". Given the background above, this section proposes a solution. This should be an overview of the "how" for the solution, but for details further sections will be used.*
-
-### Abandoned Ideas (Optional)
-
-*As RFCs evolve, it is common that there are ideas that are abandoned. Rather than simply deleting them from the document, you should try to organize them into sections that make it clear they're abandoned while explaining "why" they were abandoned.*
-
-*When sharing your RFC with others or having someone look back on your RFC in the future, it is common to walk the same path and fall into the same pitfalls that we've since matured from. Abandoned ideas are a way to recognize that path and explain the pitfalls and why they were abandoned.*
-
----
-
-## Implementation
-
-*Many RFCs have an "implementation" section which details how the implementation will work. This section should explain the rough API changes (internal and external), package changes, etc. The goal is to give an idea to reviews about the subsystems that require change and the surface area of those changes.*
-
-*This knowledge can result in recommendations for alternate approaches that perhaps are idiomatic to the project or result in less packages touched. Or, it may result in the realization that the proposed solution in this RFC is too complex given the problem.*
-
-*For the RFC author, typing out the implementation in a high-level often serves as "[rubber duck debugging](https://en.wikipedia.org/wiki/Rubber_duck_debugging)" and you can catch a lot of issues or unknown unknowns prior to writing any real code.*
-
-## UX
-
-*If there are user-impacting changes by this RFC, it is important to have a "UI/UX" section. User-impacting changes include external API changes, configuration format changes, CLI output changes, etc.*
-
-*This section is effectively the "implementation" section for the user experience. The goal is to explain the changes necessary, any impacts to backwards compatibility, any impacts to normal workflow, etc.*
-
-*As a reviewer, this section should be checked to see if the proposed changes **feel** like the project in question. For example, if the UX changes are proposing a flag "-foo_bar" but all our flags use hyphens like "-foo-bar", then that is a noteworthy review comment. Further, if the breaking changes are intolerable or there is a way to make a change while preserving compatibility, that should be explored.*
-
-## UI
-
-*Will this RFC have implications for the web UI? If so, be sure to collaborate with a frontend engineer and/or product designer. They can add UI design assets (user flows, wireframes, mockups or prototypes) to this document, and if changes are substantial, they may wish to create a separate RFC to dive further into details on the UI changes.*
+# [RFC] __TITLE__
+
+*Note: this RFC template follows the HashiCorp RFC format described [here](https://works.hashicorp.com/articles/rfc-template)*
+
+| | |
+| ------------- | ------------------------------------------- |
+| **Created** | 2025-03-28 |
+| **Status** | **WIP** \| InReview \| Approved \| Obsolete |
+| **Owner** | *GitHub handle of the author* |
+| **Approvers** | *GitHub handles of the approvers* |
+
+---
+
+*The RFC begins with a brief overview. This section should be one or two paragraphs that just explain what the goal of this RFC is going to be, but without diving too deeply into the "why", "why now", "how", etc. Ensure anyone opening the document will form a clear understanding of the RFC's intent from reading these opening paragraph(s).*
+
+## Background
+
+*The next section is the "Background" section. This section should be at least two paragraphs and can take up to a whole page in some cases. The **guiding goal of the background section** is: as a newcomer to this project (new employee, team transfer), can I read the background section and follow any links to get the full context of "why" this change is necessary?*
+
+## Proposal
+
+*The next required section is "Proposal" or "Goal". Given the background above, this section proposes a solution. This should be an overview of the "how" for the solution, but for details further sections will be used.*
+
+### Abandoned Ideas (Optional)
+
+*As RFCs evolve, it is common that there are ideas that are abandoned. Rather than simply deleting them from the document, you should try to organize them into sections that make it clear they're abandoned while explaining "why" they were abandoned.*
+
+*When sharing your RFC with others or having someone look back on your RFC in the future, it is common to walk the same path and fall into the same pitfalls that we've since matured from. Abandoned ideas are a way to recognize that path and explain the pitfalls and why they were abandoned.*
+
+---
+
+## Implementation
+
+*Many RFCs have an "implementation" section which details how the implementation will work. This section should explain the rough API changes (internal and external), package changes, etc. The goal is to give an idea to reviewers about the subsystems that require change and the surface area of those changes.*
+
+*This knowledge can result in recommendations for alternate approaches that perhaps are idiomatic to the project or result in fewer packages touched. Or, it may result in the realization that the proposed solution in this RFC is too complex given the problem.*
+
+*For the RFC author, typing out the implementation at a high level often serves as "[rubber duck debugging](https://en.wikipedia.org/wiki/Rubber_duck_debugging)" and you can catch a lot of issues or unknown unknowns prior to writing any real code.*
+
+## UX
+
+*If there are user-impacting changes by this RFC, it is important to have a "UI/UX" section. User-impacting changes include external API changes, configuration format changes, CLI output changes, etc.*
+
+*This section is effectively the "implementation" section for the user experience. The goal is to explain the changes necessary, any impacts to backwards compatibility, any impacts to normal workflow, etc.*
+
+*As a reviewer, this section should be checked to see if the proposed changes **feel** like the project in question. For example, if the UX changes are proposing a flag "-foo_bar" but all our flags use hyphens like "-foo-bar", then that is a noteworthy review comment. Further, if the breaking changes are intolerable or there is a way to make a change while preserving compatibility, that should be explored.*
+
+## UI
+
+*Will this RFC have implications for the web UI? If so, be sure to collaborate with a frontend engineer and/or product designer. They can add UI design assets (user flows, wireframes, mockups or prototypes) to this document, and if changes are substantial, they may wish to create a separate RFC to dive further into details on the UI changes.*
diff --git a/docs/security-schema.md b/docs/security-schema.md
index 3d85b6e26..bae1bee2b 100644
--- a/docs/security-schema.md
+++ b/docs/security-schema.md
@@ -1,208 +1,208 @@
-# Directory Security Trust Schema
-
-## Overview
-
-Directory is a system designed to provide secure, authenticated, and authorized access to services and resources across multiple environments and organizations. 
It leverages SPIRE (SPIFFE Runtime Environment) to manage workload identities and enable zero-trust security principles. - -SPIRE (SPIFFE Runtime Environment) is an open-source system that provides automated, cryptographically secure identities to workloads in modern infrastructure. It implements the SPIFFE (Secure Production Identity Framework For Everyone) standard, enabling zero-trust security by assigning each workload a unique, verifiable identity (SVID). - -In the Directory project, SPIRE is used to: -- Securely identify and authenticate workloads (services, applications, etc.) -- Enable authentication between services using JWT or X.509 SVIDs -- Support dynamic, scalable, and multi-environment deployments -- Enable interconnectivity between different organizations -- Provide primitives for authorization logic - -## Authentication and Authorization - -### Authentication - -SPIRE provides strong, cryptographically verifiable identities (SPIFFE IDs) to every workload. These identities are used for: -- **Workload Authentication:** Every service, whether running in Kubernetes, on a VM, or on bare metal, receives a unique SPIFFE ID (e.g., `spiffe://dir.example/ns/default/sa/my-service`). -- **Cross-Organization Authentication:** Through federation, workloads from different organizations or clusters can mutually authenticate using their SPIFFE IDs, without the need to implement custom cross-org authentication logic. -- **Secure Communication:** SPIRE issues SVIDs (JWT or X.509) that are used for authentication and encrypted communication. - -**What problem does SPIRE solve?** -- Eliminates the need to build and maintain custom authentication systems for each environment or organization. -- Provides a standard, interoperable identity for every workload, regardless of where it runs. -- Enables secure, automated trust establishment between independent organizations or clusters. - -#### How Directory uses SPIRE for Authentication - -- **Workload Identity:** Each Directory component (API server, clients, etc.) is assigned a SPIFFE ID based on its SPIRE Agent configuration. -- **Cross-Organization Authentication:** Directory can authenticate workloads from other organizations or clusters using their SPIFFE IDs, enabling secure communication without custom integration. -- **Secure Communication:** Directory establishes secure connections between components using the SVID certificates issued by SPIRE, ensuring secure and authenticated communication. - -### Authorization - -SPIRE itself does not enforce authorization, but it enables fine-grained authorization by providing strong workload identities: -- **Policy-Based Access Control:** Applications and infrastructure can use SPIFFE IDs to define and enforce access policies (e.g., only workloads with a specific SPIFFE ID can access a sensitive API). -- **Attribute-Based Authorization:** SPIFFE IDs can encode attributes (namespace, service account, environment) that can be used in authorization decisions. -- **Cross-Domain Authorization:** Because SPIRE federates trust domains, authorization policies can include or exclude identities from other organizations or clusters, enabling secure collaboration without manual certificate management. - -**What problem does SPIRE solve?** -- Enables authorization decisions based on workload identity, not just network location or static credentials. -- Simplifies policy management by using a standard identity format (SPIFFE ID) across all environments. 
-- Makes it possible to securely authorize workloads from federated domains (e.g., partner orgs, multi-cloud, hybrid setups) without custom integration. - -#### How Directory uses SPIRE for Authorization - -- **Policy Enforcement:** Directory components can enforce access control policies based on the SPIFFE IDs of incoming requests, ensuring that only authorized workloads can access specific services or APIs. -- **Attribute-Based Access Control:** Directory can leverage attributes encoded in SPIFFE IDs to implement fine-grained access control policies. -- **Federated Authorization:** Directory can use SPIFFE IDs to authorize workloads from other organizations or clusters, enabling secure collaboration without custom integration. - -Currently, Directory implements static authorization policies based on SPIFFE IDs, with plans to enhance this with dynamic, attribute-based policies in future releases. The Authorization policies are enforced based on external trust domains in the following manner: - -| API Method | Authorized Trust Domains | -| --------------------------------- | ------------------------------------------- | -| `*` | Your own trust domain (e.g., `dir.example`) | -| `Store.Pull` | External Trust domain | -| `Store.Lookup` | External Trust domain | -| `Store.PullReferrer` | External Trust domain | -| `Sync.RequestRegistryCredentials` | External Trust domain | - -## Topology - -The Directory's security trust schema supports both single and federated trust domain topology setup, with SPIRE deployed across various environments: - -### Single Trust Domain - -- **SPIRE Server**: Central authority for the trust domain -- **SPIRE Agents**: Deployed in different environments, connect to the SPIRE Server - - Kubernetes clusters (as DaemonSets or sidecars) - - VMs (as systemd services or processes) - - Bare metal/SSH hosts -- **Workloads**: Obtain identities from local SPIRE Agent via the Workload API -```mermaid -flowchart LR - subgraph Trust_Domain[Trust Domain: example.org] - SPIRE_SERVER[SPIRE Server] - AGENT_K8S1[SPIRE Agent K8s] - AGENT_VM[SPIRE Agent VM] - AGENT_SSH[SPIRE Agent SSH] - SPIRE_SERVER <--> AGENT_K8S1 - SPIRE_SERVER <--> AGENT_VM - SPIRE_SERVER <--> AGENT_SSH - end -``` - -### Federated Trust Domains - -- Each environment (e.g., cluster, organization) runs its own SPIRE Server and agents -- SPIRE Servers exchange bundles to establish federation -- Enables secure, authenticated communication between workloads in different domains -```mermaid -flowchart TD - subgraph DIR_Trust_Domain[Trust Domain: dir.example] - DIR_SPIRE_SERVER[SPIRE Server] - DIR_SPIRE_AGENT1[SPIRE Agent K8s] - DIR_SPIRE_AGENT1[SPIRE Agent VM] - DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT1 - DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT2 - end - subgraph DIRCTL_Trust_Domain[Trust Domain: dirctl.example] - DIRCTL_SPIRE_SERVER[SPIRE Server] - DIRCTL_SPIRE_AGENT1[SPIRE Agent k8s] - DIRCTL_SPIRE_AGENT2[SPIRE Agent VM] - DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT1 - DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT2 - end - DIR_SPIRE_SERVER <-.->|"Federation (SPIFFE Bundle)"| DIRCTL_SPIRE_SERVER -``` - -## Deployment - -### SPIRE Server - -- Deployed as a Kubernetes service (or on VMs) -- Configured with a unique trust domain name (e.g., `dir.example`) -- Federation enabled to allow cross-domain trust -- Exposes a bundle endpoint for federation - -```bash -export TRUST_DOMAIN="my-service.local" -export SERVICE_TYPE="LoadBalancer" -helm repo add spiffe https://spiffe.github.io/helm-charts-hardened -helm upgrade spire-crds 
spire-crds \ - --repo https://spiffe.github.io/helm-charts-hardened/ \ - --create-namespace -n spire-crds \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" -helm upgrade spire spire \ - --repo https://spiffe.github.io/helm-charts-hardened/ \ - --set global.spire.trustDomain="$TRUST_DOMAIN" \ - --set spire-server.service.type="$SERVICE_TYPE" \ - --set spire-server.federation.enabled="true" \ - --set spire-server.controllerManager.watchClassless="true" \ - --namespace spire \ - --create-namespace \ - --install \ - --wait \ - --wait-for-jobs \ - --timeout "15m" -``` - -### SPIRE Agent - -- Deployed as DaemonSets in Kubernetes, or as services on VMs/bare metal -- Connect to the SPIRE Server to obtain workload identities -- Attest workloads and provide SVIDs via the Workload API - -### Directory - -Directory components can be deployed in the trust domain and configured to use SPIRE for identity: - -```yaml -spire: - enabled: true - trustDomain: dir.example - federation: - - trustDomain: dirctl.example - bundleEndpointURL: https://${DIRCTL_BUNDLE_ADDRESS} - bundleEndpointProfile: - type: https_spiffe - endpointSPIFFEID: spiffe://dirctl.example/spire/server - trustDomainBundle: | - ${DIRCTL_BUNDLE_CONTENT} -``` - -## Test Example - -- Two Kubernetes Kind clusters are created (one for each trust domain) -- SPIRE Servers and Agents are deployed in each cluster -- Federation is established between the clusters -- Directory services (DIR API Server, DIRCTL Client Internal, DIRCTL Client External) are deployed and communicate securely using SPIFFE identities - -```mermaid -flowchart TD - subgraph DIR_Trust_Domain[DIR: dir.example] - DIR_SPIRE_SERVER[SPIRE Server] - DIR_API_SERVER[DIR API Server] - DIRCTL_API_CLIENT[DIRCTL Admin Client] - DIR_SPIRE_AGENT1[SPIRE Agent K8s] - DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT1 - DIR_SPIRE_AGENT1 -->|"Workload API"| DIR_API_SERVER - DIR_SPIRE_AGENT1 -->|"Workload API"| DIRCTL_API_CLIENT - DIRCTL_API_CLIENT -->|"API Call"| DIR_API_SERVER - end - subgraph DIRCTL_Trust_Domain[DIRCTL: dirctl.example] - DIRCTL_SPIRE_SERVER[SPIRE Server] - DIRCTL_CLIENT[DIRCTL Client] - DIRCTL_SPIRE_AGENT1[SPIRE Agent K8s] - DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT1 - DIRCTL_SPIRE_AGENT1 -->|"Workload API"| DIRCTL_CLIENT - end - DIR_SPIRE_SERVER <-.->|"Federation (SPIFFE Bundle)"| DIRCTL_SPIRE_SERVER - DIRCTL_CLIENT -->|"API Calls"| DIR_API_SERVER -``` - -**Deployment Tasks:** -```bash -sudo task test:spire # Deploys the full federation setup -task test:spire:cleanup # Cleans up the test environment -``` ---- - -For more details, see the [SPIRE Documentation](https://spiffe.io/docs/latest/spiffe-about/overview/) and [SPIRE Federation Guide](https://spiffe.io/docs/latest/spire-helm-charts-hardened-advanced/federation/). +# Directory Security Trust Schema + +## Overview + +Directory is a system designed to provide secure, authenticated, and authorized access to services and resources across multiple environments and organizations. It leverages SPIRE (SPIFFE Runtime Environment) to manage workload identities and enable zero-trust security principles. + +SPIRE (SPIFFE Runtime Environment) is an open-source system that provides automated, cryptographically secure identities to workloads in modern infrastructure. It implements the SPIFFE (Secure Production Identity Framework For Everyone) standard, enabling zero-trust security by assigning each workload a unique, verifiable identity (SVID). 
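+
+As a hedged illustration of what an SVID gives a workload in practice, the Go sketch below uses the go-spiffe v2 SDK to obtain an X.509 SVID from the local agent and call a peer over mTLS, authorizing it by SPIFFE ID. The SPIFFE IDs and endpoint URL are placeholders for illustration only.
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+    "net/http"
+
+    "github.com/spiffe/go-spiffe/v2/spiffeid"
+    "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig"
+    "github.com/spiffe/go-spiffe/v2/workloadapi"
+)
+
+func main() {
+    ctx := context.Background()
+
+    // Fetch this workload's identity from the local SPIRE agent
+    // (the socket is discovered via the SPIFFE_ENDPOINT_SOCKET env var).
+    source, err := workloadapi.NewX509Source(ctx)
+    if err != nil {
+        log.Fatalf("creating X509Source: %v", err)
+    }
+    defer source.Close()
+
+    svid, err := source.GetX509SVID()
+    if err != nil {
+        log.Fatalf("fetching SVID: %v", err)
+    }
+    log.Printf("authenticated as %s", svid.ID) // e.g. spiffe://dir.example/ns/default/sa/my-service
+
+    // Trust only the expected server identity (illustrative SPIFFE ID).
+    serverID := spiffeid.RequireFromString("spiffe://dir.example/dir-api-server")
+    client := &http.Client{Transport: &http.Transport{
+        TLSClientConfig: tlsconfig.MTLSClientConfig(source, source, tlsconfig.AuthorizeID(serverID)),
+    }}
+
+    resp, err := client.Get("https://dir-api.dir.example:8443/healthz") // illustrative endpoint
+    if err != nil {
+        log.Fatalf("request failed: %v", err)
+    }
+    defer resp.Body.Close()
+    log.Printf("server responded: %s", resp.Status)
+}
+```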
+
+In the Directory project, SPIRE is used to:
+- Securely identify and authenticate workloads (services, applications, etc.)
+- Enable authentication between services using JWT or X.509 SVIDs
+- Support dynamic, scalable, and multi-environment deployments
+- Enable interconnectivity between different organizations
+- Provide primitives for authorization logic
+
+## Authentication and Authorization
+
+### Authentication
+
+SPIRE provides strong, cryptographically verifiable identities (SPIFFE IDs) to every workload. These identities are used for:
+- **Workload Authentication:** Every service, whether running in Kubernetes, on a VM, or on bare metal, receives a unique SPIFFE ID (e.g., `spiffe://dir.example/ns/default/sa/my-service`).
+- **Cross-Organization Authentication:** Through federation, workloads from different organizations or clusters can mutually authenticate using their SPIFFE IDs, without the need to implement custom cross-org authentication logic.
+- **Secure Communication:** SPIRE issues SVIDs (JWT or X.509) that are used for authentication and encrypted communication.
+
+**What problem does SPIRE solve?**
+- Eliminates the need to build and maintain custom authentication systems for each environment or organization.
+- Provides a standard, interoperable identity for every workload, regardless of where it runs.
+- Enables secure, automated trust establishment between independent organizations or clusters.
+
+#### How Directory uses SPIRE for Authentication
+
+- **Workload Identity:** Each Directory component (API server, clients, etc.) is assigned a SPIFFE ID based on its SPIRE Agent configuration.
+- **Cross-Organization Authentication:** Directory can authenticate workloads from other organizations or clusters using their SPIFFE IDs, enabling secure communication without custom integration.
+- **Secure Communication:** Directory establishes secure connections between components using the SVID certificates issued by SPIRE, ensuring secure and authenticated communication.
+
+### Authorization
+
+SPIRE itself does not enforce authorization, but it enables fine-grained authorization by providing strong workload identities:
+- **Policy-Based Access Control:** Applications and infrastructure can use SPIFFE IDs to define and enforce access policies (e.g., only workloads with a specific SPIFFE ID can access a sensitive API).
+- **Attribute-Based Authorization:** SPIFFE IDs can encode attributes (namespace, service account, environment) that can be used in authorization decisions.
+- **Cross-Domain Authorization:** Because SPIRE federates trust domains, authorization policies can include or exclude identities from other organizations or clusters, enabling secure collaboration without manual certificate management.
+
+**What problem does SPIRE solve?**
+- Enables authorization decisions based on workload identity, not just network location or static credentials.
+- Simplifies policy management by using a standard identity format (SPIFFE ID) across all environments.
+- Makes it possible to securely authorize workloads from federated domains (e.g., partner orgs, multi-cloud, hybrid setups) without custom integration.
+
+#### How Directory uses SPIRE for Authorization
+
+- **Policy Enforcement:** Directory components can enforce access control policies based on the SPIFFE IDs of incoming requests, ensuring that only authorized workloads can access specific services or APIs.
+- **Attribute-Based Access Control:** Directory can leverage attributes encoded in SPIFFE IDs to implement fine-grained access control policies.
+- **Federated Authorization:** Directory can use SPIFFE IDs to authorize workloads from other organizations or clusters, enabling secure collaboration without custom integration.
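+
+As a concrete illustration, the sketch below models this kind of static, SPIFFE-ID-based policy in Go. The `Policy` type and the method-name keys are hypothetical, chosen to mirror the table that follows; they are not the actual Directory implementation.
+
+```go
+package authz
+
+import "github.com/spiffe/go-spiffe/v2/spiffeid"
+
+// Policy mirrors the table below: callers from the local trust domain may
+// invoke any method, while federated callers are limited to an allow-list.
+type Policy struct {
+    Local           spiffeid.TrustDomain
+    ExternalAllowed map[string]bool
+}
+
+// Allowed reports whether the verified caller identity may invoke method.
+func (p Policy) Allowed(method string, caller spiffeid.ID) bool {
+    if caller.TrustDomain() == p.Local {
+        return true // "*": the local trust domain can call everything
+    }
+    return p.ExternalAllowed[method] // federated callers need an explicit entry
+}
+```
+
+For example, a deployment in `dir.example` might allow-list `Store.Pull`, `Store.Lookup`, `Store.PullReferrer`, and `Sync.RequestRegistryCredentials` for federated callers, matching the policy table below.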
+
+Currently, Directory implements static authorization policies based on SPIFFE IDs, with plans to enhance this with dynamic, attribute-based policies in future releases. These authorization policies are enforced based on external trust domains in the following manner:
+
+| API Method                        | Authorized Trust Domains                     |
+| --------------------------------- | -------------------------------------------- |
+| `*`                               | Your own trust domain (e.g., `dir.example`)  |
+| `Store.Pull`                      | External trust domains                       |
+| `Store.Lookup`                    | External trust domains                       |
+| `Store.PullReferrer`              | External trust domains                       |
+| `Sync.RequestRegistryCredentials` | External trust domains                       |
+
+## Topology
+
+The Directory's security trust schema supports both single and federated trust domain topologies, with SPIRE deployed across various environments:
+
+### Single Trust Domain
+
+- **SPIRE Server**: Central authority for the trust domain
+- **SPIRE Agents**: Deployed in different environments, connect to the SPIRE Server
+  - Kubernetes clusters (as DaemonSets or sidecars)
+  - VMs (as systemd services or processes)
+  - Bare metal/SSH hosts
+- **Workloads**: Obtain identities from local SPIRE Agent via the Workload API
+```mermaid
+flowchart LR
+    subgraph Trust_Domain[Trust Domain: example.org]
+        SPIRE_SERVER[SPIRE Server]
+        AGENT_K8S1[SPIRE Agent K8s]
+        AGENT_VM[SPIRE Agent VM]
+        AGENT_SSH[SPIRE Agent SSH]
+        SPIRE_SERVER <--> AGENT_K8S1
+        SPIRE_SERVER <--> AGENT_VM
+        SPIRE_SERVER <--> AGENT_SSH
+    end
+```
+
+### Federated Trust Domains
+
+- Each environment (e.g., cluster, organization) runs its own SPIRE Server and agents
+- SPIRE Servers exchange bundles to establish federation
+- Enables secure, authenticated communication between workloads in different domains
+```mermaid
+flowchart TD
+    subgraph DIR_Trust_Domain[Trust Domain: dir.example]
+        DIR_SPIRE_SERVER[SPIRE Server]
+        DIR_SPIRE_AGENT1[SPIRE Agent K8s]
+        DIR_SPIRE_AGENT2[SPIRE Agent VM]
+        DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT1
+        DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT2
+    end
+    subgraph DIRCTL_Trust_Domain[Trust Domain: dirctl.example]
+        DIRCTL_SPIRE_SERVER[SPIRE Server]
+        DIRCTL_SPIRE_AGENT1[SPIRE Agent K8s]
+        DIRCTL_SPIRE_AGENT2[SPIRE Agent VM]
+        DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT1
+        DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT2
+    end
+    DIR_SPIRE_SERVER <-.->|"Federation (SPIFFE Bundle)"| DIRCTL_SPIRE_SERVER
+```
+
+## Deployment
+
+### SPIRE Server
+
+- Deployed as a Kubernetes service (or on VMs)
+- Configured with a unique trust domain name (e.g., `dir.example`)
+- Federation enabled to allow cross-domain trust
+- Exposes a bundle endpoint for federation
+
+```bash
+export TRUST_DOMAIN="my-service.local"
+export SERVICE_TYPE="LoadBalancer"
+helm repo add spiffe https://spiffe.github.io/helm-charts-hardened
+helm upgrade spire-crds spire-crds \
+  --repo https://spiffe.github.io/helm-charts-hardened/ \
+  --create-namespace -n spire-crds \
+  --install \
+  --wait \
+  --wait-for-jobs \
+  --timeout "15m"
+helm upgrade spire spire \
+  --repo https://spiffe.github.io/helm-charts-hardened/ \
+  --set global.spire.trustDomain="$TRUST_DOMAIN" \
+  --set spire-server.service.type="$SERVICE_TYPE" \
+  --set spire-server.federation.enabled="true" \
+  --set 
spire-server.controllerManager.watchClassless="true" \ + --namespace spire \ + --create-namespace \ + --install \ + --wait \ + --wait-for-jobs \ + --timeout "15m" +``` + +### SPIRE Agent + +- Deployed as DaemonSets in Kubernetes, or as services on VMs/bare metal +- Connect to the SPIRE Server to obtain workload identities +- Attest workloads and provide SVIDs via the Workload API + +### Directory + +Directory components can be deployed in the trust domain and configured to use SPIRE for identity: + +```yaml +spire: + enabled: true + trustDomain: dir.example + federation: + - trustDomain: dirctl.example + bundleEndpointURL: https://${DIRCTL_BUNDLE_ADDRESS} + bundleEndpointProfile: + type: https_spiffe + endpointSPIFFEID: spiffe://dirctl.example/spire/server + trustDomainBundle: | + ${DIRCTL_BUNDLE_CONTENT} +``` + +## Test Example + +- Two Kubernetes Kind clusters are created (one for each trust domain) +- SPIRE Servers and Agents are deployed in each cluster +- Federation is established between the clusters +- Directory services (DIR API Server, DIRCTL Client Internal, DIRCTL Client External) are deployed and communicate securely using SPIFFE identities + +```mermaid +flowchart TD + subgraph DIR_Trust_Domain[DIR: dir.example] + DIR_SPIRE_SERVER[SPIRE Server] + DIR_API_SERVER[DIR API Server] + DIRCTL_API_CLIENT[DIRCTL Admin Client] + DIR_SPIRE_AGENT1[SPIRE Agent K8s] + DIR_SPIRE_SERVER <--> DIR_SPIRE_AGENT1 + DIR_SPIRE_AGENT1 -->|"Workload API"| DIR_API_SERVER + DIR_SPIRE_AGENT1 -->|"Workload API"| DIRCTL_API_CLIENT + DIRCTL_API_CLIENT -->|"API Call"| DIR_API_SERVER + end + subgraph DIRCTL_Trust_Domain[DIRCTL: dirctl.example] + DIRCTL_SPIRE_SERVER[SPIRE Server] + DIRCTL_CLIENT[DIRCTL Client] + DIRCTL_SPIRE_AGENT1[SPIRE Agent K8s] + DIRCTL_SPIRE_SERVER <--> DIRCTL_SPIRE_AGENT1 + DIRCTL_SPIRE_AGENT1 -->|"Workload API"| DIRCTL_CLIENT + end + DIR_SPIRE_SERVER <-.->|"Federation (SPIFFE Bundle)"| DIRCTL_SPIRE_SERVER + DIRCTL_CLIENT -->|"API Calls"| DIR_API_SERVER +``` + +**Deployment Tasks:** +```bash +sudo task test:spire # Deploys the full federation setup +task test:spire:cleanup # Cleans up the test environment +``` +--- + +For more details, see the [SPIRE Documentation](https://spiffe.io/docs/latest/spiffe-about/overview/) and [SPIRE Federation Guide](https://spiffe.io/docs/latest/spire-helm-charts-hardened-advanced/federation/). diff --git a/e2e/README.md b/e2e/README.md index 8b1453663..5ce969a73 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -1,389 +1,389 @@ -# E2E Test Suite Documentation - -This directory contains comprehensive end-to-end tests for the Directory system, organized into separate packages by deployment mode and API type for better isolation and maintainability. 
- -## 🏗️ Test Suite Architecture - -**Structure**: 3 separate test suites with 103+ test cases organized by deployment mode and API type - -``` -e2e/ -├── shared/ # package shared - Common utilities -│ ├── config/ # Deployment mode configuration -│ ├── utils/ # CLI helpers, validation utilities -│ └── testdata/ # Test record files with embedding -├── local/ # package local - CLI tests (local mode) -│ ├── local_suite_test.go # TestLocalE2E(t *testing.T) -│ ├── 01_storage_test.go # Storage operations -│ ├── 02_search_test.go # Search functionality -│ ├── 03_routing_test.go # Local routing operations -│ ├── 04_signature_test.go # Signature workflows -│ └── 05_network_cmd_test.go # Network command utilities -├── client/ # package client - Go library tests (local mode) -│ ├── client_suite_test.go # TestClientE2E(t *testing.T) -│ └── 01_client_test.go # Client library APIs -└── network/ # package network - CLI tests (network mode) - ├── network_suite_test.go # TestNetworkE2E(t *testing.T) - ├── cleanup.go # Inter-test cleanup utilities - ├── 01_deploy_test.go # Multi-peer deployment - ├── 02_sync_test.go # Peer synchronization - └── 03_search_test.go # Remote routing search -``` - -## 📦 Test Packages - -### 🏠 **Local Package** (`e2e/local/`) -**Deployment**: Local single node -**Focus**: CLI commands in local deployment mode -**Suite**: `TestLocalE2E(t *testing.T)` - -#### **`01_storage_test.go`** - CLI Storage & Search Operations -**Focus**: Core CLI commands with OASF version compatibility - -**Test Cases:** -- `should successfully push a record` - Tests `dirctl push` with 0.3.1/0.7.0 record formats -- `should successfully pull an existing record` - Tests `dirctl pull` functionality -- `should return identical record when pulled after push` - Validates data integrity across push/pull cycle -- `should push the same record again and return the same cid` - Tests CID determinism -- `should search for records with first skill and return their CID` - Tests general search API (searchv1) with skill queries -- `should search for records with second skill and return their CID` - Validates all skills are preserved during storage -- `should pull a non-existent record and return an error` - Tests error handling for missing records -- `should successfully delete a record` - Tests `dirctl delete` functionality -- `should fail to pull a deleted record` - Validates deletion actually removes records - -**Key Features:** -- OASF version compatibility (0.3.1, 0.7.0) -- JSON data integrity validation -- CID determinism testing -- General search API testing (searchv1, not routing) - -#### **`02_search_test.go`** - Search Functionality with Wildcards -**Focus**: Advanced search patterns and wildcard support - -**Test Cases:** -- Exact match searches (no wildcards) -- Wildcard searches with `*` pattern (name, version, skill, locator, module fields) -- Wildcard searches with `?` pattern (single character matching) -- Wildcard searches with `[]` list patterns (character classes and ranges) -- Mixed wildcard patterns and complex combinations -- Negative tests for non-matching patterns -- Edge cases and special characters - -**Key Features:** -- Comprehensive wildcard pattern testing -- Complex search query validation -- Pattern matching edge cases -- Error handling for invalid patterns - -#### **`03_routing_test.go`** - Local Routing Commands -**Focus**: Complete routing subcommand testing in local environment - -**Test Cases:** -- `should push a record first (prerequisite for publish)` - Setup record for routing tests 
-- `should publish a record to local routing` - Tests `dirctl routing publish` in local mode -- `should fail to publish non-existent record` - Tests publish error handling -- `should list all local records without filters` - Tests `dirctl routing list` without filters -- `should list record by CID` - Tests `dirctl routing list --cid` functionality -- `should list records by skill filter` - Tests `dirctl routing list --skill` with hierarchical matching -- `should list records by specific skill` - Tests specific skill matching -- `should list records by locator filter` - Tests `dirctl routing list --locator` functionality -- `should list records with multiple filters (AND logic)` - Tests multiple filter combination -- `should return empty results for non-matching skill` - Tests filtering with no results -- `should return empty results for non-existent CID` - Tests CID lookup with helpful messages -- `should respect limit parameter` - Tests `dirctl routing list --limit` functionality -- `should search for local records (but return empty in local mode)` - Tests `dirctl routing search` in local mode -- `should handle search with multiple criteria` - Tests complex search queries in local mode -- `should provide helpful guidance when no remote records found` - Tests search guidance messages -- `should show routing statistics for local records` - Tests `dirctl routing info` command -- `should show helpful tips in routing info` - Tests info command guidance -- `should unpublish a previously published record` - Tests `dirctl routing unpublish` command -- `should fail to unpublish non-existent record` - Tests unpublish error handling -- `should not find unpublished record in local list` - Validates unpublish removes from routing -- `should show empty routing info after unpublish` - Tests info after unpublish -- `should validate routing command help` - Tests `dirctl routing --help` functionality - -**Key Features:** -- Complete routing subcommand coverage -- Local-only routing behavior validation -- Error handling and edge cases -- Command integration testing -- Help and guidance message validation - -#### **`04_signature_test.go`** - Cryptographic Signing Operations -**Focus**: Record signing, verification, and cryptographic operations - -**Test Cases:** -- `should create keys for signing` - Tests key generation for signing -- `should push a record to the store` - Setup record for signing tests -- `should sign a record with a key pair` - Tests `dirctl sign` command -- `should verify a signature with a public key on server side` - Tests server-side signature verification -- `should pull a signature from the store` - Tests signature retrieval -- `should pull a public key from the store` - Tests public key retrieval - -**Key Features:** -- Cryptographic signing workflows -- Key management testing -- Signature verification validation - -#### **`05_network_cmd_test.go`** - Network Command Utilities -**Focus**: Network-specific CLI utilities and key management (local mode) - -**Test Cases:** -- `should generate a peer ID from a valid ED25519 key` - Tests `network info` with existing key -- `should fail with non-existent key file` - Tests error handling for missing keys -- `should fail with empty key path` - Tests validation of key path parameter -- `should generate a new peer ID and save the key to specified output` - Tests `network init` key generation -- `should fail when output directory doesn't exist and cannot be created` - Tests error handling for invalid paths - -**Key Features:** -- Network 
identity management -- Key generation and validation -- CLI utility testing - -### 📚 **Client Package** (`e2e/client/`) -**Deployment**: Local single node -**Focus**: Go client library API methods -**Suite**: `TestClientE2E(t *testing.T)` - -#### **`01_client_test.go`** - Client Library API Tests -**Focus**: Client library API methods with OASF version compatibility - -**Test Cases:** -- `should push a record to store` - Tests `c.Push()` client method -- `should pull a record from store` - Tests `c.Pull()` client method -- `should publish a record` - Tests `c.Publish()` routing method -- `should list published record by one label` - Tests `c.List()` with single query -- `should list published record by multiple labels` - Tests `c.List()` with multiple queries (AND logic) -- `should list published record by feature and domain labels` - Tests domain/feature support (currently skipped) -- `should search routing for remote records` - Tests `c.SearchRouting()` method -- `should unpublish a record` - Tests `c.Unpublish()` routing method -- `should not find unpublished record` - Validates unpublish removes routing announcements -- `should delete a record from store` - Tests `c.Delete()` storage method -- `should not find deleted record in store` - Validates delete removes from storage - -**Key Features:** -- Direct client library API testing -- Routing API validation (publish, list, unpublish, search) -- OASF version compatibility (0.3.1, 0.7.0) -- RecordQuery API testing - -### 🌐 **Network Package** (`e2e/network/`) -**Deployment**: Network with multiple peers -**Focus**: CLI commands in network deployment mode with proper test isolation -**Suite**: `TestNetworkE2E(t *testing.T)` - -#### **`01_deploy_test.go`** - Multi-Peer Routing Operations -**Focus**: Multi-peer routing, DHT operations, local vs remote behavior - -**Test Cases:** -- `should push record_v070.json to peer 1` - Tests storage on specific peer -- `should pull record_v070.json from peer 1` - Tests local retrieval -- `should fail to pull record_v070.json from peer 2` - Validates records are peer-specific -- `should publish record_v070.json to the network on peer 1` - Tests DHT announcement -- `should fail publish record_v070.json to the network on peer 2` - Tests publish validation -- `should list local records correctly (List is local-only)` - Tests local-only list behavior -- `should list by skill correctly on local vs remote peers` - Validates local vs remote filtering -- `should show routing info statistics` - Tests routing statistics command -- `should discover remote records via routing search` - Tests network-wide discovery - -**Key Features:** -- Multi-peer DHT testing -- Local vs remote record validation -- Network announcement and discovery -- Complete routing subcommand testing -- **Cleanup**: `DeferCleanup` ensures clean state for subsequent tests - -#### **`02_sync_test.go`** - Peer-to-Peer Synchronization -**Focus**: Sync service operations, peer-to-peer data replication - -**Test Cases:** -- `should accept valid remote URL format` - Tests sync creation with remote URLs -- `should execute without arguments and return a list with the created sync` - Tests `sync list` command -- `should accept a sync ID argument and return the sync status` - Tests `sync status` command -- `should accept a sync ID argument and delete the sync` - Tests `sync delete` command -- `should return deleted status` - Validates sync deletion -- `should push record_v070_sync_v4.json to peer 1` - Setup for sync testing -- `should publish 
record_v070_sync_v4.json` - Tests routing publish for sync records -- `should push record_v070_sync_v5.json to peer 1` - Setup second record for multi-peer sync -- `should publish record_v070_sync_v5.json` - Tests routing publish for second record -- `should fail to pull record_v070_sync_v4.json from peer 2` - Validates initial isolation -- `should create sync from peer 1 to peer 2` - Tests sync creation between peers -- `should list the sync` - Tests sync listing on target peer -- `should wait for sync to complete` - Tests sync completion monitoring -- `should succeed to pull record_v070_sync_v4.json from peer 2 after sync` - Validates sync transferred data -- `should succeed to search for record_v070_sync_v4.json from peer 2 after sync` - Tests search after sync -- `should verify the record_v070_sync_v4.json from peer 2 after sync` - Tests verification after sync -- `should delete sync from peer 2` - Tests sync cleanup -- `should wait for delete to complete` - Tests sync deletion completion -- `should create sync from peer 1 to peer 3 using routing search piped to sync create` - Tests advanced sync creation with routing search -- `should wait for sync to complete` - Tests sync completion for peer 3 -- `should succeed to pull record_v070_sync_v5.json from peer 3 after sync` - Validates selective sync (Audio skill) -- `should fail to pull record_v070_sync_v4.json from peer 3 after sync` - Validates sync filtering by skills - -**Key Features:** -- Peer-to-peer synchronization testing -- Sync lifecycle management -- Data replication validation -- Multi-peer sync scenarios (peer 1 → peer 2, peer 1 → peer 3) -- Selective sync based on routing search and skill filtering -- Uses general search API (searchv1, not routing) -- **Cleanup**: `DeferCleanup` ensures clean state for subsequent tests - -#### **`03_search_test.go`** - Remote Routing Search with OR Logic -**Focus**: Remote routing search functionality with OR logic and minMatchScore - -**Test Cases:** -- `should push record_v070.json to peer 1` - Setup record for search tests -- `should publish record_v070.json to routing on peer 1 only` - Creates remote search scenario -- `should verify setup - peer 1 has local record, peer 2 does not` - Validates test setup -- `should debug: test working pattern first (minScore=1)` - Tests basic search functionality -- `should debug: test exact skill matching (minScore=1)` - Tests exact skill searches -- `should debug: test two skills with minScore=2` - Tests multiple skill matching -- `should demonstrate OR logic success - minScore=2 finds record` - Tests OR logic with partial matches -- `should demonstrate threshold filtering - minScore=3 filters out record` - Tests score thresholds -- `should demonstrate single query match - minScore=1 finds record` - Tests single query scenarios -- `should demonstrate all queries match - minScore=2 with 2 real queries` - Tests complete matches -- `should handle minScore=0 (should default to minScore=1)` - Tests edge case handling -- `should handle empty queries with appropriate error` - Tests error handling - -**Key Features:** -- Remote routing search testing (routingv1) -- OR logic and minMatchScore validation -- DHT discovery testing -- Complex search query scenarios -- **Cleanup**: `DeferCleanup` ensures clean state after all tests - -#### **`cleanup.go`** - Inter-Test Cleanup Utilities -**Focus**: Shared cleanup utilities for network test isolation - -**Functions:** -- `CleanupNetworkRecords()` - Removes CIDs from all peers (unpublish + delete) -- 
`RegisterCIDForCleanup()` - Tracks CIDs for cleanup by test file -- `CleanupAllNetworkTests()` - Comprehensive cleanup for AfterSuite - -**Key Features:** -- **Solves test contamination**: Ensures clean state between test files -- **Multi-peer cleanup**: Removes records from all peers (Peer1, Peer2, Peer3) -- **Dual operations**: Both unpublish (routing) and delete (storage) -- **Graceful handling**: Continues cleanup even if individual operations fail - -## 🚀 **Test Execution Commands:** - -### **All E2E Tests:** -```bash -# Run all e2e tests (client → local CLI → network CLI) -task test:e2e -task e2e -``` - -### **Local Deployment Tests:** -```bash -# Run local tests (client library + CLI with shared infrastructure) -task test:e2e:local -task e2e:local - -# Run individual test suites (with dedicated infrastructure) -task test:e2e:client # Client library tests only -task test:e2e:local:cli # Local CLI tests only -``` - -### **Network Deployment Tests:** -```bash -# Run network tests (multi-peer CLI with proper cleanup) -task test:e2e:network -task e2e:network -``` - -## 📋 **Test Execution Flow:** - -### **🏠 Local Mode Execution:** -``` -task test:e2e:local: -├── 🏗️ Setup local Kubernetes (single node) -├── 🔗 Setup port-forwarding -├── 📚 Run client library tests (Go APIs) -├── ⚙️ Run local CLI tests (dirctl commands) -└── 🧹 Cleanup infrastructure -``` - -### **🌐 Network Mode Execution:** -``` -task test:e2e:network: -├── 🏗️ Setup network Kubernetes (multi-peer) -├── 🔗 Setup port-forwarding -├── 🚀 Run 01_deploy_test.go → DeferCleanup → Clean all peers -├── 🔄 Run 02_sync_test.go → DeferCleanup → Clean all peers -├── 🔍 Run 03_search_test.go → DeferCleanup → Clean all peers -└── 🧹 Cleanup infrastructure -``` - -## 🎯 **Package Organization Benefits:** - -### **✅ True Isolation:** -- **Local vs Network**: Separate Go packages prevent cross-contamination -- **CLI vs Client**: Different test suites for different API types -- **Inter-test cleanup**: Network tests clean up between files using `cleanup.go` - -### **✅ Maintainability:** -- **Focused packages**: Each package has clear responsibility -- **Numbered files**: Predictable execution order within packages -- **Shared utilities**: Common code in `shared/` package -- **Clean architecture**: Logical separation of concerns - -### **✅ Performance:** -- **Shared infrastructure**: Local tests share single deployment -- **Parallel capability**: Different packages can run independently -- **Efficient cleanup**: Targeted cleanup only where needed - -## 🎯 **Key Test Features:** - -### **✅ Comprehensive Coverage:** -- **103+ test cases** across all major functionality -- **OASF version compatibility** (0.3.1, 0.7.0) -- **Both API types** - Client library and CLI commands -- **Error handling** - Validation of failure scenarios -- **Integration testing** - Multi-step workflows - -### **✅ Search API Testing:** -- **General Search** (searchv1) - Tested in `local/01_storage_test.go` and `network/02_sync_test.go` -- **Routing Search** (routingv1) - Tested in `client/01_client_test.go`, `local/03_routing_test.go`, and `network/` tests -- **Network Discovery** - Multi-peer search scenarios in `network/03_search_test.go` -- **Wildcard Patterns** - Comprehensive pattern testing in `local/02_search_test.go` - -### **✅ Routing Operations:** -- **Complete lifecycle** - Publish → List → Search → Unpublish -- **Local vs Remote** - Clear distinction and validation in network tests -- **Statistics** - Routing info and summary data -- **Error scenarios** - Comprehensive 
failure case testing -- **Test Isolation** - Proper cleanup between network test files - -### **✅ Architecture Improvements:** -- **Package separation** - True isolation between deployment modes -- **API type separation** - CLI tests vs Go library tests in separate packages -- **Controlled execution** - Numbered files ensure predictable test order -- **Efficient infrastructure** - Shared deployment for compatible test suites -- **Robust cleanup** - Inter-test cleanup prevents contamination - -## 🛠️ **Development Workflow:** - -### **Working on Local Features:** -```bash -# Fast feedback during development -task test:e2e:client # Test Go library changes - -# Full local testing -task test:e2e:local # Test both client and CLI -``` - -### **Working on Network Features:** -```bash -# Test specific network functionality -task test:e2e:network # Test multi-peer scenarios with proper cleanup -``` - -### **Debugging Test Issues:** -```bash -# Run individual test files (with Ginkgo focus) -go test -C ./e2e/network . -ginkgo.focus="Deploy" -go test -C ./e2e/local . -ginkgo.focus="Storage" -``` +# E2E Test Suite Documentation + +This directory contains comprehensive end-to-end tests for the Directory system, organized into separate packages by deployment mode and API type for better isolation and maintainability. + +## 🏗️ Test Suite Architecture + +**Structure**: 3 separate test suites with 103+ test cases organized by deployment mode and API type + +``` +e2e/ +├── shared/ # package shared - Common utilities +│ ├── config/ # Deployment mode configuration +│ ├── utils/ # CLI helpers, validation utilities +│ └── testdata/ # Test record files with embedding +├── local/ # package local - CLI tests (local mode) +│ ├── local_suite_test.go # TestLocalE2E(t *testing.T) +│ ├── 01_storage_test.go # Storage operations +│ ├── 02_search_test.go # Search functionality +│ ├── 03_routing_test.go # Local routing operations +│ ├── 04_signature_test.go # Signature workflows +│ └── 05_network_cmd_test.go # Network command utilities +├── client/ # package client - Go library tests (local mode) +│ ├── client_suite_test.go # TestClientE2E(t *testing.T) +│ └── 01_client_test.go # Client library APIs +└── network/ # package network - CLI tests (network mode) + ├── network_suite_test.go # TestNetworkE2E(t *testing.T) + ├── cleanup.go # Inter-test cleanup utilities + ├── 01_deploy_test.go # Multi-peer deployment + ├── 02_sync_test.go # Peer synchronization + └── 03_search_test.go # Remote routing search +``` + +## 📦 Test Packages + +### 🏠 **Local Package** (`e2e/local/`) +**Deployment**: Local single node +**Focus**: CLI commands in local deployment mode +**Suite**: `TestLocalE2E(t *testing.T)` + +#### **`01_storage_test.go`** - CLI Storage & Search Operations +**Focus**: Core CLI commands with OASF version compatibility + +**Test Cases:** +- `should successfully push a record` - Tests `dirctl push` with 0.3.1/0.7.0 record formats +- `should successfully pull an existing record` - Tests `dirctl pull` functionality +- `should return identical record when pulled after push` - Validates data integrity across push/pull cycle +- `should push the same record again and return the same cid` - Tests CID determinism +- `should search for records with first skill and return their CID` - Tests general search API (searchv1) with skill queries +- `should search for records with second skill and return their CID` - Validates all skills are preserved during storage +- `should pull a non-existent record and return an error` - Tests error 
handling for missing records +- `should successfully delete a record` - Tests `dirctl delete` functionality +- `should fail to pull a deleted record` - Validates deletion actually removes records + +**Key Features:** +- OASF version compatibility (0.3.1, 0.7.0) +- JSON data integrity validation +- CID determinism testing +- General search API testing (searchv1, not routing) + +#### **`02_search_test.go`** - Search Functionality with Wildcards +**Focus**: Advanced search patterns and wildcard support + +**Test Cases:** +- Exact match searches (no wildcards) +- Wildcard searches with `*` pattern (name, version, skill, locator, module fields) +- Wildcard searches with `?` pattern (single character matching) +- Wildcard searches with `[]` list patterns (character classes and ranges) +- Mixed wildcard patterns and complex combinations +- Negative tests for non-matching patterns +- Edge cases and special characters + +**Key Features:** +- Comprehensive wildcard pattern testing +- Complex search query validation +- Pattern matching edge cases +- Error handling for invalid patterns + +#### **`03_routing_test.go`** - Local Routing Commands +**Focus**: Complete routing subcommand testing in local environment + +**Test Cases:** +- `should push a record first (prerequisite for publish)` - Setup record for routing tests +- `should publish a record to local routing` - Tests `dirctl routing publish` in local mode +- `should fail to publish non-existent record` - Tests publish error handling +- `should list all local records without filters` - Tests `dirctl routing list` without filters +- `should list record by CID` - Tests `dirctl routing list --cid` functionality +- `should list records by skill filter` - Tests `dirctl routing list --skill` with hierarchical matching +- `should list records by specific skill` - Tests specific skill matching +- `should list records by locator filter` - Tests `dirctl routing list --locator` functionality +- `should list records with multiple filters (AND logic)` - Tests multiple filter combination +- `should return empty results for non-matching skill` - Tests filtering with no results +- `should return empty results for non-existent CID` - Tests CID lookup with helpful messages +- `should respect limit parameter` - Tests `dirctl routing list --limit` functionality +- `should search for local records (but return empty in local mode)` - Tests `dirctl routing search` in local mode +- `should handle search with multiple criteria` - Tests complex search queries in local mode +- `should provide helpful guidance when no remote records found` - Tests search guidance messages +- `should show routing statistics for local records` - Tests `dirctl routing info` command +- `should show helpful tips in routing info` - Tests info command guidance +- `should unpublish a previously published record` - Tests `dirctl routing unpublish` command +- `should fail to unpublish non-existent record` - Tests unpublish error handling +- `should not find unpublished record in local list` - Validates unpublish removes from routing +- `should show empty routing info after unpublish` - Tests info after unpublish +- `should validate routing command help` - Tests `dirctl routing --help` functionality + +**Key Features:** +- Complete routing subcommand coverage +- Local-only routing behavior validation +- Error handling and edge cases +- Command integration testing +- Help and guidance message validation + +#### **`04_signature_test.go`** - Cryptographic Signing Operations +**Focus**: Record signing, 
+
+#### **`04_signature_test.go`** - Cryptographic Signing Operations
+**Focus**: Record signing, verification, and cryptographic operations
+
+**Test Cases:**
+- `should create keys for signing` - Tests key generation for signing
+- `should push a record to the store` - Setup record for signing tests
+- `should sign a record with a key pair` - Tests `dirctl sign` command
+- `should verify a signature with a public key on server side` - Tests server-side signature verification
+- `should pull a signature from the store` - Tests signature retrieval
+- `should pull a public key from the store` - Tests public key retrieval
+
+**Key Features:**
+- Cryptographic signing workflows
+- Key management testing
+- Signature verification validation
+
+#### **`05_network_cmd_test.go`** - Network Command Utilities
+**Focus**: Network-specific CLI utilities and key management (local mode)
+
+**Test Cases:**
+- `should generate a peer ID from a valid ED25519 key` - Tests `network info` with existing key
+- `should fail with non-existent key file` - Tests error handling for missing keys
+- `should fail with empty key path` - Tests validation of key path parameter
+- `should generate a new peer ID and save the key to specified output` - Tests `network init` key generation
+- `should fail when output directory doesn't exist and cannot be created` - Tests error handling for invalid paths
+
+**Key Features:**
+- Network identity management
+- Key generation and validation
+- CLI utility testing
+
+### 📚 **Client Package** (`e2e/client/`)
+**Deployment**: Local single node
+**Focus**: Go client library API methods
+**Suite**: `TestClientE2E(t *testing.T)`
+
+#### **`01_client_test.go`** - Client Library API Tests
+**Focus**: Client library API methods with OASF version compatibility
+
+**Test Cases:**
+- `should push a record to store` - Tests `c.Push()` client method
+- `should pull a record from store` - Tests `c.Pull()` client method
+- `should publish a record` - Tests `c.Publish()` routing method
+- `should list published record by one label` - Tests `c.List()` with single query
+- `should list published record by multiple labels` - Tests `c.List()` with multiple queries (AND logic)
+- `should list published record by module and domain labels` - Tests domain and module query support via `RecordQuery`
+- `should search routing for remote records` - Tests `c.SearchRouting()` method
+- `should unpublish a record` - Tests `c.Unpublish()` routing method
+- `should not find unpublished record` - Validates unpublish removes routing announcements
+- `should delete a record from store` - Tests `c.Delete()` storage method
+- `should not find deleted record in store` - Validates delete removes from storage
+
+**Key Features:**
+- Direct client library API testing
+- Routing API validation (publish, list, unpublish, search)
+- OASF version compatibility (0.3.1, 0.7.0)
+- RecordQuery API testing
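+
+In Go, the happy path exercised by these cases reduces to the sketch below (error handling elided for brevity; `recordJSON` is a placeholder for raw OASF bytes):
+
+```go
+ctx := context.Background()
+c, _ := client.New(ctx, client.WithEnvConfig())
+defer c.Close()
+
+record, _ := corev1.UnmarshalRecord(recordJSON) // parse an OASF record
+ref, _ := c.Push(ctx, record)                   // store it; ref carries the CID
+
+// Announce the record so routing queries can find it.
+_ = c.Publish(ctx, &routingv1.PublishRequest{
+	Request: &routingv1.PublishRequest_RecordRefs{
+		RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{ref}},
+	},
+})
+```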
+
+### 🌐 **Network Package** (`e2e/network/`)
+**Deployment**: Network with multiple peers
+**Focus**: CLI commands in network deployment mode with proper test isolation
+**Suite**: `TestNetworkE2E(t *testing.T)`
+
+#### **`01_deploy_test.go`** - Multi-Peer Routing Operations
+**Focus**: Multi-peer routing, DHT operations, local vs remote behavior
+
+**Test Cases:**
+- `should push record_v070.json to peer 1` - Tests storage on specific peer
+- `should pull record_v070.json from peer 1` - Tests local retrieval
+- `should fail to pull record_v070.json from peer 2` - Validates records are peer-specific
+- `should publish record_v070.json to the network on peer 1` - Tests DHT announcement
+- `should fail publish record_v070.json to the network on peer 2` - Tests publish validation
+- `should list local records correctly (List is local-only)` - Tests local-only list behavior
+- `should list by skill correctly on local vs remote peers` - Validates local vs remote filtering
+- `should show routing info statistics` - Tests routing statistics command
+- `should discover remote records via routing search` - Tests network-wide discovery
+
+**Key Features:**
+- Multi-peer DHT testing
+- Local vs remote record validation
+- Network announcement and discovery
+- Complete routing subcommand testing
+- **Cleanup**: `DeferCleanup` ensures clean state for subsequent tests
+
+#### **`02_sync_test.go`** - Peer-to-Peer Synchronization
+**Focus**: Sync service operations, peer-to-peer data replication
+
+**Test Cases:**
+- `should accept valid remote URL format` - Tests sync creation with remote URLs
+- `should execute without arguments and return a list with the created sync` - Tests `sync list` command
+- `should accept a sync ID argument and return the sync status` - Tests `sync status` command
+- `should accept a sync ID argument and delete the sync` - Tests `sync delete` command
+- `should return deleted status` - Validates sync deletion
+- `should push record_v070_sync_v4.json to peer 1` - Setup for sync testing
+- `should publish record_v070_sync_v4.json` - Tests routing publish for sync records
+- `should push record_v070_sync_v5.json to peer 1` - Setup second record for multi-peer sync
+- `should publish record_v070_sync_v5.json` - Tests routing publish for second record
+- `should fail to pull record_v070_sync_v4.json from peer 2` - Validates initial isolation
+- `should create sync from peer 1 to peer 2` - Tests sync creation between peers
+- `should list the sync` - Tests sync listing on target peer
+- `should wait for sync to complete` - Tests sync completion monitoring
+- `should succeed to pull record_v070_sync_v4.json from peer 2 after sync` - Validates sync transferred data
+- `should succeed to search for record_v070_sync_v4.json from peer 2 after sync` - Tests search after sync
+- `should verify the record_v070_sync_v4.json from peer 2 after sync` - Tests verification after sync
+- `should delete sync from peer 2` - Tests sync cleanup
+- `should wait for delete to complete` - Tests sync deletion completion
+- `should create sync from peer 1 to peer 3 using routing search piped to sync create` - Tests advanced sync creation with routing search
+- `should wait for sync to complete` - Tests sync completion for peer 3
+- `should succeed to pull record_v070_sync_v5.json from peer 3 after sync` - Validates selective sync (Audio skill)
+- `should fail to pull record_v070_sync_v4.json from peer 3 after sync` - Validates sync filtering by skills
+
+**Key Features:**
+- Peer-to-peer synchronization testing
+- Sync lifecycle management
+- Data replication validation
+- Multi-peer sync scenarios (peer 1 → peer 2, peer 1 → peer 3)
+- Selective sync based on routing search and skill filtering
+- Uses general search API (searchv1, not routing)
+- **Cleanup**: `DeferCleanup` ensures clean state for subsequent tests
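+
+The sync lifecycle above follows a create → list → status → delete shape, sketched below. Whether `create` prints the sync ID, and the exact remote-address format, are assumptions; the tests drive these commands against specific peers:
+
+```bash
+SYNC_ID=$(dirctl sync create <remote-peer-url>)  # start replicating from a peer
+dirctl sync list                                 # the new sync should appear
+dirctl sync status "$SYNC_ID"                    # poll until the sync completes
+dirctl sync delete "$SYNC_ID"                    # tear it down when done
+```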
+
+#### **`03_search_test.go`** - Remote Routing Search with OR Logic
+**Focus**: Remote routing search functionality with OR logic and minMatchScore
+
+**Test Cases:**
+- `should push record_v070.json to peer 1` - Setup record for search tests
+- `should publish record_v070.json to routing on peer 1 only` - Creates remote search scenario
+- `should verify setup - peer 1 has local record, peer 2 does not` - Validates test setup
+- `should debug: test working pattern first (minScore=1)` - Tests basic search functionality
+- `should debug: test exact skill matching (minScore=1)` - Tests exact skill searches
+- `should debug: test two skills with minScore=2` - Tests multiple skill matching
+- `should demonstrate OR logic success - minScore=2 finds record` - Tests OR logic with partial matches
+- `should demonstrate threshold filtering - minScore=3 filters out record` - Tests score thresholds
+- `should demonstrate single query match - minScore=1 finds record` - Tests single query scenarios
+- `should demonstrate all queries match - minScore=2 with 2 real queries` - Tests complete matches
+- `should handle minScore=0 (should default to minScore=1)` - Tests edge case handling
+- `should handle empty queries with appropriate error` - Tests error handling
+
+**Key Features:**
+- Remote routing search testing (routingv1)
+- OR logic and minMatchScore validation
+- DHT discovery testing
+- Complex search query scenarios
+- **Cleanup**: `DeferCleanup` ensures clean state after all tests
+
+#### **`cleanup.go`** - Inter-Test Cleanup Utilities
+**Focus**: Shared cleanup utilities for network test isolation
+
+**Functions:**
+- `CleanupNetworkRecords()` - Removes CIDs from all peers (unpublish + delete)
+- `RegisterCIDForCleanup()` - Tracks CIDs for cleanup by test file
+- `CleanupAllNetworkTests()` - Comprehensive cleanup for AfterSuite
+
+**Key Features:**
+- **Solves test contamination**: Ensures clean state between test files
+- **Multi-peer cleanup**: Removes records from all peers (Peer1, Peer2, Peer3)
+- **Dual operations**: Both unpublish (routing) and delete (storage)
+- **Graceful handling**: Continues cleanup even if individual operations fail
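+
+A sketch of how a numbered test file is expected to use these helpers with Ginkgo's `DeferCleanup` (the parameter lists are assumptions based on the descriptions above, not the actual signatures):
+
+```go
+var _ = ginkgo.Describe("Deploy", ginkgo.Ordered, func() {
+	ginkgo.BeforeAll(func() {
+		// Runs after this file's specs finish, so the next numbered
+		// file starts against clean peers.
+		ginkgo.DeferCleanup(func() {
+			CleanupNetworkRecords("01_deploy") // assumed: unpublish + delete registered CIDs on all peers
+		})
+	})
+
+	ginkgo.It("should push a record to peer 1", func() {
+		cid := pushRecordToPeer1()              // hypothetical helper
+		RegisterCIDForCleanup("01_deploy", cid) // assumed signature
+	})
+})
+```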
+
+## 🚀 **Test Execution Commands:**
+
+### **All E2E Tests:**
+```bash
+# Run all e2e tests (client → local CLI → network CLI)
+task test:e2e
+task e2e
+```
+
+### **Local Deployment Tests:**
+```bash
+# Run local tests (client library + CLI with shared infrastructure)
+task test:e2e:local
+task e2e:local
+
+# Run individual test suites (with dedicated infrastructure)
+task test:e2e:client     # Client library tests only
+task test:e2e:local:cli  # Local CLI tests only
+```
+
+### **Network Deployment Tests:**
+```bash
+# Run network tests (multi-peer CLI with proper cleanup)
+task test:e2e:network
+task e2e:network
+```
+
+## 📋 **Test Execution Flow:**
+
+### **🏠 Local Mode Execution:**
+```
+task test:e2e:local:
+├── 🏗️ Setup local Kubernetes (single node)
+├── 🔗 Setup port-forwarding
+├── 📚 Run client library tests (Go APIs)
+├── ⚙️ Run local CLI tests (dirctl commands)
+└── 🧹 Cleanup infrastructure
+```
+
+### **🌐 Network Mode Execution:**
+```
+task test:e2e:network:
+├── 🏗️ Setup network Kubernetes (multi-peer)
+├── 🔗 Setup port-forwarding
+├── 🚀 Run 01_deploy_test.go → DeferCleanup → Clean all peers
+├── 🔄 Run 02_sync_test.go → DeferCleanup → Clean all peers
+├── 🔍 Run 03_search_test.go → DeferCleanup → Clean all peers
+└── 🧹 Cleanup infrastructure
+```
+
+## 🎯 **Package Organization Benefits:**
+
+### **✅ True Isolation:**
+- **Local vs Network**: Separate Go packages prevent cross-contamination
+- **CLI vs Client**: Different test suites for different API types
+- **Inter-test cleanup**: Network tests clean up between files using `cleanup.go`
+
+### **✅ Maintainability:**
+- **Focused packages**: Each package has clear responsibility
+- **Numbered files**: Predictable execution order within packages
+- **Shared utilities**: Common code in `shared/` package
+- **Clean architecture**: Logical separation of concerns
+
+### **✅ Performance:**
+- **Shared infrastructure**: Local tests share single deployment
+- **Parallel capability**: Different packages can run independently
+- **Efficient cleanup**: Targeted cleanup only where needed
+
+## 🎯 **Key Test Features:**
+
+### **✅ Comprehensive Coverage:**
+- **103+ test cases** across all major functionality
+- **OASF version compatibility** (0.3.1, 0.7.0)
+- **Both API types** - Client library and CLI commands
+- **Error handling** - Validation of failure scenarios
+- **Integration testing** - Multi-step workflows
+
+### **✅ Search API Testing:**
+- **General Search** (searchv1) - Tested in `local/01_storage_test.go` and `network/02_sync_test.go`
+- **Routing Search** (routingv1) - Tested in `client/01_client_test.go`, `local/03_routing_test.go`, and `network/` tests
+- **Network Discovery** - Multi-peer search scenarios in `network/03_search_test.go`
+- **Wildcard Patterns** - Comprehensive pattern testing in `local/02_search_test.go`
+
+### **✅ Routing Operations:**
+- **Complete lifecycle** - Publish → List → Search → Unpublish
+- **Local vs Remote** - Clear distinction and validation in network tests
+- **Statistics** - Routing info and summary data
+- **Error scenarios** - Comprehensive failure case testing
+- **Test Isolation** - Proper cleanup between network test files
+
+### **✅ Architecture Improvements:**
+- **Package separation** - True isolation between deployment modes
+- **API type separation** - CLI tests vs Go library tests in separate packages
+- **Controlled execution** - Numbered files ensure predictable test order
+- **Efficient infrastructure** - Shared deployment for compatible test suites
+- **Robust cleanup** - Inter-test cleanup prevents contamination
+
+## 🛠️ **Development Workflow:**
+
+### **Working on Local Features:**
+```bash
+# Fast feedback during development
+task test:e2e:client   # Test Go library changes
+
+# Full local testing
+task test:e2e:local    # Test both client and CLI
+```
+
+### **Working on Network Features:**
+```bash
+# Test specific network functionality
+task test:e2e:network  # Test multi-peer scenarios with proper cleanup
+```
+
+### **Debugging Test Issues:**
+```bash
+# Run individual test files (with Ginkgo focus)
+go test -C ./e2e/network . -ginkgo.focus="Deploy"
+go test -C ./e2e/local . -ginkgo.focus="Storage"
+```
diff --git a/e2e/client/01_client_test.go b/e2e/client/01_client_test.go
index 07e174532..c35eeedd2 100644
--- a/e2e/client/01_client_test.go
+++ b/e2e/client/01_client_test.go
@@ -1,324 +1,324 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package client
-
-import (
-	"context"
-	"strings"
-	"time"
-
-	corev1 "github.com/agntcy/dir/api/core/v1"
-	routingv1 "github.com/agntcy/dir/api/routing/v1"
-	"github.com/agntcy/dir/client"
-	"github.com/agntcy/dir/e2e/shared/config"
-	"github.com/agntcy/dir/e2e/shared/testdata"
-	"github.com/agntcy/dir/e2e/shared/utils"
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-)
-
-// convertLabelsToRecordQueries converts legacy label format to RecordQuery format for e2e tests.
-func convertLabelsToClientRecordQueries(labels []string) []*routingv1.RecordQuery { - var queries []*routingv1.RecordQuery - - for _, label := range labels { - switch { - case strings.HasPrefix(label, "/skills/"): - skillName := strings.TrimPrefix(label, "/skills/") - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: skillName, - }) - case strings.HasPrefix(label, "/domains/"): - domainName := strings.TrimPrefix(label, "/domains/") - _ = domainName - // Note: domains might need to be mapped to locator or handled differently - // For now, skip domains as they're not in the current RecordQueryType - case strings.HasPrefix(label, "/modules/"): - moduleName := strings.TrimPrefix(label, "/modules/") - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: moduleName, - }) - case strings.HasPrefix(label, "/locators/"): - locatorType := strings.TrimPrefix(label, "/locators/") - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: locatorType, - }) - } - } - - return queries -} - -var _ = ginkgo.Describe("Running client end-to-end tests using a local single node deployment", ginkgo.Ordered, ginkgo.Serial, func() { - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - }) - - var c *client.Client - var ctx context.Context - - ginkgo.BeforeAll(func() { - ctx = context.Background() - - // Create a new client - var err error - c, err = client.New(ctx, client.WithEnvConfig()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.AfterAll(func() { - if c != nil { - c.Close() - } - }) - - // Test cases for each OASF version (matches testdata files) - testVersions := []struct { - name string - jsonData []byte - expectedSkillLabels []string - expectedDomainLabel string - expectedModuleLabel string - }{ - { - name: "Record_031_Agent", - jsonData: testdata.ExpectedRecordV031JSON, - expectedSkillLabels: []string{ - "/skills/Natural Language Processing/Text Completion", - "/skills/Natural Language Processing/Problem Solving", - }, - expectedModuleLabel: "", // record_031.json has no modules or extensions - }, - { - name: "Record_070_Agent", - jsonData: testdata.ExpectedRecordV070JSON, - expectedSkillLabels: []string{ - "/skills/natural_language_processing/natural_language_generation/text_completion", - "/skills/natural_language_processing/analytical_reasoning/problem_solving", - }, - expectedDomainLabel: "/domains/life_science/biotechnology", - expectedModuleLabel: "/modules/runtime/model", // From record_070.json modules - }, - { - name: "Record_080_Agent", - jsonData: testdata.ExpectedRecordV080JSON, - expectedSkillLabels: []string{ - "/skills/natural_language_processing/natural_language_generation/text_completion", - "/skills/natural_language_processing/analytical_reasoning/problem_solving", - }, - expectedDomainLabel: "/domains/life_science/biotechnology", - expectedModuleLabel: "/modules/core/llm/model", // From record_080.json modules - }, - } - - // Test each OASF version dynamically - for _, v := range testVersions { - version := v // Capture loop variable by value to avoid closure issues - ginkgo.Context(version.name, ginkgo.Ordered, ginkgo.Serial, func() { - var record *corev1.Record - var canonicalData []byte - var recordRef *corev1.RecordRef // Shared across the business flow - - // Load the record once 
per version context (inline initialization) - var err error - record, err = corev1.UnmarshalRecord(version.jsonData) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Use canonical marshaling for CID validation - canonicalData, err = record.Marshal() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Step 1: Push - ginkgo.It("should push a record to store", func() { - var err error - recordRef, err = c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Validate that the returned CID correctly represents the pushed data using canonical marshaling - utils.ValidateCIDAgainstData(recordRef.GetCid(), canonicalData) - }) - - // Step 2: Pull (depends on push) - ginkgo.It("should pull a record from store", func() { - // Pull the record object (using recordRef from push) - pulledRecord, err := c.Pull(ctx, recordRef) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Get canonical data from pulled record for comparison - pulledCanonicalData, err := pulledRecord.Marshal() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Compare pushed and pulled records using canonical data - equal, err := utils.CompareOASFRecords(canonicalData, pulledCanonicalData) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(equal).To(gomega.BeTrue(), "Pushed and pulled records should be identical") - }) - - // Step 3: Publish (depends on push) - ginkgo.It("should publish a record", func() { - err := c.Publish(ctx, &routingv1.PublishRequest{ - Request: &routingv1.PublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{ - Refs: []*corev1.RecordRef{recordRef}, - }, - }, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Wait at least 10 seconds to ensure the record is published. - time.Sleep(15 * time.Second) - }) - - // Step 4: List by one label (depends on publish) - ginkgo.It("should list published record by one label", func() { - // Convert skill label to RecordQuery - queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]}) - - itemsChan, err := c.List(ctx, &routingv1.ListRequest{ - Queries: queries, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Collect items from the channel using utility. - items := utils.CollectListItems(itemsChan) - - // Validate the response. - gomega.Expect(items).To(gomega.HaveLen(1)) - for _, item := range items { - gomega.Expect(item).NotTo(gomega.BeNil()) - gomega.Expect(item.GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) - } - }) - - // Step 5: List by multiple labels (depends on publish) - ginkgo.It("should list published record by multiple labels", func() { - // Convert all skill labels to RecordQueries - queries := convertLabelsToClientRecordQueries(version.expectedSkillLabels) - - itemsChan, err := c.List(ctx, &routingv1.ListRequest{ - Queries: queries, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Collect items from the channel using utility. - items := utils.CollectListItems(itemsChan) - - // Validate the response. - gomega.Expect(items).To(gomega.HaveLen(1)) - for _, item := range items { - gomega.Expect(item).NotTo(gomega.BeNil()) - gomega.Expect(item.GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) - } - }) - - // Step 6: List by module and domain labels (depends on publish) - ginkgo.It("should list published record by module and domain labels", func() { - // ✅ Domain and module queries are now supported in RecordQuery API! 
- // Test domain query - if version.expectedDomainLabel != "" { - domainQuery := &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "life_science/biotechnology", // From record_070.json domains - } - domainItemsChan, err := c.List(ctx, &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{domainQuery}, - Limit: utils.Ptr[uint32](10), - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - domainResults := utils.CollectListItems(domainItemsChan) - gomega.Expect(domainResults).ToNot(gomega.BeEmpty(), "Should find record with domain query") - gomega.Expect(domainResults[0].GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) - } - - // Test module query using the expected module label from test data - moduleQueries := convertLabelsToClientRecordQueries([]string{version.expectedModuleLabel}) - if len(moduleQueries) > 0 { - moduleItemsChan, err := c.List(ctx, &routingv1.ListRequest{ - Queries: moduleQueries, - Limit: utils.Ptr[uint32](10), - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - moduleResults := utils.CollectListItems(moduleItemsChan) - gomega.Expect(moduleResults).ToNot(gomega.BeEmpty(), "Should find record with module query") - gomega.Expect(moduleResults[0].GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) - } - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Queries working correctly") - }) - - // Step 7: Search routing for remote records (depends on publish) - ginkgo.It("should search routing for remote records", func() { - // Convert skill labels to RecordQuery format - queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]}) - - searchChan, err := c.SearchRouting(ctx, &routingv1.SearchRequest{ - Queries: queries, - Limit: utils.Ptr[uint32](10), - MinMatchScore: utils.Ptr[uint32](1), - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Collect search results using utility - results := utils.CollectSearchItems(searchChan) - - // For single-peer testing, we should get an empty slice (no remote records) - // This test validates the SearchRouting method works without errors - // In multi-peer e2e tests, we'll test actual remote discovery - gomega.Expect(results).To(gomega.BeEmpty()) // Should be empty slice in local mode - }) - - // Step 8: Unpublish (depends on publish) - ginkgo.It("should unpublish a record", func() { - err := c.Unpublish(ctx, &routingv1.UnpublishRequest{ - Request: &routingv1.UnpublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{ - Refs: []*corev1.RecordRef{recordRef}, - }, - }, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - // Step 8: Verify unpublished record is not found (depends on unpublish) - ginkgo.It("should not find unpublished record", func() { - // Convert skill label to RecordQuery - queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]}) - - itemsChan, err := c.List(ctx, &routingv1.ListRequest{ - Queries: queries, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Collect items from the channel using utility. - items := utils.CollectListItems(itemsChan) - - // Validate the response. 
- gomega.Expect(items).To(gomega.BeEmpty()) - }) - - // Step 9: Delete (depends on previous steps) - ginkgo.It("should delete a record from store", func() { - err := c.Delete(ctx, recordRef) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - // Step 10: Verify deleted record is not found (depends on delete) - ginkgo.It("should not find deleted record in store", func() { - // Add a small delay to ensure delete operation is fully processed - time.Sleep(100 * time.Millisecond) - - pulledRecord, err := c.Pull(ctx, recordRef) - gomega.Expect(err).To(gomega.HaveOccurred()) - gomega.Expect(pulledRecord).To(gomega.BeNil()) - }) - }) - } -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "context" + "strings" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// convertLabelsToRecordQueries converts legacy label format to RecordQuery format for e2e tests. +func convertLabelsToClientRecordQueries(labels []string) []*routingv1.RecordQuery { + var queries []*routingv1.RecordQuery + + for _, label := range labels { + switch { + case strings.HasPrefix(label, "/skills/"): + skillName := strings.TrimPrefix(label, "/skills/") + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: skillName, + }) + case strings.HasPrefix(label, "/domains/"): + domainName := strings.TrimPrefix(label, "/domains/") + _ = domainName + // Note: domains might need to be mapped to locator or handled differently + // For now, skip domains as they're not in the current RecordQueryType + case strings.HasPrefix(label, "/modules/"): + moduleName := strings.TrimPrefix(label, "/modules/") + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: moduleName, + }) + case strings.HasPrefix(label, "/locators/"): + locatorType := strings.TrimPrefix(label, "/locators/") + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: locatorType, + }) + } + } + + return queries +} + +var _ = ginkgo.Describe("Running client end-to-end tests using a local single node deployment", ginkgo.Ordered, ginkgo.Serial, func() { + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + }) + + var c *client.Client + var ctx context.Context + + ginkgo.BeforeAll(func() { + ctx = context.Background() + + // Create a new client + var err error + c, err = client.New(ctx, client.WithEnvConfig()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.AfterAll(func() { + if c != nil { + c.Close() + } + }) + + // Test cases for each OASF version (matches testdata files) + testVersions := []struct { + name string + jsonData []byte + expectedSkillLabels []string + expectedDomainLabel string + expectedModuleLabel string + }{ + { + name: "Record_031_Agent", + jsonData: testdata.ExpectedRecordV031JSON, + expectedSkillLabels: []string{ + "/skills/Natural Language Processing/Text Completion", + "/skills/Natural Language Processing/Problem Solving", + }, + expectedModuleLabel: "", // record_031.json has no modules or 
extensions + }, + { + name: "Record_070_Agent", + jsonData: testdata.ExpectedRecordV070JSON, + expectedSkillLabels: []string{ + "/skills/natural_language_processing/natural_language_generation/text_completion", + "/skills/natural_language_processing/analytical_reasoning/problem_solving", + }, + expectedDomainLabel: "/domains/life_science/biotechnology", + expectedModuleLabel: "/modules/runtime/model", // From record_070.json modules + }, + { + name: "Record_080_Agent", + jsonData: testdata.ExpectedRecordV080JSON, + expectedSkillLabels: []string{ + "/skills/natural_language_processing/natural_language_generation/text_completion", + "/skills/natural_language_processing/analytical_reasoning/problem_solving", + }, + expectedDomainLabel: "/domains/life_science/biotechnology", + expectedModuleLabel: "/modules/core/llm/model", // From record_080.json modules + }, + } + + // Test each OASF version dynamically + for _, v := range testVersions { + version := v // Capture loop variable by value to avoid closure issues + ginkgo.Context(version.name, ginkgo.Ordered, ginkgo.Serial, func() { + var record *corev1.Record + var canonicalData []byte + var recordRef *corev1.RecordRef // Shared across the business flow + + // Load the record once per version context (inline initialization) + var err error + record, err = corev1.UnmarshalRecord(version.jsonData) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Use canonical marshaling for CID validation + canonicalData, err = record.Marshal() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Step 1: Push + ginkgo.It("should push a record to store", func() { + var err error + recordRef, err = c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Validate that the returned CID correctly represents the pushed data using canonical marshaling + utils.ValidateCIDAgainstData(recordRef.GetCid(), canonicalData) + }) + + // Step 2: Pull (depends on push) + ginkgo.It("should pull a record from store", func() { + // Pull the record object (using recordRef from push) + pulledRecord, err := c.Pull(ctx, recordRef) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Get canonical data from pulled record for comparison + pulledCanonicalData, err := pulledRecord.Marshal() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Compare pushed and pulled records using canonical data + equal, err := utils.CompareOASFRecords(canonicalData, pulledCanonicalData) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(equal).To(gomega.BeTrue(), "Pushed and pulled records should be identical") + }) + + // Step 3: Publish (depends on push) + ginkgo.It("should publish a record", func() { + err := c.Publish(ctx, &routingv1.PublishRequest{ + Request: &routingv1.PublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{ + Refs: []*corev1.RecordRef{recordRef}, + }, + }, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Wait at least 10 seconds to ensure the record is published. + time.Sleep(15 * time.Second) + }) + + // Step 4: List by one label (depends on publish) + ginkgo.It("should list published record by one label", func() { + // Convert skill label to RecordQuery + queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]}) + + itemsChan, err := c.List(ctx, &routingv1.ListRequest{ + Queries: queries, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Collect items from the channel using utility. + items := utils.CollectListItems(itemsChan) + + // Validate the response. 
+ gomega.Expect(items).To(gomega.HaveLen(1)) + for _, item := range items { + gomega.Expect(item).NotTo(gomega.BeNil()) + gomega.Expect(item.GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) + } + }) + + // Step 5: List by multiple labels (depends on publish) + ginkgo.It("should list published record by multiple labels", func() { + // Convert all skill labels to RecordQueries + queries := convertLabelsToClientRecordQueries(version.expectedSkillLabels) + + itemsChan, err := c.List(ctx, &routingv1.ListRequest{ + Queries: queries, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Collect items from the channel using utility. + items := utils.CollectListItems(itemsChan) + + // Validate the response. + gomega.Expect(items).To(gomega.HaveLen(1)) + for _, item := range items { + gomega.Expect(item).NotTo(gomega.BeNil()) + gomega.Expect(item.GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) + } + }) + + // Step 6: List by module and domain labels (depends on publish) + ginkgo.It("should list published record by module and domain labels", func() { + // ✅ Domain and module queries are now supported in RecordQuery API! + // Test domain query + if version.expectedDomainLabel != "" { + domainQuery := &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "life_science/biotechnology", // From record_070.json domains + } + domainItemsChan, err := c.List(ctx, &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{domainQuery}, + Limit: utils.Ptr[uint32](10), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + domainResults := utils.CollectListItems(domainItemsChan) + gomega.Expect(domainResults).ToNot(gomega.BeEmpty(), "Should find record with domain query") + gomega.Expect(domainResults[0].GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) + } + + // Test module query using the expected module label from test data + moduleQueries := convertLabelsToClientRecordQueries([]string{version.expectedModuleLabel}) + if len(moduleQueries) > 0 { + moduleItemsChan, err := c.List(ctx, &routingv1.ListRequest{ + Queries: moduleQueries, + Limit: utils.Ptr[uint32](10), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + moduleResults := utils.CollectListItems(moduleItemsChan) + gomega.Expect(moduleResults).ToNot(gomega.BeEmpty(), "Should find record with module query") + gomega.Expect(moduleResults[0].GetRecordRef().GetCid()).To(gomega.Equal(recordRef.GetCid())) + } + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Queries working correctly") + }) + + // Step 7: Search routing for remote records (depends on publish) + ginkgo.It("should search routing for remote records", func() { + // Convert skill labels to RecordQuery format + queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]}) + + searchChan, err := c.SearchRouting(ctx, &routingv1.SearchRequest{ + Queries: queries, + Limit: utils.Ptr[uint32](10), + MinMatchScore: utils.Ptr[uint32](1), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Collect search results using utility + results := utils.CollectSearchItems(searchChan) + + // For single-peer testing, we should get an empty slice (no remote records) + // This test validates the SearchRouting method works without errors + // In multi-peer e2e tests, we'll test actual remote discovery + gomega.Expect(results).To(gomega.BeEmpty()) // Should be empty slice in local mode + }) + + // Step 8: Unpublish (depends on publish) + ginkgo.It("should unpublish a record", func() { + err := 
c.Unpublish(ctx, &routingv1.UnpublishRequest{
+				Request: &routingv1.UnpublishRequest_RecordRefs{
+					RecordRefs: &routingv1.RecordRefs{
+						Refs: []*corev1.RecordRef{recordRef},
+					},
+				},
+			})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		})
+
+		// Step 9: Verify unpublished record is not found (depends on unpublish)
+		ginkgo.It("should not find unpublished record", func() {
+			// Convert skill label to RecordQuery
+			queries := convertLabelsToClientRecordQueries([]string{version.expectedSkillLabels[0]})
+
+			itemsChan, err := c.List(ctx, &routingv1.ListRequest{
+				Queries: queries,
+			})
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			// Collect items from the channel using utility.
+			items := utils.CollectListItems(itemsChan)
+
+			// Validate the response.
+			gomega.Expect(items).To(gomega.BeEmpty())
+		})
+
+		// Step 10: Delete (depends on previous steps)
+		ginkgo.It("should delete a record from store", func() {
+			err := c.Delete(ctx, recordRef)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		})
+
+		// Step 11: Verify deleted record is not found (depends on delete)
+		ginkgo.It("should not find deleted record in store", func() {
+			// Add a small delay to ensure delete operation is fully processed
+			time.Sleep(100 * time.Millisecond)
+
+			pulledRecord, err := c.Pull(ctx, recordRef)
+			gomega.Expect(err).To(gomega.HaveOccurred())
+			gomega.Expect(pulledRecord).To(gomega.BeNil())
+		})
+		})
+	}
+})
diff --git a/e2e/client/02_events_test.go b/e2e/client/02_events_test.go
index 2f110b606..67d5bb238 100644
--- a/e2e/client/02_events_test.go
+++ b/e2e/client/02_events_test.go
@@ -1,535 +1,535 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package client
-
-import (
-	"context"
-	"time"
-
-	corev1 "github.com/agntcy/dir/api/core/v1"
-	eventsv1 "github.com/agntcy/dir/api/events/v1"
-	routingv1 "github.com/agntcy/dir/api/routing/v1"
-	"github.com/agntcy/dir/client"
-	"github.com/agntcy/dir/client/streaming"
-	"github.com/agntcy/dir/e2e/shared/config"
-	"github.com/agntcy/dir/e2e/shared/testdata"
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-)
-
-// receiveEvent is a helper that receives a single event from a StreamResult.
-// It returns the event response or fails the test on error/timeout.
-func receiveEvent(ctx context.Context, result streaming.StreamResult[eventsv1.ListenResponse]) *eventsv1.ListenResponse {
-	select {
-	case resp := <-result.ResCh():
-		return resp
-	case err := <-result.ErrCh():
-		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "stream error")
-
-		return nil
-	case <-result.DoneCh():
-		ginkgo.Fail("stream ended unexpectedly")
-
-		return nil
-	case <-ctx.Done():
-		gomega.Expect(ctx.Err()).NotTo(gomega.HaveOccurred(), "context timeout")
-
-		return nil
-	}
-}
-
-// tryReceiveEvent attempts to receive an event but doesn't fail on timeout.
-// Returns (response, error) where error is non-nil on timeout/completion.
-func tryReceiveEvent(ctx context.Context, result streaming.StreamResult[eventsv1.ListenResponse]) (*eventsv1.ListenResponse, error) { - select { - case resp := <-result.ResCh(): - return resp, nil - case err := <-result.ErrCh(): - return nil, err - case <-result.DoneCh(): - // Stream ended normally - not an error, just no more events - //nolint:nilnil - return nil, nil - case <-ctx.Done(): - // Return unwrapped context error so callers can check for context.Canceled - //nolint:wrapcheck - return nil, ctx.Err() - } -} - -var _ = ginkgo.Describe("Event Streaming E2E Tests", ginkgo.Ordered, ginkgo.Serial, func() { - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - }) - - var c *client.Client - var ctx context.Context - - ginkgo.BeforeAll(func() { - ctx = context.Background() - - // Create a new client - var err error - c, err = client.New(ctx, client.WithEnvConfig()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - // Clean up all testdata records after all event tests - // This prevents interfering with existing 01_client_test.go tests - ginkgo.AfterAll(func() { - // Get CIDs for V031, V070, and V080 testdata - v031Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - v070Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - v080Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV080JSON) - - // IMPORTANT: Unpublish first to remove routing labels, then delete from store - if v031Record != nil { - v031Ref := &corev1.RecordRef{Cid: v031Record.GetCid()} - _ = c.Unpublish(context.Background(), &routingv1.UnpublishRequest{ - Request: &routingv1.UnpublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v031Ref}}, - }, - }) - _ = c.Delete(context.Background(), v031Ref) - } - if v070Record != nil { - v070Ref := &corev1.RecordRef{Cid: v070Record.GetCid()} - _ = c.Unpublish(context.Background(), &routingv1.UnpublishRequest{ - Request: &routingv1.UnpublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v070Ref}}, - }, - }) - _ = c.Delete(context.Background(), v070Ref) - } - if v080Record != nil { - v080Ref := &corev1.RecordRef{Cid: v080Record.GetCid()} - _ = c.Unpublish(context.Background(), &routingv1.UnpublishRequest{ - Request: &routingv1.UnpublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v080Ref}}, - }, - }) - _ = c.Delete(context.Background(), v080Ref) - } - - // Close the client - if c != nil { - c.Close() - } - }) - - ginkgo.Context("RECORD_PUSHED events", func() { - ginkgo.It("should receive RECORD_PUSHED event when pushing a record", func() { - // Subscribe to RECORD_PUSHED events - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Create and push a record in background - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - - time.Sleep(200 * time.Millisecond) - - // Use valid test record from testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, pushErr := c.Push(context.Background(), record) - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - }() - - // Receive the event - resp := 
receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - - event := resp.GetEvent() - gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - gomega.Expect(event.GetResourceId()).NotTo(gomega.BeEmpty()) - gomega.Expect(event.GetLabels()).NotTo(gomega.BeEmpty()) // V031 has skills labels - }) - }) - - ginkgo.Context("RECORD_PUBLISHED events", func() { - ginkgo.It("should receive RECORD_PUBLISHED event when publishing a record", func() { - // First push a record using valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Subscribe to RECORD_PUBLISHED events - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Publish the record in background - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - publishErr := c.Publish(context.Background(), &routingv1.PublishRequest{ - Request: &routingv1.PublishRequest_RecordRefs{ - RecordRefs: &routingv1.RecordRefs{ - Refs: []*corev1.RecordRef{ref}, - }, - }, - }) - gomega.Expect(publishErr).NotTo(gomega.HaveOccurred()) - - // Wait for async publish to complete - time.Sleep(2 * time.Second) - }() - - // Receive the event - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - - event := resp.GetEvent() - gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED)) - gomega.Expect(event.GetResourceId()).To(gomega.Equal(ref.GetCid())) - gomega.Expect(event.GetLabels()).NotTo(gomega.BeEmpty()) - }) - }) - - ginkgo.Context("RECORD_DELETED events", func() { - ginkgo.It("should receive RECORD_DELETED event when deleting a record", func() { - // First push a record using valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Subscribe to RECORD_DELETED events - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_DELETED}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Delete the record in background - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - deleteErr := c.Delete(context.Background(), ref) - gomega.Expect(deleteErr).NotTo(gomega.HaveOccurred()) - }() - - // Receive the event - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - - event := resp.GetEvent() - gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)) - gomega.Expect(event.GetResourceId()).To(gomega.Equal(ref.GetCid())) - }) - }) - - ginkgo.Context("Event filtering", func() { - ginkgo.It("should filter events by label", func() { - // Subscribe with label filter (natural_language_processing is in V070 record) - streamCtx, cancel := context.WithTimeout(ctx, 
10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - LabelFilters: []string{"/skills/natural_language_processing"}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push a matching record - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Use V070 test record which has natural_language_processing skills - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, _ = c.Push(context.Background(), record) - }() - - // Should receive the matching event - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - gomega.Expect(resp.GetEvent().GetLabels()).To(gomega.ContainElement(gomega.ContainSubstring("/skills/natural_language_processing"))) - }) - - ginkgo.It("should filter events by CID", func() { - // First push a record using valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Subscribe with CID filter - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - CidFilters: []string{ref.GetCid()}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push another record and pull the filtered one - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Push another record (different CID, should be filtered out) - otherRecord, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, _ = c.Push(context.Background(), otherRecord) - - time.Sleep(100 * time.Millisecond) - - // Pull the filtered record (triggers RECORD_PULLED for the target CID) - _, _ = c.Pull(context.Background(), ref) - }() - - // Should receive only the event for the filtered CID - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - gomega.Expect(resp.GetEvent().GetResourceId()).To(gomega.Equal(ref.GetCid())) - }) - - ginkgo.It("should filter events by event type", func() { - // Subscribe only to PUSHED events (not PULLED or DELETED) - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Perform multiple operations - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Use valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push (should trigger PUSHED event - we should receive this) - ref, pushErr := c.Push(context.Background(), record) - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - - time.Sleep(100 * time.Millisecond) - - // Pull (triggers PULLED event - should be filtered out) - _, _ = c.Pull(context.Background(), ref) - }() - - // Should receive only the PUSHED event - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) - 
gomega.Expect(resp.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - - // Try to receive another event (should timeout - pull event was filtered) - resp2, err := tryReceiveEvent(streamCtx, result) - if err == nil && resp2 != nil { - // If we got another event, it should still be PUSHED (not PULLED) - gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - } - }) - }) - - ginkgo.Context("Multiple subscribers", func() { - ginkgo.It("should deliver events to multiple subscribers", func() { - // Create two subscriptions - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result1, err1 := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }) - gomega.Expect(err1).NotTo(gomega.HaveOccurred()) - - result2, err2 := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }) - gomega.Expect(err2).NotTo(gomega.HaveOccurred()) - - // Push a record - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Use valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, pushErr := c.Push(context.Background(), record) - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - }() - - // Both streams should receive the same event - resp1 := receiveEvent(streamCtx, result1) - gomega.Expect(resp1.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - - resp2 := receiveEvent(streamCtx, result2) - gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - - // Both should have same resource ID (non-empty) - gomega.Expect(resp1.GetEvent().GetResourceId()).NotTo(gomega.BeEmpty()) - gomega.Expect(resp1.GetEvent().GetResourceId()).To(gomega.Equal(resp2.GetEvent().GetResourceId())) - }) - }) - - ginkgo.Context("Stream lifecycle", func() { - ginkgo.It("should handle context cancellation gracefully", func() { - cancelCtx, cancelFunc := context.WithCancel(ctx) - - result, err := c.ListenStream(cancelCtx, &eventsv1.ListenRequest{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Cancel context immediately - cancelFunc() - - // Should receive error or completion - _, err = tryReceiveEvent(cancelCtx, result) - gomega.Expect(err).To(gomega.HaveOccurred()) - }) - - ginkgo.It("should receive multiple events in sequence", func() { - streamCtx, cancel := context.WithTimeout(ctx, 15*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{ - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - }, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Perform multiple operations - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Push first record - record1, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ref1, _ := c.Push(context.Background(), record1) - - time.Sleep(200 * time.Millisecond) - - // Push second record - record2, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, _ = 
c.Push(context.Background(), record2) - - time.Sleep(200 * time.Millisecond) - - // Pull first record - _, _ = c.Pull(context.Background(), ref1) - }() - - // Receive first PUSHED event - resp1 := receiveEvent(streamCtx, result) - gomega.Expect(resp1.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - - // Receive second PUSHED event - resp2 := receiveEvent(streamCtx, result) - gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) - - // Receive PULLED event - resp3 := receiveEvent(streamCtx, result) - gomega.Expect(resp3.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PULLED)) - }) - }) - - ginkgo.Context("Event metadata", func() { - ginkgo.It("should include labels in record events", func() { - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push a record with multiple skills - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Use valid testdata (V031 has multiple skills) - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, _ = c.Push(context.Background(), record) - }() - - // Receive event and verify labels - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent().GetLabels()).NotTo(gomega.BeEmpty()) - // V031 has "Natural Language Processing" skills - gomega.Expect(resp.GetEvent().GetLabels()).To(gomega.ContainElement(gomega.ContainSubstring("/skills/"))) - }) - - ginkgo.It("should include timestamp in all events", func() { - streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push a record - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - time.Sleep(200 * time.Millisecond) - - // Use valid testdata - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - _, _ = c.Push(context.Background(), record) - }() - - // Receive event and verify timestamp - resp := receiveEvent(streamCtx, result) - gomega.Expect(resp.GetEvent().GetTimestamp()).NotTo(gomega.BeNil()) - gomega.Expect(resp.GetEvent().GetTimestamp().AsTime()).To(gomega.BeTemporally("~", time.Now(), 5*time.Second)) - }) - }) - - ginkgo.Context("No events scenario", func() { - ginkgo.It("should timeout when no events occur", func() { - // Subscribe with very specific filter that won't match - streamCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ - CidFilters: []string{"bafynonexistent123456789"}, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Try to receive - should eventually timeout - _, err = tryReceiveEvent(streamCtx, result) - gomega.Expect(err).To(gomega.Or( - gomega.Equal(context.DeadlineExceeded), - gomega.MatchError(gomega.ContainSubstring("deadline")), - gomega.MatchError(gomega.ContainSubstring("cancel")), - )) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client 
+ +import ( + "context" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + eventsv1 "github.com/agntcy/dir/api/events/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/client/streaming" + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// receiveEvent is a helper that receives a single event from a StreamResult. +// It returns the event response or fails the test on error/timeout. +func receiveEvent(ctx context.Context, result streaming.StreamResult[eventsv1.ListenResponse]) *eventsv1.ListenResponse { + select { + case resp := <-result.ResCh(): + return resp + case err := <-result.ErrCh(): + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "stream error") + + return nil + case <-result.DoneCh(): + ginkgo.Fail("stream ended unexpectedly") + + return nil + case <-ctx.Done(): + gomega.Expect(ctx.Err()).NotTo(gomega.HaveOccurred(), "context timeout") + + return nil + } +} + +// tryReceiveEvent attempts to receive an event but doesn't fail on timeout. +// Returns (response, error) where error is non-nil on timeout/completion. +func tryReceiveEvent(ctx context.Context, result streaming.StreamResult[eventsv1.ListenResponse]) (*eventsv1.ListenResponse, error) { + select { + case resp := <-result.ResCh(): + return resp, nil + case err := <-result.ErrCh(): + return nil, err + case <-result.DoneCh(): + // Stream ended normally - not an error, just no more events + //nolint:nilnil + return nil, nil + case <-ctx.Done(): + // Return unwrapped context error so callers can check for context.Canceled + //nolint:wrapcheck + return nil, ctx.Err() + } +} + +var _ = ginkgo.Describe("Event Streaming E2E Tests", ginkgo.Ordered, ginkgo.Serial, func() { + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + }) + + var c *client.Client + var ctx context.Context + + ginkgo.BeforeAll(func() { + ctx = context.Background() + + // Create a new client + var err error + c, err = client.New(ctx, client.WithEnvConfig()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + // Clean up all testdata records after all event tests + // This prevents interfering with existing 01_client_test.go tests + ginkgo.AfterAll(func() { + // Get CIDs for V031, V070, and V080 testdata + v031Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + v070Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + v080Record, _ := corev1.UnmarshalRecord(testdata.ExpectedRecordV080JSON) + + // IMPORTANT: Unpublish first to remove routing labels, then delete from store + if v031Record != nil { + v031Ref := &corev1.RecordRef{Cid: v031Record.GetCid()} + _ = c.Unpublish(context.Background(), &routingv1.UnpublishRequest{ + Request: &routingv1.UnpublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v031Ref}}, + }, + }) + _ = c.Delete(context.Background(), v031Ref) + } + if v070Record != nil { + v070Ref := &corev1.RecordRef{Cid: v070Record.GetCid()} + _ = c.Unpublish(context.Background(), &routingv1.UnpublishRequest{ + Request: &routingv1.UnpublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v070Ref}}, + }, + }) + _ = c.Delete(context.Background(), v070Ref) + } + if v080Record != nil { + v080Ref := &corev1.RecordRef{Cid: v080Record.GetCid()} + _ = c.Unpublish(context.Background(), 
&routingv1.UnpublishRequest{ + Request: &routingv1.UnpublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{Refs: []*corev1.RecordRef{v080Ref}}, + }, + }) + _ = c.Delete(context.Background(), v080Ref) + } + + // Close the client + if c != nil { + c.Close() + } + }) + + ginkgo.Context("RECORD_PUSHED events", func() { + ginkgo.It("should receive RECORD_PUSHED event when pushing a record", func() { + // Subscribe to RECORD_PUSHED events + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create and push a record in background + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + + time.Sleep(200 * time.Millisecond) + + // Use valid test record from testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, pushErr := c.Push(context.Background(), record) + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + }() + + // Receive the event + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + + event := resp.GetEvent() + gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + gomega.Expect(event.GetResourceId()).NotTo(gomega.BeEmpty()) + gomega.Expect(event.GetLabels()).NotTo(gomega.BeEmpty()) // V031 has skills labels + }) + }) + + ginkgo.Context("RECORD_PUBLISHED events", func() { + ginkgo.It("should receive RECORD_PUBLISHED event when publishing a record", func() { + // First push a record using valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ref, err := c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Subscribe to RECORD_PUBLISHED events + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Publish the record in background + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + publishErr := c.Publish(context.Background(), &routingv1.PublishRequest{ + Request: &routingv1.PublishRequest_RecordRefs{ + RecordRefs: &routingv1.RecordRefs{ + Refs: []*corev1.RecordRef{ref}, + }, + }, + }) + gomega.Expect(publishErr).NotTo(gomega.HaveOccurred()) + + // Wait for async publish to complete + time.Sleep(2 * time.Second) + }() + + // Receive the event + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + + event := resp.GetEvent() + gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED)) + gomega.Expect(event.GetResourceId()).To(gomega.Equal(ref.GetCid())) + gomega.Expect(event.GetLabels()).NotTo(gomega.BeEmpty()) + }) + }) + + ginkgo.Context("RECORD_DELETED events", func() { + ginkgo.It("should receive RECORD_DELETED event when deleting a record", func() { + // First push a record using valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ref, err := c.Push(ctx, 
record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Subscribe to RECORD_DELETED events + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_DELETED}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Delete the record in background + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + deleteErr := c.Delete(context.Background(), ref) + gomega.Expect(deleteErr).NotTo(gomega.HaveOccurred()) + }() + + // Receive the event + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + + event := resp.GetEvent() + gomega.Expect(event.GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)) + gomega.Expect(event.GetResourceId()).To(gomega.Equal(ref.GetCid())) + }) + }) + + ginkgo.Context("Event filtering", func() { + ginkgo.It("should filter events by label", func() { + // Subscribe with label filter (natural_language_processing is in V070 record) + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + LabelFilters: []string{"/skills/natural_language_processing"}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push a matching record + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Use V070 test record which has natural_language_processing skills + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, _ = c.Push(context.Background(), record) + }() + + // Should receive the matching event + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + gomega.Expect(resp.GetEvent().GetLabels()).To(gomega.ContainElement(gomega.ContainSubstring("/skills/natural_language_processing"))) + }) + + ginkgo.It("should filter events by CID", func() { + // First push a record using valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ref, err := c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Subscribe with CID filter + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + CidFilters: []string{ref.GetCid()}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push another record and pull the filtered one + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Push another record (different CID, should be filtered out) + otherRecord, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, _ = c.Push(context.Background(), otherRecord) + + time.Sleep(100 * time.Millisecond) + + // Pull the filtered record (triggers RECORD_PULLED for the target CID) + _, _ = c.Pull(context.Background(), ref) + }() + + // Should receive only the event for the filtered CID + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + gomega.Expect(resp.GetEvent().GetResourceId()).To(gomega.Equal(ref.GetCid())) + }) + + 
ginkgo.It("should filter events by event type", func() { + // Subscribe only to PUSHED events (not PULLED or DELETED) + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Perform multiple operations + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Use valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push (should trigger PUSHED event - we should receive this) + ref, pushErr := c.Push(context.Background(), record) + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + + time.Sleep(100 * time.Millisecond) + + // Pull (triggers PULLED event - should be filtered out) + _, _ = c.Pull(context.Background(), ref) + }() + + // Should receive only the PUSHED event + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent()).NotTo(gomega.BeNil()) + gomega.Expect(resp.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + + // Try to receive another event (should timeout - pull event was filtered) + resp2, err := tryReceiveEvent(streamCtx, result) + if err == nil && resp2 != nil { + // If we got another event, it should still be PUSHED (not PULLED) + gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + } + }) + }) + + ginkgo.Context("Multiple subscribers", func() { + ginkgo.It("should deliver events to multiple subscribers", func() { + // Create two subscriptions + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result1, err1 := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }) + gomega.Expect(err1).NotTo(gomega.HaveOccurred()) + + result2, err2 := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }) + gomega.Expect(err2).NotTo(gomega.HaveOccurred()) + + // Push a record + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Use valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, pushErr := c.Push(context.Background(), record) + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + }() + + // Both streams should receive the same event + resp1 := receiveEvent(streamCtx, result1) + gomega.Expect(resp1.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + + resp2 := receiveEvent(streamCtx, result2) + gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + + // Both should have same resource ID (non-empty) + gomega.Expect(resp1.GetEvent().GetResourceId()).NotTo(gomega.BeEmpty()) + gomega.Expect(resp1.GetEvent().GetResourceId()).To(gomega.Equal(resp2.GetEvent().GetResourceId())) + }) + }) + + ginkgo.Context("Stream lifecycle", func() { + ginkgo.It("should handle context cancellation gracefully", func() { + cancelCtx, cancelFunc := context.WithCancel(ctx) + + result, err := c.ListenStream(cancelCtx, &eventsv1.ListenRequest{}) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Cancel context immediately + cancelFunc() + + // Should receive error or completion + _, err = tryReceiveEvent(cancelCtx, result) + gomega.Expect(err).To(gomega.HaveOccurred()) + }) + + ginkgo.It("should receive multiple events in sequence", func() { + streamCtx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{ + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + }, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Perform multiple operations + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Push first record + record1, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ref1, _ := c.Push(context.Background(), record1) + + time.Sleep(200 * time.Millisecond) + + // Push second record + record2, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, _ = c.Push(context.Background(), record2) + + time.Sleep(200 * time.Millisecond) + + // Pull first record + _, _ = c.Pull(context.Background(), ref1) + }() + + // Receive first PUSHED event + resp1 := receiveEvent(streamCtx, result) + gomega.Expect(resp1.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + + // Receive second PUSHED event + resp2 := receiveEvent(streamCtx, result) + gomega.Expect(resp2.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)) + + // Receive PULLED event + resp3 := receiveEvent(streamCtx, result) + gomega.Expect(resp3.GetEvent().GetType()).To(gomega.Equal(eventsv1.EventType_EVENT_TYPE_RECORD_PULLED)) + }) + }) + + ginkgo.Context("Event metadata", func() { + ginkgo.It("should include labels in record events", func() { + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push a record with multiple skills + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Use valid testdata (V031 has multiple skills) + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, _ = c.Push(context.Background(), record) + }() + + // Receive event and verify labels + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent().GetLabels()).NotTo(gomega.BeEmpty()) + // V031 has "Natural Language Processing" skills + gomega.Expect(resp.GetEvent().GetLabels()).To(gomega.ContainElement(gomega.ContainSubstring("/skills/"))) + }) + + ginkgo.It("should include timestamp in all events", func() { + streamCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push a record + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + time.Sleep(200 * time.Millisecond) + + // Use valid testdata + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, _ = c.Push(context.Background(), record) + }() + + // Receive event and verify timestamp + resp := receiveEvent(streamCtx, result) + gomega.Expect(resp.GetEvent().GetTimestamp()).NotTo(gomega.BeNil()) + gomega.Expect(resp.GetEvent().GetTimestamp().AsTime()).To(gomega.BeTemporally("~", time.Now(), 5*time.Second)) + }) + }) + + ginkgo.Context("No events scenario", func() { + ginkgo.It("should timeout when no events occur", func() { + // Subscribe with very specific filter that won't match + streamCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + + result, err := c.ListenStream(streamCtx, &eventsv1.ListenRequest{ + CidFilters: []string{"bafynonexistent123456789"}, + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Try to receive - should eventually timeout + _, err = tryReceiveEvent(streamCtx, result) + gomega.Expect(err).To(gomega.Or( + gomega.Equal(context.DeadlineExceeded), + gomega.MatchError(gomega.ContainSubstring("deadline")), + gomega.MatchError(gomega.ContainSubstring("cancel")), + )) + }) + }) +}) diff --git a/e2e/client/03_ratelimit_test.go b/e2e/client/03_ratelimit_test.go index eeb62a315..9858e9633 100644 --- a/e2e/client/03_ratelimit_test.go +++ b/e2e/client/03_ratelimit_test.go @@ -1,523 +1,523 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package client contains end-to-end tests for the client library including rate limiting. -// -// Rate Limiting Test Configuration: -// These tests expect the server to be configured with rate limiting enabled. -// Default configuration from Taskfile (test:e2e:client and test:e2e:local): -// - RATELIMIT_ENABLED: true -// - RATELIMIT_GLOBAL_RPS: 100 (requests per second for all clients) -// - RATELIMIT_GLOBAL_BURST: 200 (burst capacity for all clients) -// -// Note: The tests use GLOBAL rate limiting (not per-client) because authentication -// is not enabled in e2e tests. Without authentication, all clients are treated as -// "unauthenticated" and share the global rate limiter. In production with authentication -// enabled, per-client rate limiting would be used instead. -// -// The tests are designed to: -// - Verify requests within limits succeed -// - Verify rapid requests exceeding burst capacity are rate limited -// - Handle cases where rate limiting is disabled with informative warnings -// -// Run with: task test:e2e:client -// Or customize: task test:e2e:client RATELIMIT_GLOBAL_RPS=50 RATELIMIT_GLOBAL_BURST=100 -package client - -import ( - "context" - "strings" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/client" - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// isRateLimitError checks if the error is a rate limit error (codes.ResourceExhausted). 
-func isRateLimitError(err error) bool { - if err == nil { - return false - } - - st, ok := status.FromError(err) - - return ok && st.Code() == codes.ResourceExhausted -} - -var _ = ginkgo.Describe("Rate Limiting E2E Tests", ginkgo.Label("ratelimit"), ginkgo.Ordered, ginkgo.Serial, func() { - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - }) - - var c *client.Client - var ctx context.Context - - ginkgo.BeforeAll(func() { - ctx = context.Background() - - // Create a new client - var err error - c, err = client.New(ctx, client.WithEnvConfig()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.AfterAll(func() { - if c != nil { - c.Close() - } - }) - - ginkgo.Context("Rate limiting behavior", func() { - ginkgo.It("should allow requests within rate limit", func() { - // Push multiple records within the rate limit - // Test expects: Global RPS=100, Burst=200 (default from Taskfile) - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Push 5 records with a small delay between each - // This should be well within any reasonable rate limit - for range 5 { - ref, pushErr := c.Push(ctx, record) - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - gomega.Expect(ref).NotTo(gomega.BeNil()) - gomega.Expect(ref.GetCid()).NotTo(gomega.BeEmpty()) - - // Clean up immediately - _ = c.Delete(ctx, ref) - - // Small delay to avoid burst issues - time.Sleep(50 * time.Millisecond) - } - }) - - ginkgo.It("should reject requests when rate limit is exceeded", func() { - // This test attempts to exceed the rate limit by making rapid sequential requests - // Test expects: Global RPS=100, Burst=200 (default from Taskfile) - // With burst=200, first 200 requests succeed immediately, then rate limiting kicks in - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Make more requests than burst capacity to ensure we hit rate limiting - // With burst=200, we need >200 rapid requests to trigger rate limiting - var successCount int - var rateLimitErrorCount int - const totalRequests = 250 // Increased from 200 to exceed burst capacity - - ginkgo.By("Making rapid sequential requests to exceed rate limit") - for i := range totalRequests { - ref, pushErr := c.Push(ctx, record) - if pushErr == nil { - successCount++ - // Clean up successful pushes - _ = c.Delete(ctx, ref) - } else if isRateLimitError(pushErr) { - rateLimitErrorCount++ - ginkgo.GinkgoWriter.Printf("Rate limit error on request %d: %v\n", i+1, pushErr) - } else { - // Unexpected error - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred(), - "Unexpected error (not rate limit): %v", pushErr) - } - } - - ginkgo.GinkgoWriter.Printf("Results: %d successful, %d rate limited out of %d requests\n", - successCount, rateLimitErrorCount, totalRequests) - - // We should have some successful requests and some rate limited requests - // This validates that rate limiting is working - gomega.Expect(successCount).To(gomega.BeNumerically(">", 0), - "Should have at least some successful requests") - - // If rate limiting is properly configured and enabled, we should see some rate limit errors - // Note: If this fails, check that rate limiting is enabled in the server config - if rateLimitErrorCount == 0 { - ginkgo.GinkgoWriter.Println("WARNING: No rate limit errors detected. 
Rate limiting may be disabled or set too high.") - } - }) - - ginkgo.It("should handle burst requests correctly", func() { - // Test burst capacity by making quick successive requests - // Test expects: Global RPS=100, Burst=200 (default from Taskfile) - // Small bursts (<<200) should always succeed without rate limiting - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Make a burst of requests well within burst capacity (10 << 200) - const burstSize = 10 - successCount := 0 - - ginkgo.By("Making burst of requests") - for range burstSize { - ref, pushErr := c.Push(ctx, record) - if pushErr == nil { - successCount++ - _ = c.Delete(ctx, ref) - } else if isRateLimitError(pushErr) { - // Rate limited - unexpected for small burst - break - } else { - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - } - } - - // We should successfully complete at least a few requests within burst capacity - gomega.Expect(successCount).To(gomega.BeNumerically(">=", 1), - "Should allow at least some burst requests") - - ginkgo.GinkgoWriter.Printf("Burst test: %d/%d requests succeeded\n", successCount, burstSize) - }) - - ginkgo.It("should apply global rate limiting to all clients", func() { - // This test verifies that global rate limits are applied - // Note: Without authentication, all clients share the global rate limiter - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Make requests until we hit rate limit - const maxAttempts = 150 - var hitRateLimit bool - - for i := range maxAttempts { - ref, pushErr := c.Push(ctx, record) - if pushErr == nil { - _ = c.Delete(ctx, ref) - } else if isRateLimitError(pushErr) { - hitRateLimit = true - ginkgo.GinkgoWriter.Printf("Hit rate limit on attempt %d\n", i+1) - - break - } else { - gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) - } - } - - if !hitRateLimit { - ginkgo.GinkgoWriter.Println("INFO: Did not hit rate limit. 
This is expected if rate limits are high or disabled.") - } - }) - - ginkgo.It("should recover after rate limit period expires", func() { - // Push enough requests to potentially hit rate limit - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Making rapid requests to potentially hit rate limit") - for range 100 { - ref, _ := c.Push(ctx, record) - if ref != nil { - _ = c.Delete(ctx, ref) - } - } - - // Wait for rate limiter to refill tokens (typically 1 second for token bucket) - ginkgo.By("Waiting for rate limit to reset") - time.Sleep(2 * time.Second) - - // Now requests should succeed again - ginkgo.By("Verifying requests succeed after rate limit reset") - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ref).NotTo(gomega.BeNil()) - - // Clean up - _ = c.Delete(ctx, ref) - }) - }) - - ginkgo.Context("Different operations", func() { - ginkgo.It("should apply rate limiting to all gRPC operations", func() { - // Test that rate limiting works for different operations - // Push, Pull, Delete should all be rate limited - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // First, push a record to have something to pull - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Make rapid Pull requests - ginkgo.By("Testing rate limit on Pull operations") - var pullRateLimitHit bool - for i := range 100 { - _, pullErr := c.Pull(ctx, ref) - if isRateLimitError(pullErr) { - pullRateLimitHit = true - ginkgo.GinkgoWriter.Printf("Pull rate limited on attempt %d\n", i+1) - - break - } - } - - if !pullRateLimitHit { - ginkgo.GinkgoWriter.Println("INFO: Pull operations did not hit rate limit") - } - - // Clean up - _ = c.Delete(ctx, ref) - }) - - ginkgo.It("should handle rate limit errors with proper status codes", func() { - // Verify that rate limit errors return the correct gRPC status code - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Make many rapid requests to trigger rate limiting - var rateLimitErr error - for range 150 { - ref, pushErr := c.Push(ctx, record) - if pushErr != nil { - if isRateLimitError(pushErr) { - rateLimitErr = pushErr - - break - } - } else if ref != nil { - _ = c.Delete(ctx, ref) - } - } - - // If we got a rate limit error, verify its properties - if rateLimitErr != nil { - ginkgo.By("Verifying rate limit error properties") - st, ok := status.FromError(rateLimitErr) - gomega.Expect(ok).To(gomega.BeTrue(), "Error should be a gRPC status error") - gomega.Expect(st.Code()).To(gomega.Equal(codes.ResourceExhausted), - "Rate limit error should have ResourceExhausted code") - gomega.Expect(strings.ToLower(st.Message())).To(gomega.ContainSubstring("rate limit"), - "Error message should mention rate limit") - } else { - ginkgo.GinkgoWriter.Println("INFO: Did not trigger rate limit error for status code test") - } - }) - }) - - ginkgo.Context("Rate limit configuration", func() { - ginkgo.It("should respect per-method rate limits if configured", func() { - // This test validates that per-method rate limits work correctly - // The actual limits depend on server configuration - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Test different methods: Push vs Pull - // If per-method 
limits are configured, they might have different limits - - ginkgo.By("Testing Push operation rate limit") - var pushRateLimitHit bool - for i := range 100 { - ref, pushErr := c.Push(ctx, record) - if pushErr != nil && isRateLimitError(pushErr) { - pushRateLimitHit = true - ginkgo.GinkgoWriter.Printf("Push rate limited on attempt %d\n", i+1) - - break - } else if ref != nil { - _ = c.Delete(ctx, ref) - } - } - - // Wait for rate limiter to reset - time.Sleep(2 * time.Second) - - // Test pull operations - ginkgo.By("Testing Pull operation rate limit") - ref, err := c.Push(ctx, record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - var pullRateLimitHit bool - for i := range 100 { - _, pullErr := c.Pull(ctx, ref) - if pullErr != nil && isRateLimitError(pullErr) { - pullRateLimitHit = true - ginkgo.GinkgoWriter.Printf("Pull rate limited on attempt %d\n", i+1) - - break - } - } - - _ = c.Delete(ctx, ref) - - ginkgo.GinkgoWriter.Printf("Results: Push rate limited: %v, Pull rate limited: %v\n", - pushRateLimitHit, pullRateLimitHit) - - // Note: If per-method limits are not configured, both operations share the same limit - }) - }) - - ginkgo.Context("Concurrent clients", func() { - ginkgo.It("should handle rate limiting with concurrent requests", func() { - // Test that rate limiting works correctly with concurrent requests from the same client - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - const numGoroutines = 10 - const requestsPerGoroutine = 20 - - type result struct { - success int - rateLimited int - otherErrors int - errorMsgs []string - } - - results := make(chan result, numGoroutines) - - ginkgo.By("Launching concurrent goroutines to test rate limiting") - for range numGoroutines { - go func() { - defer ginkgo.GinkgoRecover() // Required for assertions in goroutines - - var r result - for range requestsPerGoroutine { - ref, pushErr := c.Push(context.Background(), record) - if pushErr == nil { - r.success++ - _ = c.Delete(context.Background(), ref) - } else if isRateLimitError(pushErr) { - r.rateLimited++ - } else { - r.otherErrors++ - r.errorMsgs = append(r.errorMsgs, pushErr.Error()) - } - - // Small random delay to vary timing - time.Sleep(10 * time.Millisecond) - } - results <- r - }() - } - - // Collect results - var totalSuccess, totalRateLimited, totalOtherErrors int - var allErrors []string - for range numGoroutines { - r := <-results - totalSuccess += r.success - totalRateLimited += r.rateLimited - totalOtherErrors += r.otherErrors - allErrors = append(allErrors, r.errorMsgs...) 
- } - - ginkgo.GinkgoWriter.Printf("Concurrent test results: %d successful, %d rate limited, %d other errors\n", - totalSuccess, totalRateLimited, totalOtherErrors) - - // Log any unexpected errors for debugging - if totalOtherErrors > 0 { - ginkgo.GinkgoWriter.Printf("Unexpected errors encountered:\n") - for i, errMsg := range allErrors { - ginkgo.GinkgoWriter.Printf(" Error %d: %s\n", i+1, errMsg) - } - } - - // We should have at least some successful requests - gomega.Expect(totalSuccess).To(gomega.BeNumerically(">", 0), - "Should have at least some successful requests") - - // Note: Concurrent requests might have occasional network/timing errors - // We allow a small number of non-rate-limit errors (e.g., transient connection issues) - // but they should be rare (<=5% of requests) - if totalOtherErrors > 0 { - errorRate := float64(totalOtherErrors) / float64(numGoroutines*requestsPerGoroutine) - ginkgo.GinkgoWriter.Printf("Other error rate: %.2f%% (%d/%d)\n", - errorRate*100, totalOtherErrors, numGoroutines*requestsPerGoroutine) - - // Allow up to 5% error rate for transient issues - gomega.Expect(errorRate).To(gomega.BeNumerically("<=", 0.05), - "Error rate should be less than or equal to 5%% (transient errors)") - } - - // If rate limiting is properly configured, we should see some rate limiting - if totalRateLimited == 0 { - ginkgo.GinkgoWriter.Println("INFO: No rate limiting detected in concurrent test") - } - }) - }) - - ginkgo.Context("Edge cases", func() { - ginkgo.It("should handle rate limiting with context timeout", func() { - // Test that rate limiting works correctly when combined with context timeouts - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Create context with timeout - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - // Make rapid requests with timeout context - var successCount, rateLimitCount, timeoutCount int - - for range 100 { - ref, pushErr := c.Push(timeoutCtx, record) - if pushErr == nil { - successCount++ - _ = c.Delete(timeoutCtx, ref) - } else if isRateLimitError(pushErr) { - rateLimitCount++ - } else if timeoutCtx.Err() != nil { - timeoutCount++ - - break - } - } - - ginkgo.GinkgoWriter.Printf("Timeout test: %d successful, %d rate limited, %d timeout\n", - successCount, rateLimitCount, timeoutCount) - - gomega.Expect(successCount+rateLimitCount+timeoutCount).To(gomega.BeNumerically(">", 0), - "Should have processed at least some requests") - }) - - ginkgo.It("should maintain rate limit state across multiple operations", func() { - // Verify that rate limiter maintains state correctly across different operations - - record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // First batch of requests - ginkgo.By("Making first batch of requests") - for range 50 { - ref, _ := c.Push(ctx, record) - if ref != nil { - _ = c.Delete(ctx, ref) - } - } - - // Immediate second batch (should continue from previous state) - ginkgo.By("Making second batch immediately after") - var secondBatchRateLimited bool - for i := range 50 { - ref, pushErr := c.Push(ctx, record) - if pushErr != nil && isRateLimitError(pushErr) { - secondBatchRateLimited = true - ginkgo.GinkgoWriter.Printf("Second batch rate limited on request %d\n", i+1) - - break - } else if ref != nil { - _ = c.Delete(ctx, ref) - } - } - - if secondBatchRateLimited { - ginkgo.GinkgoWriter.Println("Rate limiter correctly 
maintained state across batches") - } else { - ginkgo.GinkgoWriter.Println("INFO: Did not hit rate limit in second batch") - } - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package client contains end-to-end tests for the client library including rate limiting. +// +// Rate Limiting Test Configuration: +// These tests expect the server to be configured with rate limiting enabled. +// Default configuration from Taskfile (test:e2e:client and test:e2e:local): +// - RATELIMIT_ENABLED: true +// - RATELIMIT_GLOBAL_RPS: 100 (requests per second for all clients) +// - RATELIMIT_GLOBAL_BURST: 200 (burst capacity for all clients) +// +// Note: The tests use GLOBAL rate limiting (not per-client) because authentication +// is not enabled in e2e tests. Without authentication, all clients are treated as +// "unauthenticated" and share the global rate limiter. In production with authentication +// enabled, per-client rate limiting would be used instead. +// +// The tests are designed to: +// - Verify requests within limits succeed +// - Verify rapid requests exceeding burst capacity are rate limited +// - Handle cases where rate limiting is disabled with informative warnings +// +// Run with: task test:e2e:client +// Or customize: task test:e2e:client RATELIMIT_GLOBAL_RPS=50 RATELIMIT_GLOBAL_BURST=100 +package client + +import ( + "context" + "strings" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// isRateLimitError checks if the error is a rate limit error (codes.ResourceExhausted). 
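+//
+// A minimal (hypothetical) sketch of using it outside these specs, assuming
+// the same client API, gates retries on the ResourceExhausted code:
+//
+//	if _, err := c.Push(ctx, record); isRateLimitError(err) {
+//		time.Sleep(time.Second) // crude pause; real callers would back off and retry
+//	}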
+func isRateLimitError(err error) bool { + if err == nil { + return false + } + + st, ok := status.FromError(err) + + return ok && st.Code() == codes.ResourceExhausted +} + +var _ = ginkgo.Describe("Rate Limiting E2E Tests", ginkgo.Label("ratelimit"), ginkgo.Ordered, ginkgo.Serial, func() { + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + }) + + var c *client.Client + var ctx context.Context + + ginkgo.BeforeAll(func() { + ctx = context.Background() + + // Create a new client + var err error + c, err = client.New(ctx, client.WithEnvConfig()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.AfterAll(func() { + if c != nil { + c.Close() + } + }) + + ginkgo.Context("Rate limiting behavior", func() { + ginkgo.It("should allow requests within rate limit", func() { + // Push multiple records within the rate limit + // Test expects: Global RPS=100, Burst=200 (default from Taskfile) + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Push 5 records with a small delay between each + // This should be well within any reasonable rate limit + for range 5 { + ref, pushErr := c.Push(ctx, record) + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + gomega.Expect(ref).NotTo(gomega.BeNil()) + gomega.Expect(ref.GetCid()).NotTo(gomega.BeEmpty()) + + // Clean up immediately + _ = c.Delete(ctx, ref) + + // Small delay to avoid burst issues + time.Sleep(50 * time.Millisecond) + } + }) + + ginkgo.It("should reject requests when rate limit is exceeded", func() { + // This test attempts to exceed the rate limit by making rapid sequential requests + // Test expects: Global RPS=100, Burst=200 (default from Taskfile) + // With burst=200, first 200 requests succeed immediately, then rate limiting kicks in + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Make more requests than burst capacity to ensure we hit rate limiting + // With burst=200, we need >200 rapid requests to trigger rate limiting + var successCount int + var rateLimitErrorCount int + const totalRequests = 250 // Increased from 200 to exceed burst capacity + + ginkgo.By("Making rapid sequential requests to exceed rate limit") + for i := range totalRequests { + ref, pushErr := c.Push(ctx, record) + if pushErr == nil { + successCount++ + // Clean up successful pushes + _ = c.Delete(ctx, ref) + } else if isRateLimitError(pushErr) { + rateLimitErrorCount++ + ginkgo.GinkgoWriter.Printf("Rate limit error on request %d: %v\n", i+1, pushErr) + } else { + // Unexpected error + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred(), + "Unexpected error (not rate limit): %v", pushErr) + } + } + + ginkgo.GinkgoWriter.Printf("Results: %d successful, %d rate limited out of %d requests\n", + successCount, rateLimitErrorCount, totalRequests) + + // We should have some successful requests and some rate limited requests + // This validates that rate limiting is working + gomega.Expect(successCount).To(gomega.BeNumerically(">", 0), + "Should have at least some successful requests") + + // If rate limiting is properly configured and enabled, we should see some rate limit errors + // Note: If this fails, check that rate limiting is enabled in the server config + if rateLimitErrorCount == 0 { + ginkgo.GinkgoWriter.Println("WARNING: No rate limit errors detected. 
Rate limiting may be disabled or set too high.") + } + }) + + ginkgo.It("should handle burst requests correctly", func() { + // Test burst capacity by making quick successive requests + // Test expects: Global RPS=100, Burst=200 (default from Taskfile) + // Small bursts (<<200) should always succeed without rate limiting + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Make a burst of requests well within burst capacity (10 << 200) + const burstSize = 10 + successCount := 0 + + ginkgo.By("Making burst of requests") + for range burstSize { + ref, pushErr := c.Push(ctx, record) + if pushErr == nil { + successCount++ + _ = c.Delete(ctx, ref) + } else if isRateLimitError(pushErr) { + // Rate limited - unexpected for small burst + break + } else { + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + } + } + + // We should successfully complete at least a few requests within burst capacity + gomega.Expect(successCount).To(gomega.BeNumerically(">=", 1), + "Should allow at least some burst requests") + + ginkgo.GinkgoWriter.Printf("Burst test: %d/%d requests succeeded\n", successCount, burstSize) + }) + + ginkgo.It("should apply global rate limiting to all clients", func() { + // This test verifies that global rate limits are applied + // Note: Without authentication, all clients share the global rate limiter + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Make requests until we hit rate limit + const maxAttempts = 150 + var hitRateLimit bool + + for i := range maxAttempts { + ref, pushErr := c.Push(ctx, record) + if pushErr == nil { + _ = c.Delete(ctx, ref) + } else if isRateLimitError(pushErr) { + hitRateLimit = true + ginkgo.GinkgoWriter.Printf("Hit rate limit on attempt %d\n", i+1) + + break + } else { + gomega.Expect(pushErr).NotTo(gomega.HaveOccurred()) + } + } + + if !hitRateLimit { + ginkgo.GinkgoWriter.Println("INFO: Did not hit rate limit. 
This is expected if rate limits are high or disabled.") + } + }) + + ginkgo.It("should recover after rate limit period expires", func() { + // Push enough requests to potentially hit rate limit + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("Making rapid requests to potentially hit rate limit") + for range 100 { + ref, _ := c.Push(ctx, record) + if ref != nil { + _ = c.Delete(ctx, ref) + } + } + + // Wait for rate limiter to refill tokens (typically 1 second for token bucket) + ginkgo.By("Waiting for rate limit to reset") + time.Sleep(2 * time.Second) + + // Now requests should succeed again + ginkgo.By("Verifying requests succeed after rate limit reset") + ref, err := c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ref).NotTo(gomega.BeNil()) + + // Clean up + _ = c.Delete(ctx, ref) + }) + }) + + ginkgo.Context("Different operations", func() { + ginkgo.It("should apply rate limiting to all gRPC operations", func() { + // Test that rate limiting works for different operations + // Push, Pull, Delete should all be rate limited + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // First, push a record to have something to pull + ref, err := c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Make rapid Pull requests + ginkgo.By("Testing rate limit on Pull operations") + var pullRateLimitHit bool + for i := range 100 { + _, pullErr := c.Pull(ctx, ref) + if isRateLimitError(pullErr) { + pullRateLimitHit = true + ginkgo.GinkgoWriter.Printf("Pull rate limited on attempt %d\n", i+1) + + break + } + } + + if !pullRateLimitHit { + ginkgo.GinkgoWriter.Println("INFO: Pull operations did not hit rate limit") + } + + // Clean up + _ = c.Delete(ctx, ref) + }) + + ginkgo.It("should handle rate limit errors with proper status codes", func() { + // Verify that rate limit errors return the correct gRPC status code + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Make many rapid requests to trigger rate limiting + var rateLimitErr error + for range 150 { + ref, pushErr := c.Push(ctx, record) + if pushErr != nil { + if isRateLimitError(pushErr) { + rateLimitErr = pushErr + + break + } + } else if ref != nil { + _ = c.Delete(ctx, ref) + } + } + + // If we got a rate limit error, verify its properties + if rateLimitErr != nil { + ginkgo.By("Verifying rate limit error properties") + st, ok := status.FromError(rateLimitErr) + gomega.Expect(ok).To(gomega.BeTrue(), "Error should be a gRPC status error") + gomega.Expect(st.Code()).To(gomega.Equal(codes.ResourceExhausted), + "Rate limit error should have ResourceExhausted code") + gomega.Expect(strings.ToLower(st.Message())).To(gomega.ContainSubstring("rate limit"), + "Error message should mention rate limit") + } else { + ginkgo.GinkgoWriter.Println("INFO: Did not trigger rate limit error for status code test") + } + }) + }) + + ginkgo.Context("Rate limit configuration", func() { + ginkgo.It("should respect per-method rate limits if configured", func() { + // This test validates that per-method rate limits work correctly + // The actual limits depend on server configuration + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Test different methods: Push vs Pull + // If per-method 
limits are configured, they might have different limits + + ginkgo.By("Testing Push operation rate limit") + var pushRateLimitHit bool + for i := range 100 { + ref, pushErr := c.Push(ctx, record) + if pushErr != nil && isRateLimitError(pushErr) { + pushRateLimitHit = true + ginkgo.GinkgoWriter.Printf("Push rate limited on attempt %d\n", i+1) + + break + } else if ref != nil { + _ = c.Delete(ctx, ref) + } + } + + // Wait for rate limiter to reset + time.Sleep(2 * time.Second) + + // Test pull operations + ginkgo.By("Testing Pull operation rate limit") + ref, err := c.Push(ctx, record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + var pullRateLimitHit bool + for i := range 100 { + _, pullErr := c.Pull(ctx, ref) + if pullErr != nil && isRateLimitError(pullErr) { + pullRateLimitHit = true + ginkgo.GinkgoWriter.Printf("Pull rate limited on attempt %d\n", i+1) + + break + } + } + + _ = c.Delete(ctx, ref) + + ginkgo.GinkgoWriter.Printf("Results: Push rate limited: %v, Pull rate limited: %v\n", + pushRateLimitHit, pullRateLimitHit) + + // Note: If per-method limits are not configured, both operations share the same limit + }) + }) + + ginkgo.Context("Concurrent clients", func() { + ginkgo.It("should handle rate limiting with concurrent requests", func() { + // Test that rate limiting works correctly with concurrent requests from the same client + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV070JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + const numGoroutines = 10 + const requestsPerGoroutine = 20 + + type result struct { + success int + rateLimited int + otherErrors int + errorMsgs []string + } + + results := make(chan result, numGoroutines) + + ginkgo.By("Launching concurrent goroutines to test rate limiting") + for range numGoroutines { + go func() { + defer ginkgo.GinkgoRecover() // Required for assertions in goroutines + + var r result + for range requestsPerGoroutine { + ref, pushErr := c.Push(context.Background(), record) + if pushErr == nil { + r.success++ + _ = c.Delete(context.Background(), ref) + } else if isRateLimitError(pushErr) { + r.rateLimited++ + } else { + r.otherErrors++ + r.errorMsgs = append(r.errorMsgs, pushErr.Error()) + } + + // Small random delay to vary timing + time.Sleep(10 * time.Millisecond) + } + results <- r + }() + } + + // Collect results + var totalSuccess, totalRateLimited, totalOtherErrors int + var allErrors []string + for range numGoroutines { + r := <-results + totalSuccess += r.success + totalRateLimited += r.rateLimited + totalOtherErrors += r.otherErrors + allErrors = append(allErrors, r.errorMsgs...) 
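+ // results is buffered with capacity numGoroutines, so each worker's single
+ // send cannot block even if this collection loop falls behind.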
+ } + + ginkgo.GinkgoWriter.Printf("Concurrent test results: %d successful, %d rate limited, %d other errors\n", + totalSuccess, totalRateLimited, totalOtherErrors) + + // Log any unexpected errors for debugging + if totalOtherErrors > 0 { + ginkgo.GinkgoWriter.Printf("Unexpected errors encountered:\n") + for i, errMsg := range allErrors { + ginkgo.GinkgoWriter.Printf(" Error %d: %s\n", i+1, errMsg) + } + } + + // We should have at least some successful requests + gomega.Expect(totalSuccess).To(gomega.BeNumerically(">", 0), + "Should have at least some successful requests") + + // Note: Concurrent requests might have occasional network/timing errors + // We allow a small number of non-rate-limit errors (e.g., transient connection issues) + // but they should be rare (<=5% of requests) + if totalOtherErrors > 0 { + errorRate := float64(totalOtherErrors) / float64(numGoroutines*requestsPerGoroutine) + ginkgo.GinkgoWriter.Printf("Other error rate: %.2f%% (%d/%d)\n", + errorRate*100, totalOtherErrors, numGoroutines*requestsPerGoroutine) + + // Allow up to 5% error rate for transient issues + gomega.Expect(errorRate).To(gomega.BeNumerically("<=", 0.05), + "Error rate should be less than or equal to 5%% (transient errors)") + } + + // If rate limiting is properly configured, we should see some rate limiting + if totalRateLimited == 0 { + ginkgo.GinkgoWriter.Println("INFO: No rate limiting detected in concurrent test") + } + }) + }) + + ginkgo.Context("Edge cases", func() { + ginkgo.It("should handle rate limiting with context timeout", func() { + // Test that rate limiting works correctly when combined with context timeouts + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Create context with timeout + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + // Make rapid requests with timeout context + var successCount, rateLimitCount, timeoutCount int + + for range 100 { + ref, pushErr := c.Push(timeoutCtx, record) + if pushErr == nil { + successCount++ + _ = c.Delete(timeoutCtx, ref) + } else if isRateLimitError(pushErr) { + rateLimitCount++ + } else if timeoutCtx.Err() != nil { + timeoutCount++ + + break + } + } + + ginkgo.GinkgoWriter.Printf("Timeout test: %d successful, %d rate limited, %d timeout\n", + successCount, rateLimitCount, timeoutCount) + + gomega.Expect(successCount+rateLimitCount+timeoutCount).To(gomega.BeNumerically(">", 0), + "Should have processed at least some requests") + }) + + ginkgo.It("should maintain rate limit state across multiple operations", func() { + // Verify that rate limiter maintains state correctly across different operations + + record, err := corev1.UnmarshalRecord(testdata.ExpectedRecordV031JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // First batch of requests + ginkgo.By("Making first batch of requests") + for range 50 { + ref, _ := c.Push(ctx, record) + if ref != nil { + _ = c.Delete(ctx, ref) + } + } + + // Immediate second batch (should continue from previous state) + ginkgo.By("Making second batch immediately after") + var secondBatchRateLimited bool + for i := range 50 { + ref, pushErr := c.Push(ctx, record) + if pushErr != nil && isRateLimitError(pushErr) { + secondBatchRateLimited = true + ginkgo.GinkgoWriter.Printf("Second batch rate limited on request %d\n", i+1) + + break + } else if ref != nil { + _ = c.Delete(ctx, ref) + } + } + + if secondBatchRateLimited { + ginkgo.GinkgoWriter.Println("Rate limiter correctly 
maintained state across batches") + } else { + ginkgo.GinkgoWriter.Println("INFO: Did not hit rate limit in second batch") + } + }) + }) +}) diff --git a/e2e/client/client_suite_test.go b/e2e/client/client_suite_test.go index d0cb536ca..b43f1ee08 100644 --- a/e2e/client/client_suite_test.go +++ b/e2e/client/client_suite_test.go @@ -1,29 +1,29 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package client - -import ( - "testing" - - "github.com/agntcy/dir/e2e/shared/config" - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var cfg *config.Config - -func TestClientE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - - var err error - - cfg, err = config.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - if cfg.DeploymentMode != config.DeploymentModeLocal { - t.Skip("Skipping client tests - not in local mode") - } - - ginkgo.RunSpecs(t, "Client Library E2E Test Suite") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package client + +import ( + "testing" + + "github.com/agntcy/dir/e2e/shared/config" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var cfg *config.Config + +func TestClientE2E(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + + var err error + + cfg, err = config.LoadConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if cfg.DeploymentMode != config.DeploymentModeLocal { + t.Skip("Skipping client tests - not in local mode") + } + + ginkgo.RunSpecs(t, "Client Library E2E Test Suite") +} diff --git a/e2e/go.mod b/e2e/go.mod index 56c8c287d..8bd329b4e 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -1,297 +1,297 @@ -module github.com/agntcy/dir/e2e - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api => ../api - github.com/agntcy/dir/cli => ../cli - github.com/agntcy/dir/client => ../client - github.com/agntcy/dir/importer => ../importer - github.com/agntcy/dir/mcp => ../mcp - github.com/agntcy/dir/utils => ../utils -) - -require ( - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/cli v0.6.0 - github.com/agntcy/dir/client v0.6.0 - github.com/agntcy/dir/utils v0.6.0 - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c - github.com/onsi/ginkgo/v2 v2.23.0 - github.com/onsi/gomega v1.36.2 - github.com/spf13/cobra v1.10.2 - github.com/spf13/pflag v1.0.10 - github.com/spf13/viper v1.21.0 - google.golang.org/grpc v1.77.0 -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect - cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth v0.17.0 // indirect - cloud.google.com/go/compute/metadata v0.9.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect - github.com/agntcy/dir/importer v0.6.0 // indirect - github.com/agntcy/dir/mcp v0.6.0 // indirect - github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect - github.com/alecthomas/chroma/v2 v2.20.0 // indirect - github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect - github.com/asaskevich/govalidator 
v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/atotto/clipboard v0.1.4 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect
-	github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect
-	github.com/aws/smithy-go v1.24.0 // indirect
-	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
-	github.com/aymerick/douceur v0.2.0 // indirect
-	github.com/bahlo/generic-list-go v0.2.0 // indirect
-	github.com/blang/semver v3.5.1+incompatible // indirect
-	github.com/buger/jsonparser v1.1.1 // indirect
-	github.com/bytedance/gopkg v0.1.3 // indirect
-	github.com/bytedance/sonic v1.14.1 // indirect
-	github.com/bytedance/sonic/loader v0.3.0 // indirect
-	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
-	github.com/charmbracelet/bubbles v0.21.0 // indirect
-	github.com/charmbracelet/bubbletea v1.3.10 // indirect
-	github.com/charmbracelet/colorprofile v0.3.2 // indirect
-	github.com/charmbracelet/glamour v0.10.0 // indirect
-	github.com/charmbracelet/harmonica v0.2.0 // indirect
-	github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect
-	github.com/charmbracelet/x/ansi v0.10.2 // indirect
-	github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
-	github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect
-	github.com/charmbracelet/x/term v0.2.1 // indirect
-	github.com/cloudwego/base64x v0.1.6 // indirect
-	github.com/cloudwego/eino v0.5.0-alpha.11 // indirect
-	github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect
-	github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect
-	github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect
-	github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
-	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
-	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
-	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
-	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
-	github.com/djherbis/times v1.6.0 // indirect
-	github.com/dlclark/regexp2 v1.11.5 // indirect
-	github.com/docker/cli v29.0.3+incompatible // indirect
-	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.9.4 // indirect
-	github.com/dustin/go-humanize v1.0.1 // indirect
-	github.com/eino-contrib/jsonschema v1.0.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
-	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
-	github.com/evanphx/json-patch v0.5.2 // indirect
-	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/fsnotify/fsnotify v1.9.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
-	github.com/getkin/kin-openapi v0.120.0 // indirect
-	github.com/go-chi/chi/v5 v5.2.3 // indirect
-	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
-	github.com/go-logr/logr v1.4.3 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.24.1 // indirect
-	github.com/go-openapi/errors v0.22.4 // indirect
-	github.com/go-openapi/jsonpointer v0.22.1 // indirect
-	github.com/go-openapi/jsonreference v0.21.3 // indirect
-	github.com/go-openapi/loads v0.23.2 // indirect
-	github.com/go-openapi/runtime v0.29.2 // indirect
-	github.com/go-openapi/spec v0.22.1 // indirect
-	github.com/go-openapi/strfmt v0.25.0 // indirect
-	github.com/go-openapi/swag v0.25.4 // indirect
-	github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
-	github.com/go-openapi/swag/conv v0.25.4 // indirect
-	github.com/go-openapi/swag/fileutils v0.25.4 // indirect
-	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
-	github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
-	github.com/go-openapi/swag/loading v0.25.4 // indirect
-	github.com/go-openapi/swag/mangling v0.25.4 // indirect
-	github.com/go-openapi/swag/netutils v0.25.4 // indirect
-	github.com/go-openapi/swag/stringutils v0.25.4 // indirect
-	github.com/go-openapi/swag/typeutils v0.25.4 // indirect
-	github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
-	github.com/go-openapi/validate v0.25.1 // indirect
-	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
-	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
-	github.com/gobwas/glob v0.2.3 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
-	github.com/google/certificate-transparency-go v1.3.2 // indirect
-	github.com/google/gnostic-models v0.7.0 // indirect
-	github.com/google/go-cmp v0.7.0 // indirect
-	github.com/google/go-containerregistry v0.20.7 // indirect
-	github.com/google/go-github/v73 v73.0.0 // indirect
-	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/jsonschema-go v0.3.0 // indirect
-	github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e // indirect
-	github.com/google/s2a-go v0.1.9 // indirect
-	github.com/google/uuid v1.6.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
-	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
-	github.com/goph/emperror v0.17.2 // indirect
-	github.com/gorilla/css v1.0.1 // indirect
-	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
-	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
-	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
-	github.com/in-toto/attestation v1.1.2 // indirect
-	github.com/in-toto/in-toto-golang v0.9.0 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/invopop/jsonschema v0.13.0 // indirect
-	github.com/invopop/yaml v0.2.0 // indirect
-	github.com/ipfs/go-cid v0.5.0 // indirect
-	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
-	github.com/josharian/intern v1.0.0 // indirect
-	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.1 // indirect
-	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
-	github.com/letsencrypt/boulder v0.20251110.0 // indirect
-	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
-	github.com/libp2p/go-libp2p v0.44.0 // indirect
-	github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
-	github.com/mailru/easyjson v0.9.0 // indirect
-	github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect
-	github.com/mark3labs/mcp-go v0.41.1 // indirect
-	github.com/mark3labs/mcphost v0.31.3 // indirect
-	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/mattn/go-localereader v0.0.1 // indirect
-	github.com/mattn/go-runewidth v0.0.17 // indirect
-	github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect
-	github.com/microcosm-cc/bluemonday v1.0.27 // indirect
-	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
-	github.com/mitchellh/go-homedir v1.1.0 // indirect
-	github.com/moby/term v0.5.2 // indirect
-	github.com/modelcontextprotocol/go-sdk v0.8.0 // indirect
-	github.com/modelcontextprotocol/registry v1.2.3 // indirect
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
-	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
-	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
-	github.com/muesli/cancelreader v0.2.2 // indirect
-	github.com/muesli/reflow v0.3.0 // indirect
-	github.com/muesli/termenv v0.16.0 // indirect
-	github.com/multiformats/go-base32 v0.1.0 // indirect
-	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multiaddr v0.16.0 // indirect
-	github.com/multiformats/go-multibase v0.2.0 // indirect
-	github.com/multiformats/go-multicodec v0.9.1 // indirect
-	github.com/multiformats/go-multihash v0.2.3 // indirect
-	github.com/multiformats/go-varint v0.0.7 // indirect
-	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/nikolalohinski/gonja v1.5.3 // indirect
-	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/ollama/ollama v0.12.9 // indirect
-	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.1 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
-	github.com/perimeterx/marshmallow v1.1.5 // indirect
-	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/sagikazarmark/locafero v0.11.0 // indirect
-	github.com/sassoftware/relic v7.2.1+incompatible // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
-	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/sigstore/cosign/v3 v3.0.3 // indirect
-	github.com/sigstore/protobuf-specs v0.5.0 // indirect
-	github.com/sigstore/rekor v1.4.3 // indirect
-	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
-	github.com/sigstore/sigstore v1.10.0 // indirect
-	github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
-	github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
-	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
-	github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect
-	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
-	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/spf13/afero v1.15.0 // indirect
-	github.com/spf13/cast v1.10.0 // indirect
-	github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
-	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
-	github.com/thales-e-security/pool v0.0.2 // indirect
-	github.com/theupdateframework/go-tuf v0.7.0 // indirect
-	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
-	github.com/tidwall/gjson v1.18.0 // indirect
-	github.com/tidwall/match v1.1.1 // indirect
-	github.com/tidwall/pretty v1.2.1 // indirect
-	github.com/tidwall/sjson v1.2.5 // indirect
-	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
-	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
-	github.com/transparency-dev/merkle v0.0.2 // indirect
-	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	github.com/vbatts/tar-split v0.12.2 // indirect
-	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
-	github.com/x448/float16 v0.8.4 // indirect
-	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
-	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
-	github.com/yargevad/filepathx v1.0.0 // indirect
-	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
-	github.com/yuin/goldmark v1.7.13 // indirect
-	github.com/yuin/goldmark-emoji v1.0.6 // indirect
-	gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect
-	go.mongodb.org/mongo-driver v1.17.6 // indirect
-	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
-	go.opentelemetry.io/otel v1.38.0 // indirect
-	go.opentelemetry.io/otel/metric v1.38.0 // indirect
-	go.opentelemetry.io/otel/trace v1.38.0 // indirect
-	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.27.1 // indirect
-	go.yaml.in/yaml/v2 v2.4.3 // indirect
-	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/arch v0.20.0 // indirect
-	golang.org/x/crypto v0.45.0 // indirect
-	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
-	golang.org/x/mod v0.30.0 // indirect
-	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/oauth2 v0.33.0 // indirect
-	golang.org/x/sync v0.18.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/term v0.37.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
-	golang.org/x/time v0.14.0 // indirect
-	golang.org/x/tools v0.39.0 // indirect
-	google.golang.org/genai v1.22.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
-	google.golang.org/protobuf v1.36.10 // indirect
-	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
-	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.34.2 // indirect
-	k8s.io/apimachinery v0.34.2 // indirect
-	k8s.io/client-go v0.34.2 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
-	k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect
-	lukechampine.com/blake3 v1.4.1 // indirect
-	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
-	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
-	sigs.k8s.io/yaml v1.6.0 // indirect
-)
+module github.com/agntcy/dir/e2e
+
+go 1.25.2
+
+replace (
+	github.com/agntcy/dir/api => ../api
+	github.com/agntcy/dir/cli => ../cli
+	github.com/agntcy/dir/client => ../client
+	github.com/agntcy/dir/importer => ../importer
+	github.com/agntcy/dir/mcp => ../mcp
+	github.com/agntcy/dir/utils => ../utils
+)
+
+require (
+	github.com/agntcy/dir/api v0.6.0
+	github.com/agntcy/dir/cli v0.6.0
+	github.com/agntcy/dir/client v0.6.0
+	github.com/agntcy/dir/utils v0.6.0
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
+	github.com/onsi/ginkgo/v2 v2.23.0
+	github.com/onsi/gomega v1.36.2
+	github.com/spf13/cobra v1.10.2
+	github.com/spf13/pflag v1.0.10
+	github.com/spf13/viper v1.21.0
+	google.golang.org/grpc v1.77.0
+)
+
+require (
+	buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect
+	buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect
+	cloud.google.com/go v0.121.6 // indirect
+	cloud.google.com/go/auth v0.17.0 // indirect
+	cloud.google.com/go/compute/metadata v0.9.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/PuerkitoBio/goquery v1.10.3 // indirect
+	github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
+	github.com/agntcy/dir/importer v0.6.0 // indirect
+	github.com/agntcy/dir/mcp v0.6.0 // indirect
+	github.com/agntcy/oasf-sdk/pkg v0.0.14 // indirect
+	github.com/alecthomas/chroma/v2 v2.20.0 // indirect
+	github.com/andybalholm/cascadia v1.3.3 // indirect
+	github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/atotto/clipboard v0.1.4 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect
+	github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect
+	github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect
+	github.com/aws/smithy-go v1.24.0 // indirect
+	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
+	github.com/aymerick/douceur v0.2.0 // indirect
+	github.com/bahlo/generic-list-go v0.2.0 // indirect
+	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/buger/jsonparser v1.1.1 // indirect
+	github.com/bytedance/gopkg v0.1.3 // indirect
+	github.com/bytedance/sonic v1.14.1 // indirect
+	github.com/bytedance/sonic/loader v0.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+	github.com/charmbracelet/bubbles v0.21.0 // indirect
+	github.com/charmbracelet/bubbletea v1.3.10 // indirect
+	github.com/charmbracelet/colorprofile v0.3.2 // indirect
+	github.com/charmbracelet/glamour v0.10.0 // indirect
+	github.com/charmbracelet/harmonica v0.2.0 // indirect
+	github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect
+	github.com/charmbracelet/x/ansi v0.10.2 // indirect
+	github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
+	github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect
+	github.com/charmbracelet/x/term v0.2.1 // indirect
+	github.com/cloudwego/base64x v0.1.6 // indirect
+	github.com/cloudwego/eino v0.5.0-alpha.11 // indirect
+	github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect
+	github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect
+	github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect
+	github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
+	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
+	github.com/djherbis/times v1.6.0 // indirect
+	github.com/dlclark/regexp2 v1.11.5 // indirect
+	github.com/docker/cli v29.0.3+incompatible // indirect
+	github.com/docker/distribution v2.8.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.9.4 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/eino-contrib/jsonschema v1.0.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/evanphx/json-patch v0.5.2 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.10 // indirect
+	github.com/getkin/kin-openapi v0.120.0 // indirect
+	github.com/go-chi/chi/v5 v5.2.3 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/analysis v0.24.1 // indirect
+	github.com/go-openapi/errors v0.22.4 // indirect
+	github.com/go-openapi/jsonpointer v0.22.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.3 // indirect
+	github.com/go-openapi/loads v0.23.2 // indirect
+	github.com/go-openapi/runtime v0.29.2 // indirect
+	github.com/go-openapi/spec v0.22.1 // indirect
+	github.com/go-openapi/strfmt v0.25.0 // indirect
+	github.com/go-openapi/swag v0.25.4 // indirect
+	github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
+	github.com/go-openapi/swag/conv v0.25.4 // indirect
+	github.com/go-openapi/swag/fileutils v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
+	github.com/go-openapi/swag/loading v0.25.4 // indirect
+	github.com/go-openapi/swag/mangling v0.25.4 // indirect
+	github.com/go-openapi/swag/netutils v0.25.4 // indirect
+	github.com/go-openapi/swag/stringutils v0.25.4 // indirect
+	github.com/go-openapi/swag/typeutils v0.25.4 // indirect
+	github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
+	github.com/go-openapi/validate v0.25.1 // indirect
+	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/google/certificate-transparency-go v1.3.2 // indirect
+	github.com/google/gnostic-models v0.7.0 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/go-containerregistry v0.20.7 // indirect
+	github.com/google/go-github/v73 v73.0.0 // indirect
+	github.com/google/go-querystring v1.1.0 // indirect
+	github.com/google/jsonschema-go v0.3.0 // indirect
+	github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e // indirect
+	github.com/google/s2a-go v0.1.9 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect
+	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
+	github.com/goph/emperror v0.17.2 // indirect
+	github.com/gorilla/css v1.0.1 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
+	github.com/in-toto/attestation v1.1.2 // indirect
+	github.com/in-toto/in-toto-golang v0.9.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/invopop/jsonschema v0.13.0 // indirect
+	github.com/invopop/yaml v0.2.0 // indirect
+	github.com/ipfs/go-cid v0.5.0 // indirect
+	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.18.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
+	github.com/letsencrypt/boulder v0.20251110.0 // indirect
+	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+	github.com/libp2p/go-libp2p v0.44.0 // indirect
+	github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
+	github.com/mailru/easyjson v0.9.0 // indirect
+	github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect
+	github.com/mark3labs/mcp-go v0.41.1 // indirect
+	github.com/mark3labs/mcphost v0.31.3 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-localereader v0.0.1 // indirect
+	github.com/mattn/go-runewidth v0.0.17 // indirect
+	github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect
+	github.com/microcosm-cc/bluemonday v1.0.27 // indirect
+	github.com/miekg/pkcs11 v1.1.1 // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/modelcontextprotocol/go-sdk v0.8.0 // indirect
+	github.com/modelcontextprotocol/registry v1.2.3 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
+	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
+	github.com/muesli/cancelreader v0.2.2 // indirect
+	github.com/muesli/reflow v0.3.0 // indirect
+	github.com/muesli/termenv v0.16.0 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multiaddr v0.16.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multicodec v0.9.1 // indirect
+	github.com/multiformats/go-multihash v0.2.3 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nikolalohinski/gonja v1.5.3 // indirect
+	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/ollama/ollama v0.12.9 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+	github.com/perimeterx/marshmallow v1.1.5 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/sagikazarmark/locafero v0.11.0 // indirect
+	github.com/sassoftware/relic v7.2.1+incompatible // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
+	github.com/sigstore/cosign/v3 v3.0.3 // indirect
+	github.com/sigstore/protobuf-specs v0.5.0 // indirect
+	github.com/sigstore/rekor v1.4.3 // indirect
+	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
+	github.com/sigstore/sigstore v1.10.0 // indirect
+	github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
+	github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
+	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
+	github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect
+	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
+	github.com/spf13/cast v1.10.0 // indirect
+	github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+	github.com/thales-e-security/pool v0.0.2 // indirect
+	github.com/theupdateframework/go-tuf v0.7.0 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
+	github.com/tidwall/gjson v1.18.0 // indirect
+	github.com/tidwall/match v1.1.1 // indirect
+	github.com/tidwall/pretty v1.2.1 // indirect
+	github.com/tidwall/sjson v1.2.5 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/vbatts/tar-split v0.12.2 // indirect
+	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+	github.com/yargevad/filepathx v1.0.0 // indirect
+	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
+	github.com/yuin/goldmark v1.7.13 // indirect
+	github.com/yuin/goldmark-emoji v1.0.6 // indirect
+	gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect
+	go.mongodb.org/mongo-driver v1.17.6 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
+	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
+	go.opentelemetry.io/otel/trace v1.38.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.27.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.3 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
+	golang.org/x/arch v0.20.0 // indirect
+	golang.org/x/crypto v0.45.0 // indirect
+	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
+	golang.org/x/mod v0.30.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
+	golang.org/x/oauth2 v0.33.0 // indirect
+	golang.org/x/sync v0.18.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.37.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
+	golang.org/x/time v0.14.0 // indirect
+	golang.org/x/tools v0.39.0 // indirect
+	google.golang.org/genai v1.22.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
+	google.golang.org/protobuf v1.36.10 // indirect
+	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.34.2 // indirect
+	k8s.io/apimachinery v0.34.2 // indirect
+	k8s.io/client-go v0.34.2 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
+	k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect
+	lukechampine.com/blake3 v1.4.1 // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+	sigs.k8s.io/yaml v1.6.0 // indirect
+)
diff --git a/e2e/go.sum b/e2e/go.sum
index a27f3754d..75f1e2a9a 100644
--- a/e2e/go.sum
+++ b/e2e/go.sum
@@ -1,1007 +1,1007 @@
-al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA=
-al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE=
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE=
-cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
-cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
-cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
-cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
-cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
-cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
-cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
-cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k=
-cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g=
-cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
-cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
-filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
-filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
-github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
-github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k=
-github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ=
-github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
-github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
-github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
-github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
-github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
-github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
-github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
-github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I=
-github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM=
-github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
-github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
-github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
-github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw=
-github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA=
-github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg=
-github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
-github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
-github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
-github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
-github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI=
-github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
-github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
-github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
-github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
-github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko=
-github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
-github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
-github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
-github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
-github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
-github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
-github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
-github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
-github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
-github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
-github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
-github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8=
-github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY=
-github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
-github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
-github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
-github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
-github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
-github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
-github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
-github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
-github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
-github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI=
-github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI=
-github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
-github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
-github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
-github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
-github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
-github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
-github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw=
-github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8=
-github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
-github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
-github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
-github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
-github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4=
-github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms=
-github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
-github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
-github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
-github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc=
-github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc=
-github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0=
-github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ=
-github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw=
-github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo=
-github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM=
-github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8=
-github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c=
-github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
-github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
-github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
-github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
-github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
-github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
-github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
-github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
-github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
-github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
-github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
-github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
-github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww=
-github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4=
-github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
-github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
-github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
-github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
-github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
-github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
-github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
-github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
-github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
-github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
-github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
-github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
-github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg=
-github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
-github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
-github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
-github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
-github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
-github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
-github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
-github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
-github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
-github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
-github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
-github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
-github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
-github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
-github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
-github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
-github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
-github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
-github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
-github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
-github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
-github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
-github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
-github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
-github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
-github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
-github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
-github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
-github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
-github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
-github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
-github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
-github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
-github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
-github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
-github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
-github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
-github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
-github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
-github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
-github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
-github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
-github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
-github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
-github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
-github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
-github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
-github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
-github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
-github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
-github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
-github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
-github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
-github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
-github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
-github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
-github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
-github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
-github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
-github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24=
-github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw=
-github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
-github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q=
-github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE=
-github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
-github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
-github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
-github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
-github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
-github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
-github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
-github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
-github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
-github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
-github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
-github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
-github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
-github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
-github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
-github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
-github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
-github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
-github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
-github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
-github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
-github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
-github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
-github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
-github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
-github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
-github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
-github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
-github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
-github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
-github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
-github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
-github.com/klauspost/cpuid/v2 v2.3.0
h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= -github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= -github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= -github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88= -github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ= -github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= -github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= -github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM= -github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= -github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= -github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8= -github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY= 
-github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= -github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= -github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= -github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c= -github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs= -github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88= -github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= -github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= -github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= -github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv 
v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= -github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= -github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= -github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= -github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54= -github.com/ollama/ollama v0.12.9/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod 
h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= -github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= -github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
-github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod 
h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg= -github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= -github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= -github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= -github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= -github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod 
h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= -github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= -github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= -github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod 
h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= -github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= -github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= -github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= -github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= -github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= -github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= -gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod 
h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= -go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term 
v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= 
-google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= -google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= -lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go 
v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod 
h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k= +github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= +github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= +github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= +github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= +github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= +github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= +github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= +github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI= +github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= +github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod 
h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= +github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= 
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8= +github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY= +github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w= +github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= +github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg= +github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= +github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4= +github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= +github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= +github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY= +github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk= +github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= +github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE= +github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA= +github.com/charmbracelet/x/ansi v0.10.2 
h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw= +github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8= +github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= +github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= +github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4= +github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc= +github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc= +github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0= +github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ= +github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw= +github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo= +github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM= +github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8= +github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c= +github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= +github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= +github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww= +github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput 
v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg= +github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= +github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= 
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= +github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= +github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= +github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= +github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= +github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= 
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= +github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= +github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= +github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= +github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= +github.com/gopherjs/gopherjs 
v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= +github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= 
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= +github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= +github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= +github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= +github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= +github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= +github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= +github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88= +github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ= +github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= +github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= +github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM= +github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader 
v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= +github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8= +github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= +github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c= +github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs= +github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88= +github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= 
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= +github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= +github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= +github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= +github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54= +github.com/ollama/ollama v0.12.9/go.mod 
h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.23.0 h1:FA1xjp8ieYDzlgS5ABTpdUDB7wtngggONc8a7ku2NqQ= +github.com/onsi/ginkgo/v2 v2.23.0/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= 
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= +github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= +github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 
h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= +github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI= +github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= 
+github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= +github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= 
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod 
h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= +github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= +github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= +github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= +github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= +github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= +github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= +gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= +gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod 
h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry 
v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= +google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 
v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod 
h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/e2e/local/01_storage_test.go b/e2e/local/01_storage_test.go index df4bb3aa7..4b67ed612 100644 --- a/e2e/local/01_storage_test.go +++ b/e2e/local/01_storage_test.go @@ -1,236 +1,236 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - _ "embed" - "os" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("Running dirctl end-to-end tests using a local single node deployment", func() { - var cli *utils.CLI - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - // Initialize CLI helper - cli = utils.NewCLI() - }) - - // Setup temp directory for all tests - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - - // Test cases for each OASF version - testVersions := []struct { - name string - fileName string - jsonData []byte - expectedAgentName string - expectedSkillIDs []string - expectedSkillNames []string - expectedLocator string - expectedModule string - shouldFailPush bool // If true, push should fail (validation failure test) - }{ - { - name: "OASF_0.3.1_Record", - fileName: "oasf_0.3.1_record_test.json", - jsonData: testdata.ExpectedRecordV031JSON, - expectedAgentName: "directory.agntcy.org/cisco/marketing-strategy-v1", - expectedSkillIDs: []string{"10201", "10702"}, - expectedSkillNames: []string{ - "Natural Language Processing/Text Completion", - "Natural Language Processing/Problem Solving", - }, - expectedLocator: "docker-image:https://ghcr.io/agntcy/marketing-strategy", - expectedModule: "", // 0.3.1 schema doesn't have modules - shouldFailPush: false, - }, - { - name: "OASF_0.7.0_Record", - fileName: "oasf_0.7.0_record_test.json", - jsonData: testdata.ExpectedRecordV070JSON, - expectedAgentName: "directory.agntcy.org/cisco/marketing-strategy-v3", - expectedSkillIDs: []string{"10201", "10702"}, - expectedSkillNames: []string{ - "natural_language_processing/natural_language_generation/text_completion", - "natural_language_processing/analytical_reasoning/problem_solving", - }, - expectedLocator: "docker_image:https://ghcr.io/agntcy/marketing-strategy", - expectedModule: "runtime/model", - shouldFailPush: false, - }, - { - name: "OASF_0.8.0_Record", - fileName: "oasf_0.8.0_record_test.json", - jsonData: testdata.ExpectedRecordV080JSON, - expectedAgentName: "directory.agntcy.org/example/research-assistant-v4", - expectedSkillIDs: []string{"10201", "10702"}, - expectedSkillNames: []string{ - "natural_language_processing/natural_language_generation/text_completion", - "natural_language_processing/analytical_reasoning/problem_solving", - }, - expectedLocator: "docker_image:https://ghcr.io/agntcy/research-assistant", - expectedModule: "core/llm/model", - shouldFailPush: false, - }, - } - - // Test each OASF version (V1, V2, V3) to identify JSON marshal/unmarshal issues - for _, v := range testVersions { - version := v // Capture loop variable by value to avoid closure issues - ginkgo.Context(version.name, ginkgo.Ordered, ginkgo.Serial, func() { - var cid string - - // Setup file path and create file - tempPath := filepath.Join(tempDir, version.fileName) - - // Create directory and write record data 
once per version - _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) - _ = os.WriteFile(tempPath, version.jsonData, 0o600) - - // Step 1: Push - ginkgo.It("should successfully push an record", func() { - if version.shouldFailPush { - // For validation failure tests, expect push to fail - _ = cli.Push(tempPath).WithArgs("--output", "raw").ShouldFail() - - return - } - - cid = cli.Push(tempPath).WithArgs("--output", "raw").ShouldSucceed() - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, tempPath) - }) - - // Step 2: Pull (depends on push) - ginkgo.It("should successfully pull an existing record", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping pull test - push failed validation") - } - cli.Pull(cid).ShouldSucceed() - }) - - // Step 3: Verify push/pull consistency (depends on pull) - ginkgo.It("should return identical record when pulled after push", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping consistency test - push failed validation") - } - // Pull the record and get the output JSON - pulledJSON := cli.Pull(cid).WithArgs("--output", "json").ShouldSucceed() - - // Compare original embedded JSON with pulled JSON using version-aware comparison - equal, err := utils.CompareOASFRecords(version.jsonData, []byte(pulledJSON)) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), - "JSON comparison should not error for %s", version.name) - gomega.Expect(equal).To(gomega.BeTrue(), - "PUSH/PULL MISMATCH for %s: Original and pulled record should be identical. "+ - "This indicates data loss during push/pull cycle - possibly the skills issue!", version.name) - }) - - // Step 4: Verify duplicate push returns same CID (depends on push) - ginkgo.It("should push the same record again and return the same cid", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping duplicate push test - push failed validation") - } - cli.Push(tempPath).WithArgs("--output", "raw").ShouldReturn(cid) - }) - - // Step 5: Search by first skill (depends on push) - ginkgo.It("should search for records with first skill and return their CID", func() { - if version.shouldFailPush || len(version.expectedSkillIDs) == 0 { - ginkgo.Skip("Skipping search test - push failed validation or no skills") - } - // This test will FAIL if skills are lost during JSON marshal/unmarshal - // or during the push/pull process, helping identify the root cause - search := cli.Search(). - WithLimit(10). - WithOffset(0). - WithArgs("--output", "raw"). - WithName(version.expectedAgentName). // Use version-specific record name to prevent conflicts between V1/V2/V3 tests - WithSkillID(version.expectedSkillIDs[0]). 
- WithSkillName(version.expectedSkillNames[0]) - - // Add locator and module queries only if they exist (not empty for minimal test) - if version.expectedLocator != "" { - search = search.WithLocator(version.expectedLocator) - } - if version.expectedModule != "" { - search = search.WithModule(version.expectedModule) - } - - search.ShouldContain(cid) - }) - - // Step 6: Search by second skill (depends on push) - ginkgo.It("should search for records with second skill and return their CID", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping search test - push failed validation") - } - // This test specifically checks the second skill to ensure ALL skills are preserved - // Skip if there's only one skill (like in minimal test) - if len(version.expectedSkillIDs) < 2 { - ginkgo.Skip("Skipping second skill test - only one skill in test data") - } - - search := cli.Search(). - WithLimit(10). - WithOffset(0). - WithArgs("--output", "raw"). - WithName(version.expectedAgentName). // Use version-specific record name to prevent conflicts between V1/V2/V3 tests - WithSkillID(version.expectedSkillIDs[1]). - WithSkillName(version.expectedSkillNames[1]) - - // Add locator and module queries only if they exist (not empty for minimal test) - if version.expectedLocator != "" { - search = search.WithLocator(version.expectedLocator) - } - if version.expectedModule != "" { - search = search.WithModule(version.expectedModule) - } - - search.ShouldContain(cid) - }) - - // Step 7: Test non-existent pull (independent test) - ginkgo.It("should pull a non-existent record and return an error", func() { - _ = cli.Pull("non-existent-CID").ShouldFail() - }) - - // Step 8: Delete (depends on previous steps) - ginkgo.It("should successfully delete an record", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping delete test - push failed validation") - } - cli.Delete(cid).ShouldSucceed() - }) - - // Step 9: Verify deletion (depends on delete) - ginkgo.It("should fail to pull a deleted record", func() { - if version.shouldFailPush { - ginkgo.Skip("Skipping deletion verification - push failed validation") - } - // Add a small delay to ensure delete operation is fully processed - time.Sleep(100 * time.Millisecond) - - _ = cli.Pull(cid).ShouldFail() - }) - }) - } -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + _ "embed" + "os" + "path/filepath" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var _ = ginkgo.Describe("Running dirctl end-to-end tests using a local single node deployment", func() { + var cli *utils.CLI + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + // Initialize CLI helper + cli = utils.NewCLI() + }) + + // Setup temp directory for all tests + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + + // Test cases for each OASF version + testVersions := []struct { + name string + fileName string + jsonData []byte + expectedAgentName string + expectedSkillIDs []string + expectedSkillNames []string + expectedLocator string + expectedModule string + shouldFailPush bool // If true, push should fail (validation failure test) + }{ + { + name: "OASF_0.3.1_Record", + fileName: 
"oasf_0.3.1_record_test.json", + jsonData: testdata.ExpectedRecordV031JSON, + expectedAgentName: "directory.agntcy.org/cisco/marketing-strategy-v1", + expectedSkillIDs: []string{"10201", "10702"}, + expectedSkillNames: []string{ + "Natural Language Processing/Text Completion", + "Natural Language Processing/Problem Solving", + }, + expectedLocator: "docker-image:https://ghcr.io/agntcy/marketing-strategy", + expectedModule: "", // 0.3.1 schema doesn't have modules + shouldFailPush: false, + }, + { + name: "OASF_0.7.0_Record", + fileName: "oasf_0.7.0_record_test.json", + jsonData: testdata.ExpectedRecordV070JSON, + expectedAgentName: "directory.agntcy.org/cisco/marketing-strategy-v3", + expectedSkillIDs: []string{"10201", "10702"}, + expectedSkillNames: []string{ + "natural_language_processing/natural_language_generation/text_completion", + "natural_language_processing/analytical_reasoning/problem_solving", + }, + expectedLocator: "docker_image:https://ghcr.io/agntcy/marketing-strategy", + expectedModule: "runtime/model", + shouldFailPush: false, + }, + { + name: "OASF_0.8.0_Record", + fileName: "oasf_0.8.0_record_test.json", + jsonData: testdata.ExpectedRecordV080JSON, + expectedAgentName: "directory.agntcy.org/example/research-assistant-v4", + expectedSkillIDs: []string{"10201", "10702"}, + expectedSkillNames: []string{ + "natural_language_processing/natural_language_generation/text_completion", + "natural_language_processing/analytical_reasoning/problem_solving", + }, + expectedLocator: "docker_image:https://ghcr.io/agntcy/research-assistant", + expectedModule: "core/llm/model", + shouldFailPush: false, + }, + } + + // Test each OASF version (V1, V2, V3) to identify JSON marshal/unmarshal issues + for _, v := range testVersions { + version := v // Capture loop variable by value to avoid closure issues + ginkgo.Context(version.name, ginkgo.Ordered, ginkgo.Serial, func() { + var cid string + + // Setup file path and create file + tempPath := filepath.Join(tempDir, version.fileName) + + // Create directory and write record data once per version + _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) + _ = os.WriteFile(tempPath, version.jsonData, 0o600) + + // Step 1: Push + ginkgo.It("should successfully push an record", func() { + if version.shouldFailPush { + // For validation failure tests, expect push to fail + _ = cli.Push(tempPath).WithArgs("--output", "raw").ShouldFail() + + return + } + + cid = cli.Push(tempPath).WithArgs("--output", "raw").ShouldSucceed() + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, tempPath) + }) + + // Step 2: Pull (depends on push) + ginkgo.It("should successfully pull an existing record", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping pull test - push failed validation") + } + cli.Pull(cid).ShouldSucceed() + }) + + // Step 3: Verify push/pull consistency (depends on pull) + ginkgo.It("should return identical record when pulled after push", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping consistency test - push failed validation") + } + // Pull the record and get the output JSON + pulledJSON := cli.Pull(cid).WithArgs("--output", "json").ShouldSucceed() + + // Compare original embedded JSON with pulled JSON using version-aware comparison + equal, err := utils.CompareOASFRecords(version.jsonData, []byte(pulledJSON)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + "JSON comparison should not error for %s", version.name) + gomega.Expect(equal).To(gomega.BeTrue(), + "PUSH/PULL 
MISMATCH for %s: Original and pulled record should be identical. "+ + "This indicates data loss during push/pull cycle - possibly the skills issue!", version.name) + }) + + // Step 4: Verify duplicate push returns same CID (depends on push) + ginkgo.It("should push the same record again and return the same cid", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping duplicate push test - push failed validation") + } + cli.Push(tempPath).WithArgs("--output", "raw").ShouldReturn(cid) + }) + + // Step 5: Search by first skill (depends on push) + ginkgo.It("should search for records with first skill and return their CID", func() { + if version.shouldFailPush || len(version.expectedSkillIDs) == 0 { + ginkgo.Skip("Skipping search test - push failed validation or no skills") + } + // This test will FAIL if skills are lost during JSON marshal/unmarshal + // or during the push/pull process, helping identify the root cause + search := cli.Search(). + WithLimit(10). + WithOffset(0). + WithArgs("--output", "raw"). + WithName(version.expectedAgentName). // Use version-specific record name to prevent conflicts between V1/V2/V3 tests + WithSkillID(version.expectedSkillIDs[0]). + WithSkillName(version.expectedSkillNames[0]) + + // Add locator and module queries only if they exist (not empty for minimal test) + if version.expectedLocator != "" { + search = search.WithLocator(version.expectedLocator) + } + if version.expectedModule != "" { + search = search.WithModule(version.expectedModule) + } + + search.ShouldContain(cid) + }) + + // Step 6: Search by second skill (depends on push) + ginkgo.It("should search for records with second skill and return their CID", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping search test - push failed validation") + } + // This test specifically checks the second skill to ensure ALL skills are preserved + // Skip if there's only one skill (like in minimal test) + if len(version.expectedSkillIDs) < 2 { + ginkgo.Skip("Skipping second skill test - only one skill in test data") + } + + search := cli.Search(). + WithLimit(10). + WithOffset(0). + WithArgs("--output", "raw"). + WithName(version.expectedAgentName). // Use version-specific record name to prevent conflicts between V1/V2/V3 tests + WithSkillID(version.expectedSkillIDs[1]). 
+ WithSkillName(version.expectedSkillNames[1]) + + // Add locator and module queries only if they exist (not empty for minimal test) + if version.expectedLocator != "" { + search = search.WithLocator(version.expectedLocator) + } + if version.expectedModule != "" { + search = search.WithModule(version.expectedModule) + } + + search.ShouldContain(cid) + }) + + // Step 7: Test non-existent pull (independent test) + ginkgo.It("should pull a non-existent record and return an error", func() { + _ = cli.Pull("non-existent-CID").ShouldFail() + }) + + // Step 8: Delete (depends on previous steps) + ginkgo.It("should successfully delete an record", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping delete test - push failed validation") + } + cli.Delete(cid).ShouldSucceed() + }) + + // Step 9: Verify deletion (depends on delete) + ginkgo.It("should fail to pull a deleted record", func() { + if version.shouldFailPush { + ginkgo.Skip("Skipping deletion verification - push failed validation") + } + // Add a small delay to ensure delete operation is fully processed + time.Sleep(100 * time.Millisecond) + + _ = cli.Pull(cid).ShouldFail() + }) + }) + } +}) diff --git a/e2e/local/02_search_test.go b/e2e/local/02_search_test.go index b97006dcf..3e46178c5 100644 --- a/e2e/local/02_search_test.go +++ b/e2e/local/02_search_test.go @@ -1,367 +1,367 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl -package local - -import ( - "os" - "path/filepath" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Test data for OASF 0.8.0 record: -// - name: "directory.agntcy.org/example/research-assistant-v4" -// - version: "v4.0.0" -// - schema_version: "0.8.0" -// - authors: ["AGNTCY Contributors"] -// - created_at: "2025-03-19T17:06:37Z" -// - skills: [10201: "natural_language_processing/.../text_completion", 10702: ".../problem_solving"] -// - locators: [docker_image: "https://ghcr.io/agntcy/research-assistant"] -// - domains: [301: "life_science/biotechnology"] -// - modules: [10201: "core/llm/model"] - -var _ = ginkgo.Describe("Search functionality for OASF 0.8.0 records", func() { - var cli *utils.CLI - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - cli = utils.NewCLI() - }) - - var ( - tempDir string - recordPath string - recordCID string - ) - - ginkgo.Context("search with format=cid (default)", ginkgo.Ordered, func() { - ginkgo.BeforeAll(func() { - var err error - tempDir, err = os.MkdirTemp("", "search-test") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - recordPath = filepath.Join(tempDir, "record_080.json") - err = os.WriteFile(recordPath, testdata.ExpectedRecordV080JSON, 0o600) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - recordCID = cli.Push(recordPath).WithArgs("--output", "raw").ShouldSucceed() - gomega.Expect(recordCID).NotTo(gomega.BeEmpty()) - }) - - ginkgo.AfterAll(func() { - if tempDir != "" { - _ = os.RemoveAll(tempDir) - } - }) - - // Core exact match searches - ginkgo.Context("exact match searches", func() { - ginkgo.It("finds record by name", func() { - output := cli.Search(). - WithName("directory.agntcy.org/example/research-assistant-v4"). 
- ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record by version", func() { - output := cli.Search(). - WithVersion("v4.0.0"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record by skill ID", func() { - output := cli.Search(). - WithSkillID("10201"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record by author", func() { - output := cli.Search(). - WithAuthor("AGNTCY Contributors"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record by schema version", func() { - output := cli.Search(). - WithSchemaVersion("0.8.0"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - }) - - // Wildcard pattern searches - ginkgo.Context("wildcard searches", func() { - ginkgo.It("finds record with asterisk wildcard", func() { - output := cli.Search(). - WithName("*research-assistant*"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record with question mark wildcard", func() { - output := cli.Search(). - WithVersion("v?.0.0"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record with character class", func() { - output := cli.Search(). - WithVersion("v[0-9].0.0"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record with negated character class", func() { - output := cli.Search(). - WithVersion("v[^0-3].0.0"). // v4.0.0, 4 is not in [0-3] - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("finds record with mixed wildcards", func() { - output := cli.Search(). - WithName("*[e]xample/research-assistant-v?"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - }) - - // Filter logic - ginkgo.Context("filter logic", func() { - ginkgo.It("applies AND logic between different fields", func() { - output := cli.Search(). - WithName("*research-assistant*"). - WithVersion("v4.*"). - WithSkillID("10201"). - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("returns no results when AND filters conflict", func() { - output := cli.Search(). - WithName("*research-assistant*"). - WithVersion("v1.*"). // Record has v4.0.0 - ShouldSucceed() - gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("applies OR logic for multiple values of same field", func() { - output := cli.Search(). - WithVersion("v1.0.0"). - WithVersion("v4.0.0"). // This matches - ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) - }) - }) - - // Negative tests - ginkgo.Context("negative tests", func() { - ginkgo.It("returns no results for non-matching query", func() { - output := cli.Search(). - WithName("nonexistent-agent"). - ShouldSucceed() - gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID)) - }) - - ginkgo.It("returns no results for negated class that excludes match", func() { - output := cli.Search(). - WithVersion("v[^4].0.0"). 
// v4.0.0, but [^4] excludes 4
-					ShouldSucceed()
-				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
-			})
-		})
-
-		// Pagination
-		ginkgo.Context("pagination", func() {
-			ginkgo.It("respects limit and offset parameters", func() {
-				output := cli.Search().
-					WithName("*research-assistant*").
-					WithOffset(0).
-					WithLimit(10).
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-		})
-
-		ginkgo.Context("comparison operators", func() {
-			ginkgo.It("finds record with version >= v3.0.0", func() {
-				output := cli.Search().
-					WithVersion(">=v3.0.0").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("finds record with version <= v5.0.0", func() {
-				output := cli.Search().
-					WithVersion("<=v5.0.0").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("finds record with version > v3.0.0", func() {
-				output := cli.Search().
-					WithVersion(">v3.0.0").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("does not find record with version < v4.0.0", func() {
-				output := cli.Search().
-					WithVersion("<v4.0.0").
-					ShouldSucceed()
-				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("finds record with schema-version >= 0.7.0", func() {
-				output := cli.Search().
-					WithSchemaVersion(">=0.7.0").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("does not find record with schema-version > 0.8.0", func() {
-				output := cli.Search().
-					WithSchemaVersion(">0.8.0").
-					ShouldSucceed()
-				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("finds record with created-at >= 2025-01-01", func() {
-				output := cli.Search().
-					WithCreatedAt(">=2025-01-01").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("does not find record with created-at < 2025-01-01", func() {
-				output := cli.Search().
-					WithCreatedAt("<2025-01-01").
-					ShouldSucceed()
-				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
-			})
-
-			ginkgo.It("finds record within version range", func() {
-				output := cli.Search().
-					WithVersion(">=v3.0.0").
-					WithVersion("<=v5.0.0").
-					ShouldSucceed()
-				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
-			})
-		})
-	})
-
-	ginkgo.Context("search with format=record", ginkgo.Ordered, func() {
-		ginkgo.BeforeAll(func() {
-			var err error
-			tempDir, err = os.MkdirTemp("", "search-records-test")
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-			recordPath = filepath.Join(tempDir, "record_080.json")
-			err = os.WriteFile(recordPath, testdata.ExpectedRecordV080JSON, 0o600)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-			recordCID = cli.Push(recordPath).WithArgs("--output", "raw").ShouldSucceed()
-			gomega.Expect(recordCID).NotTo(gomega.BeEmpty())
-		})
-
-		ginkgo.AfterAll(func() {
-			if tempDir != "" {
-				_ = os.RemoveAll(tempDir)
-			}
-		})
-
-		ginkgo.It("returns full record data with JSON output", func() {
-			output := cli.SearchRecords().
-				WithName("directory.agntcy.org/example/research-assistant-v4").
-				WithArgs("--output", "json").
-				ShouldSucceed()
-
-			// Verify record fields are present in output
-			gomega.Expect(output).To(gomega.ContainSubstring("research-assistant-v4"))
-			gomega.Expect(output).To(gomega.ContainSubstring("v4.0.0"))
-			gomega.Expect(output).To(gomega.ContainSubstring("0.8.0"))
-			gomega.Expect(output).To(gomega.ContainSubstring("AGNTCY Contributors"))
-		})
-
-		ginkgo.It("returns record with skills data", func() {
-			output := cli.SearchRecords().
-				WithSkillID("10201").
- WithArgs("--output", "json"). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("text_completion")) - gomega.Expect(output).To(gomega.ContainSubstring("10201")) - }) - - ginkgo.It("returns record with domain data", func() { - output := cli.SearchRecords(). - WithDomain("life_science/*"). - WithArgs("--output", "json"). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("life_science")) - gomega.Expect(output).To(gomega.ContainSubstring("biotechnology")) - gomega.Expect(output).To(gomega.ContainSubstring("301")) - }) - - ginkgo.It("returns record with locator data", func() { - output := cli.SearchRecords(). - WithLocator("*research-assistant"). - WithArgs("--output", "json"). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("docker_image")) - gomega.Expect(output).To(gomega.ContainSubstring("ghcr.io/agntcy/research-assistant")) - }) - - ginkgo.It("returns record with module data", func() { - output := cli.SearchRecords(). - WithModule("core/*"). - WithArgs("--output", "json"). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("core/llm/model")) - gomega.Expect(output).To(gomega.ContainSubstring("gpt-4")) - }) - - ginkgo.It("supports wildcards like cids command", func() { - output := cli.SearchRecords(). - WithName("*research-assistant-v?"). - WithArgs("--output", "json"). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("research-assistant-v4")) - }) - - ginkgo.It("returns no results for non-matching query", func() { - output := cli.SearchRecords(). - WithName("nonexistent-agent"). - WithArgs("--output", "json"). - ShouldSucceed() - - // Should not contain our record data - gomega.Expect(output).NotTo(gomega.ContainSubstring("research-assistant-v4")) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupl +package local + +import ( + "os" + "path/filepath" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Test data for OASF 0.8.0 record: +// - name: "directory.agntcy.org/example/research-assistant-v4" +// - version: "v4.0.0" +// - schema_version: "0.8.0" +// - authors: ["AGNTCY Contributors"] +// - created_at: "2025-03-19T17:06:37Z" +// - skills: [10201: "natural_language_processing/.../text_completion", 10702: ".../problem_solving"] +// - locators: [docker_image: "https://ghcr.io/agntcy/research-assistant"] +// - domains: [301: "life_science/biotechnology"] +// - modules: [10201: "core/llm/model"] + +var _ = ginkgo.Describe("Search functionality for OASF 0.8.0 records", func() { + var cli *utils.CLI + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + cli = utils.NewCLI() + }) + + var ( + tempDir string + recordPath string + recordCID string + ) + + ginkgo.Context("search with format=cid (default)", ginkgo.Ordered, func() { + ginkgo.BeforeAll(func() { + var err error + tempDir, err = os.MkdirTemp("", "search-test") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + recordPath = filepath.Join(tempDir, "record_080.json") + err = os.WriteFile(recordPath, testdata.ExpectedRecordV080JSON, 0o600) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + recordCID = cli.Push(recordPath).WithArgs("--output", "raw").ShouldSucceed() + 
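// The fixture pushed above (testdata.ExpectedRecordV080JSON) matches the header
+			// comment at the top of this file; an abridged sketch of its shape (field
+			// layout assumed from that comment, not copied from the actual file) is:
+			//
+			//	{
+			//	  "name": "directory.agntcy.org/example/research-assistant-v4",
+			//	  "version": "v4.0.0",
+			//	  "schema_version": "0.8.0",
+			//	  "authors": ["AGNTCY Contributors"],
+			//	  "created_at": "2025-03-19T17:06:37Z",
+			//	  "skills": [{"id": 10201}, {"id": 10702}],
+			//	  "locators": [{"type": "docker_image", "url": "https://ghcr.io/agntcy/research-assistant"}],
+			//	  "domains": [{"id": 301, "name": "life_science/biotechnology"}],
+			//	  "modules": [{"id": 10201, "name": "core/llm/model"}]
+			//	}
+			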
gomega.Expect(recordCID).NotTo(gomega.BeEmpty()) + }) + + ginkgo.AfterAll(func() { + if tempDir != "" { + _ = os.RemoveAll(tempDir) + } + }) + + // Core exact match searches + ginkgo.Context("exact match searches", func() { + ginkgo.It("finds record by name", func() { + output := cli.Search(). + WithName("directory.agntcy.org/example/research-assistant-v4"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record by version", func() { + output := cli.Search(). + WithVersion("v4.0.0"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record by skill ID", func() { + output := cli.Search(). + WithSkillID("10201"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record by author", func() { + output := cli.Search(). + WithAuthor("AGNTCY Contributors"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record by schema version", func() { + output := cli.Search(). + WithSchemaVersion("0.8.0"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + }) + + // Wildcard pattern searches + ginkgo.Context("wildcard searches", func() { + ginkgo.It("finds record with asterisk wildcard", func() { + output := cli.Search(). + WithName("*research-assistant*"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record with question mark wildcard", func() { + output := cli.Search(). + WithVersion("v?.0.0"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record with character class", func() { + output := cli.Search(). + WithVersion("v[0-9].0.0"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record with negated character class", func() { + output := cli.Search(). + WithVersion("v[^0-3].0.0"). // v4.0.0, 4 is not in [0-3] + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("finds record with mixed wildcards", func() { + output := cli.Search(). + WithName("*[e]xample/research-assistant-v?"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + }) + + // Filter logic + ginkgo.Context("filter logic", func() { + ginkgo.It("applies AND logic between different fields", func() { + output := cli.Search(). + WithName("*research-assistant*"). + WithVersion("v4.*"). + WithSkillID("10201"). + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("returns no results when AND filters conflict", func() { + output := cli.Search(). + WithName("*research-assistant*"). + WithVersion("v1.*"). // Record has v4.0.0 + ShouldSucceed() + gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID)) + }) + + ginkgo.It("applies OR logic for multiple values of same field", func() { + output := cli.Search(). + WithVersion("v1.0.0"). + WithVersion("v4.0.0"). // This matches + ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(recordCID)) + }) + }) + + // Negative tests + ginkgo.Context("negative tests", func() { + ginkgo.It("returns no results for non-matching query", func() { + output := cli.Search(). + WithName("nonexistent-agent"). 
+					ShouldSucceed()
+				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("returns no results for negated class that excludes match", func() {
+				output := cli.Search().
+					WithVersion("v[^4].0.0"). // v4.0.0, but [^4] excludes 4
+					ShouldSucceed()
+				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
+			})
+		})
+
+		// Pagination
+		ginkgo.Context("pagination", func() {
+			ginkgo.It("respects limit and offset parameters", func() {
+				output := cli.Search().
+					WithName("*research-assistant*").
+					WithOffset(0).
+					WithLimit(10).
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+		})
+
+		ginkgo.Context("comparison operators", func() {
+			ginkgo.It("finds record with version >= v3.0.0", func() {
+				output := cli.Search().
+					WithVersion(">=v3.0.0").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("finds record with version <= v5.0.0", func() {
+				output := cli.Search().
+					WithVersion("<=v5.0.0").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("finds record with version > v3.0.0", func() {
+				output := cli.Search().
+					WithVersion(">v3.0.0").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("does not find record with version < v4.0.0", func() {
+				output := cli.Search().
+					WithVersion("<v4.0.0").
+					ShouldSucceed()
+				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("finds record with schema-version >= 0.7.0", func() {
+				output := cli.Search().
+					WithSchemaVersion(">=0.7.0").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("does not find record with schema-version > 0.8.0", func() {
+				output := cli.Search().
+					WithSchemaVersion(">0.8.0").
+					ShouldSucceed()
+				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("finds record with created-at >= 2025-01-01", func() {
+				output := cli.Search().
+					WithCreatedAt(">=2025-01-01").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("does not find record with created-at < 2025-01-01", func() {
+				output := cli.Search().
+					WithCreatedAt("<2025-01-01").
+					ShouldSucceed()
+				gomega.Expect(output).NotTo(gomega.ContainSubstring(recordCID))
+			})
+
+			ginkgo.It("finds record within version range", func() {
+				output := cli.Search().
+					WithVersion(">=v3.0.0").
+					WithVersion("<=v5.0.0").
+					ShouldSucceed()
+				gomega.Expect(output).To(gomega.ContainSubstring(recordCID))
+			})
+		})
+	})
+
+	ginkgo.Context("search with format=record", ginkgo.Ordered, func() {
+		ginkgo.BeforeAll(func() {
+			var err error
+			tempDir, err = os.MkdirTemp("", "search-records-test")
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			recordPath = filepath.Join(tempDir, "record_080.json")
+			err = os.WriteFile(recordPath, testdata.ExpectedRecordV080JSON, 0o600)
+			gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+			recordCID = cli.Push(recordPath).WithArgs("--output", "raw").ShouldSucceed()
+			gomega.Expect(recordCID).NotTo(gomega.BeEmpty())
+		})
+
+		ginkgo.AfterAll(func() {
+			if tempDir != "" {
+				_ = os.RemoveAll(tempDir)
+			}
+		})
+
+		ginkgo.It("returns full record data with JSON output", func() {
+			output := cli.SearchRecords().
+				WithName("directory.agntcy.org/example/research-assistant-v4").
+				WithArgs("--output", "json").
+ ShouldSucceed() + + // Verify record fields are present in output + gomega.Expect(output).To(gomega.ContainSubstring("research-assistant-v4")) + gomega.Expect(output).To(gomega.ContainSubstring("v4.0.0")) + gomega.Expect(output).To(gomega.ContainSubstring("0.8.0")) + gomega.Expect(output).To(gomega.ContainSubstring("AGNTCY Contributors")) + }) + + ginkgo.It("returns record with skills data", func() { + output := cli.SearchRecords(). + WithSkillID("10201"). + WithArgs("--output", "json"). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("text_completion")) + gomega.Expect(output).To(gomega.ContainSubstring("10201")) + }) + + ginkgo.It("returns record with domain data", func() { + output := cli.SearchRecords(). + WithDomain("life_science/*"). + WithArgs("--output", "json"). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("life_science")) + gomega.Expect(output).To(gomega.ContainSubstring("biotechnology")) + gomega.Expect(output).To(gomega.ContainSubstring("301")) + }) + + ginkgo.It("returns record with locator data", func() { + output := cli.SearchRecords(). + WithLocator("*research-assistant"). + WithArgs("--output", "json"). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("docker_image")) + gomega.Expect(output).To(gomega.ContainSubstring("ghcr.io/agntcy/research-assistant")) + }) + + ginkgo.It("returns record with module data", func() { + output := cli.SearchRecords(). + WithModule("core/*"). + WithArgs("--output", "json"). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("core/llm/model")) + gomega.Expect(output).To(gomega.ContainSubstring("gpt-4")) + }) + + ginkgo.It("supports wildcards like cids command", func() { + output := cli.SearchRecords(). + WithName("*research-assistant-v?"). + WithArgs("--output", "json"). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("research-assistant-v4")) + }) + + ginkgo.It("returns no results for non-matching query", func() { + output := cli.SearchRecords(). + WithName("nonexistent-agent"). + WithArgs("--output", "json"). 
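+				// The assertions in this context are substring checks against the raw
+				// JSON output; a stricter variant (a sketch mirroring the json.Unmarshal
+				// decoding used by the import tests, and requiring encoding/json) would
+				// decode first:
+				//
+				//	var recs []map[string]any
+				//	gomega.Expect(json.Unmarshal([]byte(output), &recs)).To(gomega.Succeed())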
+ ShouldSucceed() + + // Should not contain our record data + gomega.Expect(output).NotTo(gomega.ContainSubstring("research-assistant-v4")) + }) + }) +}) diff --git a/e2e/local/03_routing_test.go b/e2e/local/03_routing_test.go index f903eeec0..add7799fb 100644 --- a/e2e/local/03_routing_test.go +++ b/e2e/local/03_routing_test.go @@ -1,248 +1,248 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - "os" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("Running dirctl routing commands in local single node deployment", ginkgo.Ordered, func() { - var cli *utils.CLI - var cid string - - // Setup temp record file - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - tempPath := filepath.Join(tempDir, "record_v3_local_routing_test.json") - - // Create directory and write record data - _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) - _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.Context("routing publish command", func() { - ginkgo.It("should push a record first (prerequisite for publish)", func() { - cid = cli.Push(tempPath).WithArgs("--output", "raw").ShouldSucceed() - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, tempPath) - }) - - ginkgo.It("should publish a record to local routing", func() { - output := cli.Routing().Publish(cid).ShouldSucceed() - - // Should confirm successful publishing - gomega.Expect(output).To(gomega.ContainSubstring("Successfully submitted publication request")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - - // Wait for publish operation to complete (publishing is asynchronous) - time.Sleep(utils.PublishProcessingDelay) - }) - - ginkgo.It("should fail to publish non-existent record", func() { - _ = cli.Routing().Publish("non-existent-cid").ShouldFail() - }) - }) - - ginkgo.Context("routing list command", func() { - ginkgo.It("should list all local records without filters", func() { - output := cli.Routing().List().ShouldSucceed() - - // Should show the published record - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should list record by CID", func() { - output := cli.Routing().List().WithCid(cid).ShouldSucceed() - - // Should find the specific record - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should list records by skill filter", func() { - output := cli.Routing().List().WithSkill("natural_language_processing/natural_language_generation/text_completion").ShouldSucceed() - - // Should find records with NLP skills - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - gomega.Expect(output).To(gomega.ContainSubstring("/skills/natural_language_processing")) - }) - - ginkgo.It("should list records by specific skill", func() { - output := 
cli.Routing().List().WithSkill("natural_language_processing/natural_language_generation/text_completion").ShouldSucceed() - - // Should find records with specific skill - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should list records by locator filter", func() { - output := cli.Routing().List().WithLocator("docker_image").ShouldSucceed() - - // Should find records with docker-image locator - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should list records with multiple filters (AND logic)", func() { - output := cli.Routing().List(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). - WithLocator("docker_image"). - ShouldSucceed() - - // Should find records matching both criteria - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should return empty results for non-matching skill", func() { - output := cli.Routing().List().WithSkill("NonExistentSkill").ShouldSucceed() - - // Should not find any records - gomega.Expect(output).NotTo(gomega.ContainSubstring(cid)) - gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) - }) - - ginkgo.It("should return empty results for non-existent CID", func() { - output := cli.Routing().List().WithCid("non-existent-cid").ShouldSucceed() - - // Should show helpful message about using search for network discovery - gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) - }) - - ginkgo.It("should respect limit parameter", func() { - output := cli.Routing().List().WithLimit(1).ShouldSucceed() - - // Should limit results (though we only have one record anyway) - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - }) - }) - - ginkgo.Context("routing search command", func() { - ginkgo.It("should search for local records (but return empty in local mode)", func() { - output := cli.Routing().Search().WithSkill("Natural Language Processing").ShouldSucceed() - - // In local single-node mode, search should find no remote records - // since there are no other peers - gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) - }) - - ginkgo.It("should handle search with multiple criteria", func() { - output := cli.Routing().Search(). - WithSkill("Natural Language Processing"). - WithLocator("docker-image"). - WithLimit(10). - WithMinScore(2). - ShouldSucceed() - - // Should complete without error, but find no remote records in local mode - gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) - }) - - ginkgo.It("should handle OR logic search with partial query matching", func() { - // Test OR logic: 3 queries but only require 2 matches (minScore=2) - // This demonstrates the new OR behavior where records matching ≥2 queries are returned - output := cli.Routing().Search(). - WithSkill("Natural Language Processing/Text Completion"). // Query 1 - might match - WithSkill("Natural Language Processing/Problem Solving"). // Query 2 - might match - WithSkill("NonexistentSkill"). // Query 3 - won't match - WithLimit(10). - WithMinScore(2). 
// Only need 2/3 queries to match - ShouldSucceed() - - // Should complete without error, but find no remote records in local mode - // In network mode with remote records, this would find records matching - // "Natural Language Processing/Text Completion" + "Natural Language Processing/Problem Solving" even without "NonexistentSkill" - gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) - - // Verify the command structure was parsed correctly for OR logic - gomega.Expect(output).NotTo(gomega.ContainSubstring("error")) - }) - }) - - ginkgo.Context("routing info command", func() { - ginkgo.It("should show routing statistics for local records", func() { - output := cli.Routing().Info().ShouldSucceed() - - // Should show local routing summary - gomega.Expect(output).To(gomega.ContainSubstring("Local Routing Summary")) - gomega.Expect(output).To(gomega.ContainSubstring("Total Records: 1")) - gomega.Expect(output).To(gomega.ContainSubstring("Skills Distribution")) - gomega.Expect(output).To(gomega.ContainSubstring("natural_language_processing/natural_language_generation/text_completion")) - }) - - ginkgo.It("should show helpful tips in routing info", func() { - output := cli.Routing().Info().ShouldSucceed() - - // Should provide helpful usage tips - gomega.Expect(output).To(gomega.ContainSubstring("Tips")) - gomega.Expect(output).To(gomega.ContainSubstring("routing list --skill")) - gomega.Expect(output).To(gomega.ContainSubstring("routing search --skill")) - }) - }) - - ginkgo.Context("routing unpublish command", func() { - ginkgo.It("should unpublish a previously published record", func() { - output := cli.Routing().Unpublish(cid).ShouldSucceed() - - // Should confirm successful unpublishing - gomega.Expect(output).To(gomega.ContainSubstring("Successfully unpublished")) - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - }) - - ginkgo.It("should fail to unpublish non-existent record", func() { - _ = cli.Routing().Unpublish("non-existent-cid").ShouldFail() - }) - - ginkgo.It("should not find unpublished record in local list", func() { - // After unpublishing, the record should not appear in local routing - output := cli.Routing().List().WithCid(cid).ShouldSucceed() - - // Should not find the unpublished record - gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) - }) - }) - - ginkgo.Context("routing command integration", func() { - ginkgo.It("should show empty routing info after unpublish", func() { - output := cli.Routing().Info().ShouldSucceed() - - // Should show no records after unpublishing - gomega.Expect(output).To(gomega.ContainSubstring("Local Routing Summary")) - gomega.Expect(output).To(gomega.ContainSubstring("Total Records: 0")) - }) - - ginkgo.It("should validate routing command help", func() { - output := cli.Routing().WithArgs("--help").ShouldSucceed() - - // Should show all routing subcommands - gomega.Expect(output).To(gomega.ContainSubstring("publish")) - gomega.Expect(output).To(gomega.ContainSubstring("unpublish")) - gomega.Expect(output).To(gomega.ContainSubstring("list")) - gomega.Expect(output).To(gomega.ContainSubstring("search")) - gomega.Expect(output).To(gomega.ContainSubstring("info")) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "os" + "path/filepath" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + 
"github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var _ = ginkgo.Describe("Running dirctl routing commands in local single node deployment", ginkgo.Ordered, func() { + var cli *utils.CLI + var cid string + + // Setup temp record file + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + tempPath := filepath.Join(tempDir, "record_v3_local_routing_test.json") + + // Create directory and write record data + _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) + _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + // Initialize CLI helper + cli = utils.NewCLI() + }) + + ginkgo.Context("routing publish command", func() { + ginkgo.It("should push a record first (prerequisite for publish)", func() { + cid = cli.Push(tempPath).WithArgs("--output", "raw").ShouldSucceed() + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, tempPath) + }) + + ginkgo.It("should publish a record to local routing", func() { + output := cli.Routing().Publish(cid).ShouldSucceed() + + // Should confirm successful publishing + gomega.Expect(output).To(gomega.ContainSubstring("Successfully submitted publication request")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + + // Wait for publish operation to complete (publishing is asynchronous) + time.Sleep(utils.PublishProcessingDelay) + }) + + ginkgo.It("should fail to publish non-existent record", func() { + _ = cli.Routing().Publish("non-existent-cid").ShouldFail() + }) + }) + + ginkgo.Context("routing list command", func() { + ginkgo.It("should list all local records without filters", func() { + output := cli.Routing().List().ShouldSucceed() + + // Should show the published record + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should list record by CID", func() { + output := cli.Routing().List().WithCid(cid).ShouldSucceed() + + // Should find the specific record + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should list records by skill filter", func() { + output := cli.Routing().List().WithSkill("natural_language_processing/natural_language_generation/text_completion").ShouldSucceed() + + // Should find records with NLP skills + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + gomega.Expect(output).To(gomega.ContainSubstring("/skills/natural_language_processing")) + }) + + ginkgo.It("should list records by specific skill", func() { + output := cli.Routing().List().WithSkill("natural_language_processing/natural_language_generation/text_completion").ShouldSucceed() + + // Should find records with specific skill + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should list records by locator filter", func() { + output := cli.Routing().List().WithLocator("docker_image").ShouldSucceed() + + // Should find records with docker-image locator + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should list records with multiple 
filters (AND logic)", func() { + output := cli.Routing().List(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). + WithLocator("docker_image"). + ShouldSucceed() + + // Should find records matching both criteria + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should return empty results for non-matching skill", func() { + output := cli.Routing().List().WithSkill("NonExistentSkill").ShouldSucceed() + + // Should not find any records + gomega.Expect(output).NotTo(gomega.ContainSubstring(cid)) + gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) + }) + + ginkgo.It("should return empty results for non-existent CID", func() { + output := cli.Routing().List().WithCid("non-existent-cid").ShouldSucceed() + + // Should show helpful message about using search for network discovery + gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) + }) + + ginkgo.It("should respect limit parameter", func() { + output := cli.Routing().List().WithLimit(1).ShouldSucceed() + + // Should limit results (though we only have one record anyway) + gomega.Expect(output).To(gomega.ContainSubstring("Local records")) + }) + }) + + ginkgo.Context("routing search command", func() { + ginkgo.It("should search for local records (but return empty in local mode)", func() { + output := cli.Routing().Search().WithSkill("Natural Language Processing").ShouldSucceed() + + // In local single-node mode, search should find no remote records + // since there are no other peers + gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) + }) + + ginkgo.It("should handle search with multiple criteria", func() { + output := cli.Routing().Search(). + WithSkill("Natural Language Processing"). + WithLocator("docker-image"). + WithLimit(10). + WithMinScore(2). + ShouldSucceed() + + // Should complete without error, but find no remote records in local mode + gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) + }) + + ginkgo.It("should handle OR logic search with partial query matching", func() { + // Test OR logic: 3 queries but only require 2 matches (minScore=2) + // This demonstrates the new OR behavior where records matching ≥2 queries are returned + output := cli.Routing().Search(). + WithSkill("Natural Language Processing/Text Completion"). // Query 1 - might match + WithSkill("Natural Language Processing/Problem Solving"). // Query 2 - might match + WithSkill("NonexistentSkill"). // Query 3 - won't match + WithLimit(10). + WithMinScore(2). 
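// minScore=2 with three queries means any
+				// record matching at least two of them is returned: matching queries 1
+				// and 2 but not 3 scores 2 and passes, while matching only query 1
+				// scores 1 and is filtered out.
+				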
// Only need 2/3 queries to match + ShouldSucceed() + + // Should complete without error, but find no remote records in local mode + // In network mode with remote records, this would find records matching + // "Natural Language Processing/Text Completion" + "Natural Language Processing/Problem Solving" even without "NonexistentSkill" + gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) + + // Verify the command structure was parsed correctly for OR logic + gomega.Expect(output).NotTo(gomega.ContainSubstring("error")) + }) + }) + + ginkgo.Context("routing info command", func() { + ginkgo.It("should show routing statistics for local records", func() { + output := cli.Routing().Info().ShouldSucceed() + + // Should show local routing summary + gomega.Expect(output).To(gomega.ContainSubstring("Local Routing Summary")) + gomega.Expect(output).To(gomega.ContainSubstring("Total Records: 1")) + gomega.Expect(output).To(gomega.ContainSubstring("Skills Distribution")) + gomega.Expect(output).To(gomega.ContainSubstring("natural_language_processing/natural_language_generation/text_completion")) + }) + + ginkgo.It("should show helpful tips in routing info", func() { + output := cli.Routing().Info().ShouldSucceed() + + // Should provide helpful usage tips + gomega.Expect(output).To(gomega.ContainSubstring("Tips")) + gomega.Expect(output).To(gomega.ContainSubstring("routing list --skill")) + gomega.Expect(output).To(gomega.ContainSubstring("routing search --skill")) + }) + }) + + ginkgo.Context("routing unpublish command", func() { + ginkgo.It("should unpublish a previously published record", func() { + output := cli.Routing().Unpublish(cid).ShouldSucceed() + + // Should confirm successful unpublishing + gomega.Expect(output).To(gomega.ContainSubstring("Successfully unpublished")) + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + }) + + ginkgo.It("should fail to unpublish non-existent record", func() { + _ = cli.Routing().Unpublish("non-existent-cid").ShouldFail() + }) + + ginkgo.It("should not find unpublished record in local list", func() { + // After unpublishing, the record should not appear in local routing + output := cli.Routing().List().WithCid(cid).ShouldSucceed() + + // Should not find the unpublished record + gomega.Expect(output).To(gomega.ContainSubstring("No local records found")) + }) + }) + + ginkgo.Context("routing command integration", func() { + ginkgo.It("should show empty routing info after unpublish", func() { + output := cli.Routing().Info().ShouldSucceed() + + // Should show no records after unpublishing + gomega.Expect(output).To(gomega.ContainSubstring("Local Routing Summary")) + gomega.Expect(output).To(gomega.ContainSubstring("Total Records: 0")) + }) + + ginkgo.It("should validate routing command help", func() { + output := cli.Routing().WithArgs("--help").ShouldSucceed() + + // Should show all routing subcommands + gomega.Expect(output).To(gomega.ContainSubstring("publish")) + gomega.Expect(output).To(gomega.ContainSubstring("unpublish")) + gomega.Expect(output).To(gomega.ContainSubstring("list")) + gomega.Expect(output).To(gomega.ContainSubstring("search")) + gomega.Expect(output).To(gomega.ContainSubstring("info")) + }) + }) +}) diff --git a/e2e/local/04_signature_test.go b/e2e/local/04_signature_test.go index bb71cc0ac..111e315e8 100644 --- a/e2e/local/04_signature_test.go +++ b/e2e/local/04_signature_test.go @@ -1,141 +1,141 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - 
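-// Background for the signing suite below (a sketch; the standalone cosign step
-// shown here is an assumed equivalent of the harness helper, not code from this
-// repository):
-//
-//	COSIGN_PASSWORD=... cosign generate-key-pair   # writes cosign.key / cosign.pub
-//
-// The tests then sign a pushed record by CID and verify the signature server-side.
-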
-package local - -import ( - _ "embed" - "os" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Using the shared record data from embed.go - -// Test constants. -const ( - signTempDirPrefix = "sign-test" -) - -// Test file paths helper. -type signTestPaths struct { - tempDir string - record string - privateKey string - publicKey string - signature string - signatureOutput string -} - -func setupSignTestPaths() *signTestPaths { - tempDir, err := os.MkdirTemp("", signTempDirPrefix) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - return &signTestPaths{ - tempDir: tempDir, - record: filepath.Join(tempDir, "record.json"), - signature: filepath.Join(tempDir, "signature.json"), - signatureOutput: filepath.Join(tempDir, "signature-output.json"), - privateKey: filepath.Join(tempDir, "cosign.key"), - publicKey: filepath.Join(tempDir, "cosign.pub"), - } -} - -var _ = ginkgo.Describe("Running dirctl end-to-end tests to check signature support", func() { - var cli *utils.CLI - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - // Initialize CLI helper - cli = utils.NewCLI() - }) - - // Test params - var ( - paths *signTestPaths - cid string - ) - - ginkgo.Context("signature workflow", ginkgo.Ordered, func() { - // Setup: Create temporary directory and files for the entire workflow - ginkgo.BeforeAll(func() { - var err error - paths = setupSignTestPaths() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Write test record to temp location - err = os.WriteFile(paths.record, testdata.ExpectedRecordV070JSON, 0o600) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Generate cosign key pair for all tests - utils.GenerateCosignKeyPair(paths.tempDir) - - // Verify key files were created - gomega.Expect(paths.privateKey).To(gomega.BeAnExistingFile()) - gomega.Expect(paths.publicKey).To(gomega.BeAnExistingFile()) - - // Set cosign password for all tests - err = os.Setenv("COSIGN_PASSWORD", utils.TestPassword) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - // Cleanup: Remove temporary directory after all workflow tests - ginkgo.AfterAll(func() { - // Unset cosign password for all tests - os.Unsetenv("COSIGN_PASSWORD") - - if paths != nil && paths.tempDir != "" { - err := os.RemoveAll(paths.tempDir) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }) - - ginkgo.It("should create keys for signing", func() { - // Keys are already created in BeforeAll, just verify they exist - gomega.Expect(paths.privateKey).To(gomega.BeAnExistingFile()) - gomega.Expect(paths.publicKey).To(gomega.BeAnExistingFile()) - }) - - ginkgo.It("should push a record to the store", func() { - cid = cli.Push(paths.record).WithArgs("--output", "raw").ShouldSucceed() - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, paths.record) - }) - - ginkgo.It("should sign a record with a key pair", func() { - _ = cli.Sign(cid, paths.privateKey).ShouldSucceed() - - time.Sleep(10 * time.Second) - }) - - ginkgo.It("should verify a signature with a public key on server side", func() { - cli.Command("verify"). - WithArgs(cid). - ShouldContain("Record signature is: trusted") - }) - - ginkgo.It("should pull a signature from the store", func() { - cli.Command("pull"). 
- WithArgs(cid, "--signature"). - WithArgs("--output", "json"). - ShouldContain("\"signature\":") - }) - - ginkgo.It("should pull a public key from the store", func() { - cli.Command("pull"). - WithArgs(cid, "--public-key"). - ShouldContain("-----BEGIN PUBLIC KEY-----") - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + _ "embed" + "os" + "path/filepath" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Using the shared record data from embed.go + +// Test constants. +const ( + signTempDirPrefix = "sign-test" +) + +// Test file paths helper. +type signTestPaths struct { + tempDir string + record string + privateKey string + publicKey string + signature string + signatureOutput string +} + +func setupSignTestPaths() *signTestPaths { + tempDir, err := os.MkdirTemp("", signTempDirPrefix) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return &signTestPaths{ + tempDir: tempDir, + record: filepath.Join(tempDir, "record.json"), + signature: filepath.Join(tempDir, "signature.json"), + signatureOutput: filepath.Join(tempDir, "signature-output.json"), + privateKey: filepath.Join(tempDir, "cosign.key"), + publicKey: filepath.Join(tempDir, "cosign.pub"), + } +} + +var _ = ginkgo.Describe("Running dirctl end-to-end tests to check signature support", func() { + var cli *utils.CLI + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + // Initialize CLI helper + cli = utils.NewCLI() + }) + + // Test params + var ( + paths *signTestPaths + cid string + ) + + ginkgo.Context("signature workflow", ginkgo.Ordered, func() { + // Setup: Create temporary directory and files for the entire workflow + ginkgo.BeforeAll(func() { + var err error + paths = setupSignTestPaths() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Write test record to temp location + err = os.WriteFile(paths.record, testdata.ExpectedRecordV070JSON, 0o600) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Generate cosign key pair for all tests + utils.GenerateCosignKeyPair(paths.tempDir) + + // Verify key files were created + gomega.Expect(paths.privateKey).To(gomega.BeAnExistingFile()) + gomega.Expect(paths.publicKey).To(gomega.BeAnExistingFile()) + + // Set cosign password for all tests + err = os.Setenv("COSIGN_PASSWORD", utils.TestPassword) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + // Cleanup: Remove temporary directory after all workflow tests + ginkgo.AfterAll(func() { + // Unset cosign password for all tests + os.Unsetenv("COSIGN_PASSWORD") + + if paths != nil && paths.tempDir != "" { + err := os.RemoveAll(paths.tempDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + ginkgo.It("should create keys for signing", func() { + // Keys are already created in BeforeAll, just verify they exist + gomega.Expect(paths.privateKey).To(gomega.BeAnExistingFile()) + gomega.Expect(paths.publicKey).To(gomega.BeAnExistingFile()) + }) + + ginkgo.It("should push a record to the store", func() { + cid = cli.Push(paths.record).WithArgs("--output", "raw").ShouldSucceed() + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, paths.record) + }) + + ginkgo.It("should sign a record with a key 
pair", func() { + _ = cli.Sign(cid, paths.privateKey).ShouldSucceed() + + time.Sleep(10 * time.Second) + }) + + ginkgo.It("should verify a signature with a public key on server side", func() { + cli.Command("verify"). + WithArgs(cid). + ShouldContain("Record signature is: trusted") + }) + + ginkgo.It("should pull a signature from the store", func() { + cli.Command("pull"). + WithArgs(cid, "--signature"). + WithArgs("--output", "json"). + ShouldContain("\"signature\":") + }) + + ginkgo.It("should pull a public key from the store", func() { + cli.Command("pull"). + WithArgs(cid, "--public-key"). + ShouldContain("-----BEGIN PUBLIC KEY-----") + }) + }) +}) diff --git a/e2e/local/05_network_cmd_test.go b/e2e/local/05_network_cmd_test.go index 2e5a759d1..dd32bb025 100644 --- a/e2e/local/05_network_cmd_test.go +++ b/e2e/local/05_network_cmd_test.go @@ -1,88 +1,88 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - _ "embed" - "os" - "path/filepath" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("Running dirctl end-to-end tests for network commands", func() { - var ( - cli *utils.CLI - tempDir string - tempKeyPath string - cleanup func() - ) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - // Initialize CLI helper - cli = utils.NewCLI() - - // Setup test directory and generate network key - tempDir, cleanup = utils.SetupNetworkTestDir() - tempKeyPath = utils.GenerateNetworkKeyPair(tempDir) - }) - - ginkgo.AfterEach(func() { - if cleanup != nil { - cleanup() - } - }) - - ginkgo.Context("info command", func() { - ginkgo.It("should generate a peer ID from a valid ED25519 key", func() { - output := cli.Network().Info(tempKeyPath).ShouldSucceed() - - // Verify that the output is not empty - gomega.Expect(output).NotTo(gomega.BeEmpty()) - }) - - ginkgo.It("should fail with non-existent key file", func() { - _ = cli.Network().Info("non-existent-key-file").ShouldFail() - }) - - ginkgo.It("should fail with empty key path", func() { - _ = cli.Network().Info("").ShouldFail() - }) - }) - - ginkgo.Context("init command", func() { - ginkgo.It("should generate a new peer ID and save the key to specified output", func() { - outputPath := filepath.Join(tempDir, "generated.key") - - // Generate new peer ID and key - peerID := cli.Network().Init().WithOutput(outputPath).ShouldSucceed() - - // Verify that the output file exists with correct permissions - gomega.Expect(outputPath).To(gomega.BeAnExistingFile()) - - info, err := os.Stat(outputPath) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(info.Mode().Perm()).To(gomega.Equal(os.FileMode(0o0600))) - - // Verify that the peer ID is valid format - gomega.Expect(peerID).NotTo(gomega.BeEmpty()) - gomega.Expect(peerID).To(gomega.HavePrefix("12D3")) - - // Verify that the generated key can be used with the info command - infoOutput := cli.Network().Info(outputPath).ShouldSucceed() - gomega.Expect(infoOutput).To(gomega.Equal(peerID)) - }) - - ginkgo.It("should fail when output directory doesn't exist and cannot be created", func() { - _ = cli.Network().Init().WithOutput("/nonexistent/directory/key.pem").ShouldFail() - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + 
+import ( + _ "embed" + "os" + "path/filepath" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var _ = ginkgo.Describe("Running dirctl end-to-end tests for network commands", func() { + var ( + cli *utils.CLI + tempDir string + tempKeyPath string + cleanup func() + ) + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + // Initialize CLI helper + cli = utils.NewCLI() + + // Setup test directory and generate network key + tempDir, cleanup = utils.SetupNetworkTestDir() + tempKeyPath = utils.GenerateNetworkKeyPair(tempDir) + }) + + ginkgo.AfterEach(func() { + if cleanup != nil { + cleanup() + } + }) + + ginkgo.Context("info command", func() { + ginkgo.It("should generate a peer ID from a valid ED25519 key", func() { + output := cli.Network().Info(tempKeyPath).ShouldSucceed() + + // Verify that the output is not empty + gomega.Expect(output).NotTo(gomega.BeEmpty()) + }) + + ginkgo.It("should fail with non-existent key file", func() { + _ = cli.Network().Info("non-existent-key-file").ShouldFail() + }) + + ginkgo.It("should fail with empty key path", func() { + _ = cli.Network().Info("").ShouldFail() + }) + }) + + ginkgo.Context("init command", func() { + ginkgo.It("should generate a new peer ID and save the key to specified output", func() { + outputPath := filepath.Join(tempDir, "generated.key") + + // Generate new peer ID and key + peerID := cli.Network().Init().WithOutput(outputPath).ShouldSucceed() + + // Verify that the output file exists with correct permissions + gomega.Expect(outputPath).To(gomega.BeAnExistingFile()) + + info, err := os.Stat(outputPath) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(info.Mode().Perm()).To(gomega.Equal(os.FileMode(0o0600))) + + // Verify that the peer ID is valid format + gomega.Expect(peerID).NotTo(gomega.BeEmpty()) + gomega.Expect(peerID).To(gomega.HavePrefix("12D3")) + + // Verify that the generated key can be used with the info command + infoOutput := cli.Network().Info(outputPath).ShouldSucceed() + gomega.Expect(infoOutput).To(gomega.Equal(peerID)) + }) + + ginkgo.It("should fail when output directory doesn't exist and cannot be created", func() { + _ = cli.Network().Init().WithOutput("/nonexistent/directory/key.pem").ShouldFail() + }) + }) +}) diff --git a/e2e/local/06_events_test.go b/e2e/local/06_events_test.go index 2ef6ecaa7..45bafa173 100644 --- a/e2e/local/06_events_test.go +++ b/e2e/local/06_events_test.go @@ -1,139 +1,139 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - "os" - "path/filepath" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Event CLI Tests -// -// Testing Strategy: -// - CLI tests (this file): Verify command existence, help text, flag acceptance -// - SDK tests (e2e/client/02_events_test.go): Full streaming event reception tests -// -// Rationale: The 'dirctl events listen' command runs as a long-running streaming process. -// The CLI test framework (utils.CLI) executes commands synchronously and captures output, -// which doesn't support background processes. 
Therefore: -// - We test CLI command structure here (help, flags, command registration) -// - We test actual event streaming in e2e/client/ using the SDK -// -// This matches the pattern for other streaming commands in the codebase. - -var _ = ginkgo.Describe("Events CLI Commands", ginkgo.Serial, ginkgo.Label("events"), func() { - var cli *utils.CLI - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - cli = utils.NewCLI() - }) - - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - - ginkgo.Context("events command availability", func() { - ginkgo.It("should have events command registered", func() { - // Test that 'dirctl events' command exists - output, err := cli.Command("events").WithArgs("--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("Stream real-time events")) - gomega.Expect(output).To(gomega.ContainSubstring("listen")) - }) - - ginkgo.It("should have events listen subcommand", func() { - // Test that 'dirctl events listen' exists - output, err := cli.Command("events").WithArgs("listen", "--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("Listen to real-time system events")) - gomega.Expect(output).To(gomega.ContainSubstring("--types")) - gomega.Expect(output).To(gomega.ContainSubstring("--labels")) - gomega.Expect(output).To(gomega.ContainSubstring("--cids")) - }) - }) - - ginkgo.Context("events listen command flags", func() { - ginkgo.It("should support --types flag", func() { - // Verify the --types flag exists in help - output, err := cli.Command("events").WithArgs("listen", "--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("--types")) - gomega.Expect(output).To(gomega.ContainSubstring("Event types to filter")) - }) - - ginkgo.It("should support --labels flag", func() { - // Verify the --labels flag exists in help - output, err := cli.Command("events").WithArgs("listen", "--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("--labels")) - gomega.Expect(output).To(gomega.ContainSubstring("Label filters")) - }) - - ginkgo.It("should support --cids flag", func() { - // Verify the --cids flag exists in help - output, err := cli.Command("events").WithArgs("listen", "--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("--cids")) - gomega.Expect(output).To(gomega.ContainSubstring("CID filters")) - }) - - ginkgo.It("should support --output flag", func() { - // Verify the --output flag exists (from AddOutputFlags) - output, err := cli.Command("events").WithArgs("listen", "--help").Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(output).To(gomega.ContainSubstring("--output")) - }) - }) - - ginkgo.Context("event emission during operations", ginkgo.Ordered, func() { - var pushCID, publishCID string - - ginkgo.It("should emit events during push operations", func() { - // Push operation emits RECORD_PUSHED event - // Full streaming reception tested in e2e/client/02_events_test.go - - recordFile := filepath.Join(tempDir, "events_push_test.json") - _ = os.WriteFile(recordFile, testdata.ExpectedRecordV031JSON, 0o600) - - pushCID = 
cli.Push(recordFile).WithArgs("--output", "raw").ShouldSucceed() - gomega.Expect(pushCID).NotTo(gomega.BeEmpty()) - }) - - ginkgo.It("should emit events during publish operations", func() { - // Publish operation emits RECORD_PUBLISHED event - // Use V070 to get a different CID - // Full streaming reception tested in e2e/client/02_events_test.go - - recordFile := filepath.Join(tempDir, "events_publish_test.json") - _ = os.WriteFile(recordFile, testdata.ExpectedRecordV070JSON, 0o600) - - publishCID = cli.Push(recordFile).WithArgs("--output", "raw").ShouldSucceed() - output := cli.Routing().Publish(publishCID).ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring("Successfully submitted publication request")) - }) - - ginkgo.It("should emit events during delete operations", func() { - // Delete the record from the first test (different from publish test) - // Delete operation emits RECORD_DELETED event - // Full streaming reception tested in e2e/client/02_events_test.go - - cli.Delete(pushCID).ShouldSucceed() - - // Verify delete worked - _ = cli.Pull(pushCID).ShouldFail() - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "os" + "path/filepath" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Event CLI Tests +// +// Testing Strategy: +// - CLI tests (this file): Verify command existence, help text, flag acceptance +// - SDK tests (e2e/client/02_events_test.go): Full streaming event reception tests +// +// Rationale: The 'dirctl events listen' command runs as a long-running streaming process. +// The CLI test framework (utils.CLI) executes commands synchronously and captures output, +// which doesn't support background processes. Therefore: +// - We test CLI command structure here (help, flags, command registration) +// - We test actual event streaming in e2e/client/ using the SDK +// +// This matches the pattern for other streaming commands in the codebase. 
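+//
+// A bounded-run alternative (a sketch using only the standard library, and
+// assuming a dirctl binary on PATH) would cap the stream with a context deadline:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	out, _ := exec.CommandContext(ctx, "dirctl", "events", "listen").CombinedOutput()
+//	// out holds whatever events arrived before the deadline killed the process.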
+ +var _ = ginkgo.Describe("Events CLI Commands", ginkgo.Serial, ginkgo.Label("events"), func() { + var cli *utils.CLI + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + cli = utils.NewCLI() + }) + + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + + ginkgo.Context("events command availability", func() { + ginkgo.It("should have events command registered", func() { + // Test that 'dirctl events' command exists + output, err := cli.Command("events").WithArgs("--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("Stream real-time events")) + gomega.Expect(output).To(gomega.ContainSubstring("listen")) + }) + + ginkgo.It("should have events listen subcommand", func() { + // Test that 'dirctl events listen' exists + output, err := cli.Command("events").WithArgs("listen", "--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("Listen to real-time system events")) + gomega.Expect(output).To(gomega.ContainSubstring("--types")) + gomega.Expect(output).To(gomega.ContainSubstring("--labels")) + gomega.Expect(output).To(gomega.ContainSubstring("--cids")) + }) + }) + + ginkgo.Context("events listen command flags", func() { + ginkgo.It("should support --types flag", func() { + // Verify the --types flag exists in help + output, err := cli.Command("events").WithArgs("listen", "--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("--types")) + gomega.Expect(output).To(gomega.ContainSubstring("Event types to filter")) + }) + + ginkgo.It("should support --labels flag", func() { + // Verify the --labels flag exists in help + output, err := cli.Command("events").WithArgs("listen", "--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("--labels")) + gomega.Expect(output).To(gomega.ContainSubstring("Label filters")) + }) + + ginkgo.It("should support --cids flag", func() { + // Verify the --cids flag exists in help + output, err := cli.Command("events").WithArgs("listen", "--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("--cids")) + gomega.Expect(output).To(gomega.ContainSubstring("CID filters")) + }) + + ginkgo.It("should support --output flag", func() { + // Verify the --output flag exists (from AddOutputFlags) + output, err := cli.Command("events").WithArgs("listen", "--help").Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.ContainSubstring("--output")) + }) + }) + + ginkgo.Context("event emission during operations", ginkgo.Ordered, func() { + var pushCID, publishCID string + + ginkgo.It("should emit events during push operations", func() { + // Push operation emits RECORD_PUSHED event + // Full streaming reception tested in e2e/client/02_events_test.go + + recordFile := filepath.Join(tempDir, "events_push_test.json") + _ = os.WriteFile(recordFile, testdata.ExpectedRecordV031JSON, 0o600) + + pushCID = cli.Push(recordFile).WithArgs("--output", "raw").ShouldSucceed() + gomega.Expect(pushCID).NotTo(gomega.BeEmpty()) + }) + + ginkgo.It("should emit events during publish operations", func() { + // Publish operation emits RECORD_PUBLISHED event + // Use V070 to get a 
different CID + // Full streaming reception tested in e2e/client/02_events_test.go + + recordFile := filepath.Join(tempDir, "events_publish_test.json") + _ = os.WriteFile(recordFile, testdata.ExpectedRecordV070JSON, 0o600) + + publishCID = cli.Push(recordFile).WithArgs("--output", "raw").ShouldSucceed() + output := cli.Routing().Publish(publishCID).ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring("Successfully submitted publication request")) + }) + + ginkgo.It("should emit events during delete operations", func() { + // Delete the record from the first test (different from publish test) + // Delete operation emits RECORD_DELETED event + // Full streaming reception tested in e2e/client/02_events_test.go + + cli.Delete(pushCID).ShouldSucceed() + + // Verify delete worked + _ = cli.Pull(pushCID).ShouldFail() + }) + }) +}) diff --git a/e2e/local/06_import_test.go b/e2e/local/06_import_test.go index cbcb1ae69..aabab9ed6 100644 --- a/e2e/local/06_import_test.go +++ b/e2e/local/06_import_test.go @@ -1,119 +1,119 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - "encoding/json" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ginkgo.Describe("Running dirctl end-to-end tests for the import command", func() { - var cli *utils.CLI - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - - utils.ResetCLIState() - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.Context("MCP registry import functionality", ginkgo.Ordered, func() { - ginkgo.It("should successfully import records from MCP registry with limit", func() { - // Run import command with a limit of 10 records - output := cli.Command("import"). - WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "10"). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Import output: %s\n", output) - - // Verify output indicates successful import - gomega.Expect(output).NotTo(gomega.BeEmpty()) - gomega.Expect(output).To(gomega.ContainSubstring("Total records: 10")) - gomega.Expect(output).To(gomega.ContainSubstring("Imported: 10")) - gomega.Expect(output).To(gomega.ContainSubstring("Failed: 0")) - }) - - ginkgo.It("should accept enrichment flags without errors", func() { - // Run import help command to verify the enrichment flags exist - // We just want to verify the flags are recognized by the CLI - output := cli.Command("import"). - WithArgs("--help"). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Import help output: %s\n", output) - - // Verify the new enrichment flags are documented - gomega.Expect(output).To(gomega.ContainSubstring("--enrich-skills-prompt")) - gomega.Expect(output).To(gomega.ContainSubstring("--enrich-domains-prompt")) - gomega.Expect(output).To(gomega.ContainSubstring("--enrich-config")) - }) - - ginkgo.It("should accept force and debug flags", func() { - // Test that --force flag is accepted - output := cli.Command("import"). - WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "2", "--force"). 
- ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Import with --force flag: %s\n", output) - - gomega.Expect(output).To(gomega.ContainSubstring("Total records: 2")) - gomega.Expect(output).To(gomega.ContainSubstring("Imported: 2")) - - // Test that --debug flag is accepted and runs without error - output2 := cli.Command("import"). - WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "1", "--debug"). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Import with --debug flag: %s\n", output2) - - // Just verify the command succeeds with debug flag - gomega.Expect(output2).To(gomega.ContainSubstring("Total records:")) - }) - - var recordRefs []string - - ginkgo.It("should find at least 10 imported MCP records", func() { - // Search for records with integration/mcp module - output := cli.Search(). - WithModule("integration/mcp"). - WithLimit(20). - WithArgs("--output", "json"). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Search output: %s\n", output) - - // Parse the output - err := json.Unmarshal([]byte(output), &recordRefs) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Verify we have at least 10 records (from all previous import tests) - gomega.Expect(len(recordRefs)).To(gomega.BeNumerically(">=", 10), - "Expected at least 10 imported MCP records, got %d", len(recordRefs)) - }) - - ginkgo.It("should be able to pull an imported record", func() { - // Try to pull the record - pullOutput := cli.Pull(recordRefs[0]).WithArgs("--output", "json").ShouldSucceed() - gomega.Expect(pullOutput).NotTo(gomega.BeEmpty()) - - // Verify the pulled record has expected fields - var record map[string]interface{} - err := json.Unmarshal([]byte(pullOutput), &record) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Verify essential fields exist - gomega.Expect(record).To(gomega.HaveKey("name")) - gomega.Expect(record).To(gomega.HaveKey("version")) - gomega.Expect(record).To(gomega.HaveKey("schema_version")) - gomega.Expect(record).To(gomega.HaveKey("locators")) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "encoding/json" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var _ = ginkgo.Describe("Running dirctl end-to-end tests for the import command", func() { + var cli *utils.CLI + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + + utils.ResetCLIState() + // Initialize CLI helper + cli = utils.NewCLI() + }) + + ginkgo.Context("MCP registry import functionality", ginkgo.Ordered, func() { + ginkgo.It("should successfully import records from MCP registry with limit", func() { + // Run import command with a limit of 10 records + output := cli.Command("import"). + WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "10"). 
+ ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Import output: %s\n", output) + + // Verify output indicates successful import + gomega.Expect(output).NotTo(gomega.BeEmpty()) + gomega.Expect(output).To(gomega.ContainSubstring("Total records: 10")) + gomega.Expect(output).To(gomega.ContainSubstring("Imported: 10")) + gomega.Expect(output).To(gomega.ContainSubstring("Failed: 0")) + }) + + ginkgo.It("should accept enrichment flags without errors", func() { + // Run import help command to verify the enrichment flags exist + // We just want to verify the flags are recognized by the CLI + output := cli.Command("import"). + WithArgs("--help"). + ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Import help output: %s\n", output) + + // Verify the new enrichment flags are documented + gomega.Expect(output).To(gomega.ContainSubstring("--enrich-skills-prompt")) + gomega.Expect(output).To(gomega.ContainSubstring("--enrich-domains-prompt")) + gomega.Expect(output).To(gomega.ContainSubstring("--enrich-config")) + }) + + ginkgo.It("should accept force and debug flags", func() { + // Test that --force flag is accepted + output := cli.Command("import"). + WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "2", "--force"). + ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Import with --force flag: %s\n", output) + + gomega.Expect(output).To(gomega.ContainSubstring("Total records: 2")) + gomega.Expect(output).To(gomega.ContainSubstring("Imported: 2")) + + // Test that --debug flag is accepted and runs without error + output2 := cli.Command("import"). + WithArgs("--type=mcp", "--url=https://registry.modelcontextprotocol.io/v0.1", "--limit", "1", "--debug"). + ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Import with --debug flag: %s\n", output2) + + // Just verify the command succeeds with debug flag + gomega.Expect(output2).To(gomega.ContainSubstring("Total records:")) + }) + + var recordRefs []string + + ginkgo.It("should find at least 10 imported MCP records", func() { + // Search for records with integration/mcp module + output := cli.Search(). + WithModule("integration/mcp"). + WithLimit(20). + WithArgs("--output", "json"). 
+ ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Search output: %s\n", output) + + // Parse the output + err := json.Unmarshal([]byte(output), &recordRefs) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify we have at least 10 records (from all previous import tests) + gomega.Expect(len(recordRefs)).To(gomega.BeNumerically(">=", 10), + "Expected at least 10 imported MCP records, got %d", len(recordRefs)) + }) + + ginkgo.It("should be able to pull an imported record", func() { + // Try to pull the record + pullOutput := cli.Pull(recordRefs[0]).WithArgs("--output", "json").ShouldSucceed() + gomega.Expect(pullOutput).NotTo(gomega.BeEmpty()) + + // Verify the pulled record has expected fields + var record map[string]interface{} + err := json.Unmarshal([]byte(pullOutput), &record) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify essential fields exist + gomega.Expect(record).To(gomega.HaveKey("name")) + gomega.Expect(record).To(gomega.HaveKey("version")) + gomega.Expect(record).To(gomega.HaveKey("schema_version")) + gomega.Expect(record).To(gomega.HaveKey("locators")) + }) + }) +}) diff --git a/e2e/local/07_metrics_test.go b/e2e/local/07_metrics_test.go index 7276fb255..ec69cd337 100644 --- a/e2e/local/07_metrics_test.go +++ b/e2e/local/07_metrics_test.go @@ -1,463 +1,463 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - "fmt" - "io" - "net/http" - "strings" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Metrics E2E Tests -// -// Testing Strategy: -// - Verify Prometheus /metrics endpoint is accessible -// - Validate gRPC metrics are being collected -// - Check that metrics contain data from previous tests (01-06) -// -// Note: This test runs AFTER other tests (numbered 07), so metrics should -// already contain non-zero values from previous test operations. 
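// An illustrative sketch of the metric data-line parsing the specs in this
// file perform with strings.Fields. Prometheus exposition-format lines look
// like `name{labels} value`; parsing the trailing field as a float is a touch
// more robust than comparing against the literals "0" and "0.0". The helper
// name is hypothetical, not part of the suite.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// counterValue extracts the numeric value from a Prometheus data line and
// reports whether the line parsed as `metric{labels} value`.
func counterValue(line string) (float64, bool) {
	if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" {
		return 0, false // comment or blank line, no value to read
	}

	parts := strings.Fields(line)
	if len(parts) < 2 {
		return 0, false
	}

	v, err := strconv.ParseFloat(parts[len(parts)-1], 64)

	return v, err == nil
}

func main() {
	line := `grpc_server_started_total{grpc_method="Push",grpc_service="agntcy.dir.store.v1.StoreService",grpc_type="bidi_stream"} 5`
	if v, ok := counterValue(line); ok && v > 0 {
		fmt.Printf("non-zero counter: %v\n", v)
	}
}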
- -var _ = ginkgo.Describe("Prometheus Metrics", ginkgo.Serial, ginkgo.Label("metrics"), func() { - const metricsURL = "http://localhost:9090/metrics" - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeLocal { - ginkgo.Skip("Skipping test, not in local mode") - } - }) - - ginkgo.Context("metrics endpoint availability", func() { - ginkgo.It("should expose /metrics endpoint on port 9090", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - gomega.Expect(resp.StatusCode).To(gomega.Equal(http.StatusOK)) - gomega.Expect(resp.Header.Get("Content-Type")).To(gomega.ContainSubstring("text/plain")) - }) - - ginkgo.It("should return Prometheus-formatted metrics", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metrics := string(body) - - // Verify Prometheus format (HELP and TYPE comments) - gomega.Expect(metrics).To(gomega.ContainSubstring("# HELP")) - gomega.Expect(metrics).To(gomega.ContainSubstring("# TYPE")) - - // Verify metrics are not empty - gomega.Expect(len(metrics)).To(gomega.BeNumerically(">", 100), - "Expected metrics output to be substantial") - }) - }) - - ginkgo.Context("gRPC metrics collection", func() { - var metricsContent string - - ginkgo.BeforeEach(func() { - // Fetch metrics once for all tests in this context - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent = string(body) - }) - - ginkgo.It("should expose grpc_server_started_total counter", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_started_total")) - gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_started_total counter")) - }) - - ginkgo.It("should expose grpc_server_handled_total counter", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_handled_total")) - gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_handled_total counter")) - }) - - ginkgo.It("should expose grpc_server_msg_received_total counter for streaming", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_msg_received_total")) - gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_msg_received_total counter")) - }) - - ginkgo.It("should expose grpc_server_msg_sent_total counter for streaming", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_msg_sent_total")) - gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_msg_sent_total counter")) - }) - - ginkgo.It("should include StoreService metrics", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.store.v1.StoreService"`)) - - // Verify key StoreService methods are instrumented - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Push"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Pull"`)) - }) - - ginkgo.It("should include RoutingService metrics", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.routing.v1.RoutingService"`)) - - // Verify key RoutingService methods are instrumented - 
gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Search"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Publish"`)) - }) - - ginkgo.It("should include EventService metrics", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.events.v1.EventService"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Listen"`)) - }) - - ginkgo.It("should include SearchService metrics", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.search.v1.SearchService"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Search"`)) - }) - - ginkgo.It("should include Health service metrics", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="grpc.health.v1.Health"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Check"`)) - }) - }) - - ginkgo.Context("metrics from previous tests", func() { - ginkgo.It("should have non-zero request counts from previous tests", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Parse metrics to find counters with non-zero values - // Previous tests (01-06) should have generated traffic - foundNonZero := false - - for _, line := range strings.Split(metricsContent, "\n") { - // Skip comments and empty lines - if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { - continue - } - - // Look for grpc_server_started_total or grpc_server_handled_total with values - if strings.Contains(line, "grpc_server_started_total") || - strings.Contains(line, "grpc_server_handled_total") { - // Parse value (last part after space) - parts := strings.Fields(line) - if len(parts) >= 2 { - value := parts[len(parts)-1] - if value != "0" && value != "0.0" { - foundNonZero = true - - ginkgo.GinkgoWriter.Printf("Found non-zero metric: %s\n", line) - - break - } - } - } - } - - gomega.Expect(foundNonZero).To(gomega.BeTrue(), - "Expected to find non-zero request metrics from previous tests (01-06)") - }) - - ginkgo.It("should have successful (OK) status codes from previous tests", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Look for successful operations (grpc_code="OK") - foundOKStatus := false - - for _, line := range strings.Split(metricsContent, "\n") { - if strings.Contains(line, `grpc_code="OK"`) && !strings.HasPrefix(line, "#") { - // Parse value - parts := strings.Fields(line) - if len(parts) >= 2 { - value := parts[len(parts)-1] - if value != "0" && value != "0.0" { - foundOKStatus = true - - ginkgo.GinkgoWriter.Printf("Found successful request: %s\n", line) - - break - } - } - } - } - - gomega.Expect(foundOKStatus).To(gomega.BeTrue(), - "Expected to find successful (OK) requests from previous tests") - }) - }) - - ginkgo.Context("metrics labels and structure", func() { - var metricsContent string - - ginkgo.BeforeEach(func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent = 
string(body) - }) - - ginkgo.It("should include grpc_method label", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method=`)) - }) - - ginkgo.It("should include grpc_service label", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service=`)) - }) - - ginkgo.It("should include grpc_type label", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type=`)) - - // Verify different RPC types are tracked - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="unary"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="bidi_stream"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="server_stream"`)) - }) - - ginkgo.It("should include grpc_code label for completed requests", func() { - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code=`)) - - // Verify common status codes are tracked - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="OK"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="InvalidArgument"`)) - gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="NotFound"`)) - }) - }) - - ginkgo.Context("metrics validation", func() { - ginkgo.It("should report metrics for all registered services", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Expected services to be instrumented - expectedServices := []string{ - "agntcy.dir.store.v1.StoreService", - "agntcy.dir.routing.v1.RoutingService", - "agntcy.dir.events.v1.EventService", - "agntcy.dir.search.v1.SearchService", - "agntcy.dir.store.v1.SyncService", - "agntcy.dir.routing.v1.PublicationService", - "agntcy.dir.sign.v1.SignService", - "grpc.health.v1.Health", - } - - for _, service := range expectedServices { - gomega.Expect(metricsContent).To( - gomega.ContainSubstring(fmt.Sprintf(`grpc_service="%s"`, service)), - "Expected metrics for service: %s", service, - ) - } - }) - - ginkgo.It("should count method invocations from previous tests", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Count lines with actual metric values (not HELP or TYPE) - metricLines := 0 - - for _, line := range strings.Split(metricsContent, "\n") { - if !strings.HasPrefix(line, "#") && strings.TrimSpace(line) != "" { - metricLines++ - } - } - - gomega.Expect(metricLines).To(gomega.BeNumerically(">", 50), - "Expected at least 50 metric data lines, got %d", metricLines) - - ginkgo.GinkgoWriter.Printf("Total metric data lines: %d\n", metricLines) - }) - }) - - ginkgo.Context("integration with kubectl (optional - if ServiceMonitor enabled)", func() { - ginkgo.It("should be able to port-forward to metrics port", func() { - // This test verifies the Kubernetes service exposes the metrics port - // Note: We don't actually port-forward here (it's already done by Taskfile) - // We just verify the metrics are accessible via the existing port-forward - - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - gomega.Expect(resp.StatusCode).To(gomega.Equal(http.StatusOK)) - - ginkgo.GinkgoWriter.Println("Metrics port is 
accessible via port-forward ✓") - }) - }) - - ginkgo.Context("metrics useful for monitoring", func() { - ginkgo.It("should provide data for request rate queries", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Verify we can identify busy methods - // Example: grpc_server_started_total{grpc_method="Push",grpc_service="...",grpc_type="bidi_stream"} 5 - hasStartedMetrics := strings.Contains(metricsContent, "grpc_server_started_total") - gomega.Expect(hasStartedMetrics).To(gomega.BeTrue(), - "Need grpc_server_started_total for rate(grpc_server_started_total[5m]) queries") - }) - - ginkgo.It("should provide data for error rate queries", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Verify we have status codes for error rate calculation - hasHandledMetrics := strings.Contains(metricsContent, "grpc_server_handled_total") - hasStatusCodes := strings.Contains(metricsContent, `grpc_code="OK"`) - - gomega.Expect(hasHandledMetrics).To(gomega.BeTrue()) - gomega.Expect(hasStatusCodes).To(gomega.BeTrue()) - - ginkgo.GinkgoWriter.Println("Metrics support error rate calculation: rate(grpc_server_handled_total{grpc_code!=\"OK\"}[5m]) / rate(grpc_server_handled_total[5m])") - }) - - ginkgo.It("should support latency percentile queries (histogram buckets)", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Note: grpc-prometheus v1.2.0 doesn't expose histogram by default - // Check if we have any timing metrics - // If histograms are missing, this is expected and we can add them in future iterations - - ginkgo.GinkgoWriter.Println("Checking for latency metrics (histograms)...") - - if strings.Contains(metricsContent, "grpc_server_handling_seconds") { - ginkgo.GinkgoWriter.Println("✓ Latency histogram metrics found") - - gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_handling_seconds")) - } else { - ginkgo.GinkgoWriter.Println("ℹ Latency histograms not available in current grpc-prometheus version") - ginkgo.GinkgoWriter.Println(" This is expected with grpc-prometheus v1.2.0") - ginkgo.GinkgoWriter.Println(" To add histograms, we can use grpc-ecosystem/go-grpc-middleware/v2") - } - }) - }) - - ginkgo.Context("metrics data sanity checks", func() { - ginkgo.It("should parse as valid Prometheus metrics format", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Basic validation: each metric line should have format "metric_name{labels} value" - metricDataLines := 0 - invalidLines := []string{} - - for _, line := range strings.Split(metricsContent, "\n") { - // Skip comments and empty lines - if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { - continue - } - - metricDataLines++ - - // Validate format: should have '{' for labels and end with a number - if !strings.Contains(line, 
"{") { - invalidLines = append(invalidLines, line) - - continue - } - - // Check it ends with a number (simple validation) - parts := strings.Fields(line) - if len(parts) < 2 { - invalidLines = append(invalidLines, line) - } - } - - gomega.Expect(invalidLines).To(gomega.BeEmpty(), - "Found %d invalid metric lines: %v", len(invalidLines), invalidLines) - - ginkgo.GinkgoWriter.Printf("Validated %d metric data lines ✓\n", metricDataLines) - }) - - ginkgo.It("should not have negative metric values", func() { - resp, err := http.Get(metricsURL) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - metricsContent := string(body) - - // Check for negative values - negativeLines := []string{} - - for _, line := range strings.Split(metricsContent, "\n") { - // Skip comments - if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { - continue - } - - // Look for negative values - if strings.Contains(line, " -") { - negativeLines = append(negativeLines, line) - } - } - - gomega.Expect(negativeLines).To(gomega.BeEmpty(), - "Found metrics with negative values: %v", negativeLines) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "fmt" + "io" + "net/http" + "strings" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Metrics E2E Tests +// +// Testing Strategy: +// - Verify Prometheus /metrics endpoint is accessible +// - Validate gRPC metrics are being collected +// - Check that metrics contain data from previous tests (01-06) +// +// Note: This test runs AFTER other tests (numbered 07), so metrics should +// already contain non-zero values from previous test operations. 
+ +var _ = ginkgo.Describe("Prometheus Metrics", ginkgo.Serial, ginkgo.Label("metrics"), func() { + const metricsURL = "http://localhost:9090/metrics" + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeLocal { + ginkgo.Skip("Skipping test, not in local mode") + } + }) + + ginkgo.Context("metrics endpoint availability", func() { + ginkgo.It("should expose /metrics endpoint on port 9090", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + gomega.Expect(resp.StatusCode).To(gomega.Equal(http.StatusOK)) + gomega.Expect(resp.Header.Get("Content-Type")).To(gomega.ContainSubstring("text/plain")) + }) + + ginkgo.It("should return Prometheus-formatted metrics", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metrics := string(body) + + // Verify Prometheus format (HELP and TYPE comments) + gomega.Expect(metrics).To(gomega.ContainSubstring("# HELP")) + gomega.Expect(metrics).To(gomega.ContainSubstring("# TYPE")) + + // Verify metrics are not empty + gomega.Expect(len(metrics)).To(gomega.BeNumerically(">", 100), + "Expected metrics output to be substantial") + }) + }) + + ginkgo.Context("gRPC metrics collection", func() { + var metricsContent string + + ginkgo.BeforeEach(func() { + // Fetch metrics once for all tests in this context + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent = string(body) + }) + + ginkgo.It("should expose grpc_server_started_total counter", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_started_total")) + gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_started_total counter")) + }) + + ginkgo.It("should expose grpc_server_handled_total counter", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_handled_total")) + gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_handled_total counter")) + }) + + ginkgo.It("should expose grpc_server_msg_received_total counter for streaming", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_msg_received_total")) + gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_msg_received_total counter")) + }) + + ginkgo.It("should expose grpc_server_msg_sent_total counter for streaming", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_msg_sent_total")) + gomega.Expect(metricsContent).To(gomega.ContainSubstring("# TYPE grpc_server_msg_sent_total counter")) + }) + + ginkgo.It("should include StoreService metrics", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.store.v1.StoreService"`)) + + // Verify key StoreService methods are instrumented + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Push"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Pull"`)) + }) + + ginkgo.It("should include RoutingService metrics", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.routing.v1.RoutingService"`)) + + // Verify key RoutingService methods are instrumented + 
gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Search"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Publish"`)) + }) + + ginkgo.It("should include EventService metrics", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.events.v1.EventService"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Listen"`)) + }) + + ginkgo.It("should include SearchService metrics", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="agntcy.dir.search.v1.SearchService"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Search"`)) + }) + + ginkgo.It("should include Health service metrics", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service="grpc.health.v1.Health"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method="Check"`)) + }) + }) + + ginkgo.Context("metrics from previous tests", func() { + ginkgo.It("should have non-zero request counts from previous tests", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Parse metrics to find counters with non-zero values + // Previous tests (01-06) should have generated traffic + foundNonZero := false + + for _, line := range strings.Split(metricsContent, "\n") { + // Skip comments and empty lines + if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { + continue + } + + // Look for grpc_server_started_total or grpc_server_handled_total with values + if strings.Contains(line, "grpc_server_started_total") || + strings.Contains(line, "grpc_server_handled_total") { + // Parse value (last part after space) + parts := strings.Fields(line) + if len(parts) >= 2 { + value := parts[len(parts)-1] + if value != "0" && value != "0.0" { + foundNonZero = true + + ginkgo.GinkgoWriter.Printf("Found non-zero metric: %s\n", line) + + break + } + } + } + } + + gomega.Expect(foundNonZero).To(gomega.BeTrue(), + "Expected to find non-zero request metrics from previous tests (01-06)") + }) + + ginkgo.It("should have successful (OK) status codes from previous tests", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Look for successful operations (grpc_code="OK") + foundOKStatus := false + + for _, line := range strings.Split(metricsContent, "\n") { + if strings.Contains(line, `grpc_code="OK"`) && !strings.HasPrefix(line, "#") { + // Parse value + parts := strings.Fields(line) + if len(parts) >= 2 { + value := parts[len(parts)-1] + if value != "0" && value != "0.0" { + foundOKStatus = true + + ginkgo.GinkgoWriter.Printf("Found successful request: %s\n", line) + + break + } + } + } + } + + gomega.Expect(foundOKStatus).To(gomega.BeTrue(), + "Expected to find successful (OK) requests from previous tests") + }) + }) + + ginkgo.Context("metrics labels and structure", func() { + var metricsContent string + + ginkgo.BeforeEach(func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent = 
string(body) + }) + + ginkgo.It("should include grpc_method label", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_method=`)) + }) + + ginkgo.It("should include grpc_service label", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_service=`)) + }) + + ginkgo.It("should include grpc_type label", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type=`)) + + // Verify different RPC types are tracked + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="unary"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="bidi_stream"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_type="server_stream"`)) + }) + + ginkgo.It("should include grpc_code label for completed requests", func() { + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code=`)) + + // Verify common status codes are tracked + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="OK"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="InvalidArgument"`)) + gomega.Expect(metricsContent).To(gomega.ContainSubstring(`grpc_code="NotFound"`)) + }) + }) + + ginkgo.Context("metrics validation", func() { + ginkgo.It("should report metrics for all registered services", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Expected services to be instrumented + expectedServices := []string{ + "agntcy.dir.store.v1.StoreService", + "agntcy.dir.routing.v1.RoutingService", + "agntcy.dir.events.v1.EventService", + "agntcy.dir.search.v1.SearchService", + "agntcy.dir.store.v1.SyncService", + "agntcy.dir.routing.v1.PublicationService", + "agntcy.dir.sign.v1.SignService", + "grpc.health.v1.Health", + } + + for _, service := range expectedServices { + gomega.Expect(metricsContent).To( + gomega.ContainSubstring(fmt.Sprintf(`grpc_service="%s"`, service)), + "Expected metrics for service: %s", service, + ) + } + }) + + ginkgo.It("should count method invocations from previous tests", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Count lines with actual metric values (not HELP or TYPE) + metricLines := 0 + + for _, line := range strings.Split(metricsContent, "\n") { + if !strings.HasPrefix(line, "#") && strings.TrimSpace(line) != "" { + metricLines++ + } + } + + gomega.Expect(metricLines).To(gomega.BeNumerically(">", 50), + "Expected at least 50 metric data lines, got %d", metricLines) + + ginkgo.GinkgoWriter.Printf("Total metric data lines: %d\n", metricLines) + }) + }) + + ginkgo.Context("integration with kubectl (optional - if ServiceMonitor enabled)", func() { + ginkgo.It("should be able to port-forward to metrics port", func() { + // This test verifies the Kubernetes service exposes the metrics port + // Note: We don't actually port-forward here (it's already done by Taskfile) + // We just verify the metrics are accessible via the existing port-forward + + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + gomega.Expect(resp.StatusCode).To(gomega.Equal(http.StatusOK)) + + ginkgo.GinkgoWriter.Println("Metrics port is 
accessible via port-forward ✓") + }) + }) + + ginkgo.Context("metrics useful for monitoring", func() { + ginkgo.It("should provide data for request rate queries", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Verify we can identify busy methods + // Example: grpc_server_started_total{grpc_method="Push",grpc_service="...",grpc_type="bidi_stream"} 5 + hasStartedMetrics := strings.Contains(metricsContent, "grpc_server_started_total") + gomega.Expect(hasStartedMetrics).To(gomega.BeTrue(), + "Need grpc_server_started_total for rate(grpc_server_started_total[5m]) queries") + }) + + ginkgo.It("should provide data for error rate queries", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Verify we have status codes for error rate calculation + hasHandledMetrics := strings.Contains(metricsContent, "grpc_server_handled_total") + hasStatusCodes := strings.Contains(metricsContent, `grpc_code="OK"`) + + gomega.Expect(hasHandledMetrics).To(gomega.BeTrue()) + gomega.Expect(hasStatusCodes).To(gomega.BeTrue()) + + ginkgo.GinkgoWriter.Println("Metrics support error rate calculation: rate(grpc_server_handled_total{grpc_code!=\"OK\"}[5m]) / rate(grpc_server_handled_total[5m])") + }) + + ginkgo.It("should support latency percentile queries (histogram buckets)", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Note: grpc-prometheus v1.2.0 doesn't expose histogram by default + // Check if we have any timing metrics + // If histograms are missing, this is expected and we can add them in future iterations + + ginkgo.GinkgoWriter.Println("Checking for latency metrics (histograms)...") + + if strings.Contains(metricsContent, "grpc_server_handling_seconds") { + ginkgo.GinkgoWriter.Println("✓ Latency histogram metrics found") + + gomega.Expect(metricsContent).To(gomega.ContainSubstring("grpc_server_handling_seconds")) + } else { + ginkgo.GinkgoWriter.Println("ℹ Latency histograms not available in current grpc-prometheus version") + ginkgo.GinkgoWriter.Println(" This is expected with grpc-prometheus v1.2.0") + ginkgo.GinkgoWriter.Println(" To add histograms, we can use grpc-ecosystem/go-grpc-middleware/v2") + } + }) + }) + + ginkgo.Context("metrics data sanity checks", func() { + ginkgo.It("should parse as valid Prometheus metrics format", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Basic validation: each metric line should have format "metric_name{labels} value" + metricDataLines := 0 + invalidLines := []string{} + + for _, line := range strings.Split(metricsContent, "\n") { + // Skip comments and empty lines + if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { + continue + } + + metricDataLines++ + + // Validate format: should have '{' for labels and end with a number + if !strings.Contains(line, 
"{") { + invalidLines = append(invalidLines, line) + + continue + } + + // Check it ends with a number (simple validation) + parts := strings.Fields(line) + if len(parts) < 2 { + invalidLines = append(invalidLines, line) + } + } + + gomega.Expect(invalidLines).To(gomega.BeEmpty(), + "Found %d invalid metric lines: %v", len(invalidLines), invalidLines) + + ginkgo.GinkgoWriter.Printf("Validated %d metric data lines ✓\n", metricDataLines) + }) + + ginkgo.It("should not have negative metric values", func() { + resp, err := http.Get(metricsURL) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + metricsContent := string(body) + + // Check for negative values + negativeLines := []string{} + + for _, line := range strings.Split(metricsContent, "\n") { + // Skip comments + if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" { + continue + } + + // Look for negative values + if strings.Contains(line, " -") { + negativeLines = append(negativeLines, line) + } + } + + gomega.Expect(negativeLines).To(gomega.BeEmpty(), + "Found metrics with negative values: %v", negativeLines) + }) + }) +}) diff --git a/e2e/local/local_suite_test.go b/e2e/local/local_suite_test.go index 3cd263000..581c48bbe 100644 --- a/e2e/local/local_suite_test.go +++ b/e2e/local/local_suite_test.go @@ -1,29 +1,29 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package local - -import ( - "testing" - - "github.com/agntcy/dir/e2e/shared/config" - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var cfg *config.Config - -func TestLocalE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - - var err error - - cfg, err = config.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - if cfg.DeploymentMode != config.DeploymentModeLocal { - t.Skip("Skipping local tests - not in local mode") - } - - ginkgo.RunSpecs(t, "Local E2E Test Suite") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package local + +import ( + "testing" + + "github.com/agntcy/dir/e2e/shared/config" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var cfg *config.Config + +func TestLocalE2E(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + + var err error + + cfg, err = config.LoadConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if cfg.DeploymentMode != config.DeploymentModeLocal { + t.Skip("Skipping local tests - not in local mode") + } + + ginkgo.RunSpecs(t, "Local E2E Test Suite") +} diff --git a/e2e/mcp/01_protocol_test.go b/e2e/mcp/01_protocol_test.go index 81d817484..e557378f8 100644 --- a/e2e/mcp/01_protocol_test.go +++ b/e2e/mcp/01_protocol_test.go @@ -1,644 +1,644 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "os/exec" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// MCPRequest represents a JSON-RPC 2.0 request. -type MCPRequest struct { - JSONRPC string `json:"jsonrpc"` - Method string `json:"method"` - Params interface{} `json:"params,omitempty"` - ID interface{} `json:"id"` -} - -// MCPResponse represents a JSON-RPC 2.0 response. 
-type MCPResponse struct { - JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *MCPError `json:"error,omitempty"` -} - -// MCPError represents a JSON-RPC 2.0 error. -type MCPError struct { - Code int `json:"code"` - Message string `json:"message"` -} - -// MCPClient manages the MCP server process and communication. -type MCPClient struct { - cmd *exec.Cmd - stdin io.WriteCloser - stdout *bufio.Scanner - stderr *bufio.Scanner -} - -// NewMCPClient starts an MCP server and returns a client to communicate with it. -// The path parameter should be the directory containing the MCP server code. -func NewMCPClient(mcpDir string) (*MCPClient, error) { - cmd := exec.CommandContext(context.Background(), "go", "run", ".") - cmd.Dir = mcpDir - - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stdin pipe: %w", err) - } - - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stdout pipe: %w", err) - } - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stderr pipe: %w", err) - } - - if err := cmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start MCP server: %w", err) - } - - // Give the server a moment to start - time.Sleep(100 * time.Millisecond) - - // Create scanner with larger buffer for large responses (e.g., schema resources) - stdoutScanner := bufio.NewScanner(stdout) - - const maxTokenSize = 10 * 1024 * 1024 // 10MB - - buf := make([]byte, maxTokenSize) - stdoutScanner.Buffer(buf, maxTokenSize) - - return &MCPClient{ - cmd: cmd, - stdin: stdin, - stdout: stdoutScanner, - stderr: bufio.NewScanner(stderr), - }, nil -} - -// SendRequest sends a JSON-RPC request and returns the response. -func (c *MCPClient) SendRequest(req MCPRequest) (*MCPResponse, error) { - // Marshal request - reqBytes, err := json.Marshal(req) - if err != nil { - return nil, fmt.Errorf("failed to marshal request: %w", err) - } - - // Send request with newline - if _, err := c.stdin.Write(append(reqBytes, '\n')); err != nil { - return nil, fmt.Errorf("failed to write request: %w", err) - } - - // Read response - if !c.stdout.Scan() { - if err := c.stdout.Err(); err != nil { - return nil, fmt.Errorf("failed to read response: %w", err) - } - - return nil, errors.New("no response received") - } - - // Parse response - var resp MCPResponse - if err := json.Unmarshal(c.stdout.Bytes(), &resp); err != nil { - return nil, fmt.Errorf("failed to unmarshal response: %w", err) - } - - return &resp, nil -} - -// Close stops the MCP server and cleans up. -func (c *MCPClient) Close() error { - if c.stdin != nil { - _ = c.stdin.Close() - } - - if c.cmd != nil && c.cmd.Process != nil { - _ = c.cmd.Process.Kill() - _ = c.cmd.Wait() - } - - return nil -} - -// GetStderrOutput reads any stderr output from the server. -func (c *MCPClient) GetStderrOutput() string { - var buf bytes.Buffer - for c.stderr.Scan() { - buf.WriteString(c.stderr.Text()) - buf.WriteString("\n") - } - - return buf.String() -} - -// Helper function to get OASF schema and validate it. 
-func getSchemaAndValidate(client *MCPClient, version string, requestID int) { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/call", - Params: map[string]interface{}{ - "name": "agntcy_oasf_get_schema", - "arguments": map[string]interface{}{ - "version": version, - }, - }, - ID: requestID, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - // Parse result - var result map[string]interface{} - - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - content, ok := result["content"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(content).To(gomega.HaveLen(1)) - - output, ok := content[0].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(output["type"]).To(gomega.Equal("text")) - - textOutput, ok := output["text"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - var toolOutput map[string]interface{} - - err = json.Unmarshal([]byte(textOutput), &toolOutput) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - gomega.Expect(toolOutput["version"]).To(gomega.Equal(version)) - gomega.Expect(toolOutput["schema"]).NotTo(gomega.BeEmpty()) - - // Verify it's valid JSON - schemaStr, ok := toolOutput["schema"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - var schema map[string]interface{} - - err = json.Unmarshal([]byte(schemaStr), &schema) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(schema).To(gomega.HaveKey("$defs")) -} - -// Helper function to validate a record and parse the output. -func validateRecordAndParseOutput(client *MCPClient, recordJSON string, requestID int) map[string]interface{} { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/call", - Params: map[string]interface{}{ - "name": "agntcy_oasf_validate_record", - "arguments": map[string]interface{}{ - "record_json": recordJSON, - }, - }, - ID: requestID, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - var result map[string]interface{} - - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - content, ok := result["content"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(content).To(gomega.HaveLen(1)) - - output, ok := content[0].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(output["type"]).To(gomega.Equal("text")) - - textOutput, ok := output["text"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - var toolOutput map[string]interface{} - - err = json.Unmarshal([]byte(textOutput), &toolOutput) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - return toolOutput -} - -var _ = ginkgo.Describe("MCP Server Protocol Tests", func() { - var client *MCPClient - var mcpDir string - - ginkgo.BeforeEach(func() { - // Get the MCP directory (relative to e2e/mcp) - repoRoot := filepath.Join("..", "..") - mcpDir = filepath.Join(repoRoot, "mcp") - - // Start MCP server using go run - var err error - client, err = NewMCPClient(mcpDir) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.AfterEach(func() { - if client != nil { - client.Close() - } - }) - - ginkgo.Context("MCP Initialization", func() { - ginkgo.It("should successfully initialize with proper capabilities", func() { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "initialize", - Params: map[string]interface{}{ - "protocolVersion": 
"2024-11-05", - "clientInfo": map[string]string{ - "name": "e2e-test-client", - "version": "1.0.0", - }, - "capabilities": map[string]interface{}{}, - }, - ID: 1, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - // Parse result - var result map[string]interface{} - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Verify server info - serverInfo, ok := result["serverInfo"].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(serverInfo["name"]).To(gomega.Equal("dir-mcp-server")) - gomega.Expect(serverInfo["version"]).To(gomega.Equal("v0.1.0")) - - // Verify capabilities - capabilities, ok := result["capabilities"].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(capabilities).To(gomega.HaveKey("tools")) - - ginkgo.GinkgoWriter.Printf("Server initialized successfully: %s %s\n", - serverInfo["name"], serverInfo["version"]) - }) - - ginkgo.It("should send initialized notification", func() { - // First initialize - initReq := MCPRequest{ - JSONRPC: "2.0", - Method: "initialize", - Params: map[string]interface{}{ - "protocolVersion": "2024-11-05", - "clientInfo": map[string]string{ - "name": "e2e-test-client", - "version": "1.0.0", - }, - "capabilities": map[string]interface{}{}, - }, - ID: 1, - } - - resp, err := client.SendRequest(initReq) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - // Send initialized notification (no response expected) - notifReq := MCPRequest{ - JSONRPC: "2.0", - Method: "initialized", - Params: map[string]interface{}{}, - } - - notifBytes, err := json.Marshal(notifReq) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = client.stdin.Write(append(notifBytes, '\n')) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.GinkgoWriter.Println("Initialized notification sent successfully") - }) - }) - - ginkgo.Context("Tools Listing and Calling", func() { - ginkgo.BeforeEach(func() { - // Initialize session - initReq := MCPRequest{ - JSONRPC: "2.0", - Method: "initialize", - Params: map[string]interface{}{ - "protocolVersion": "2024-11-05", - "clientInfo": map[string]string{ - "name": "e2e-test-client", - "version": "1.0.0", - }, - "capabilities": map[string]interface{}{}, - }, - ID: 1, - } - - resp, err := client.SendRequest(initReq) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - }) - - ginkgo.It("should list all available tools", func() { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/list", - Params: map[string]interface{}{}, - ID: 2, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - // Parse result - var result map[string]interface{} - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - tools, ok := result["tools"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(tools).To(gomega.HaveLen(4)) - - // Verify tool names - toolNames := make(map[string]bool) - for _, tool := range tools { - t, ok := tool.(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - - name, ok := t["name"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - toolNames[name] = true - ginkgo.GinkgoWriter.Printf(" - %s: %s\n", t["name"], t["description"]) - } - - 
gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_list_versions")) - gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_get_schema")) - gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_validate_record")) - gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_dir_push_record")) - - ginkgo.GinkgoWriter.Println("All tools listed successfully") - }) - - ginkgo.It("should validate a valid 0.7.0 record", func() { - recordJSON := string(testdata.ExpectedRecordV070JSON) - toolOutput := validateRecordAndParseOutput(client, recordJSON, 4) - - gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) - gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.7.0")) - - ginkgo.GinkgoWriter.Println("Record validated successfully") - }) - - ginkgo.It("should validate a valid 0.3.1 record", func() { - recordJSON := string(testdata.ExpectedRecordV031JSON) - toolOutput := validateRecordAndParseOutput(client, recordJSON, 5) - - gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) - gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.3.1")) - - ginkgo.GinkgoWriter.Println("0.3.1 record validated successfully") - }) - - ginkgo.It("should validate a valid 0.8.0 record", func() { - recordJSON := string(testdata.ExpectedRecordV080JSON) - toolOutput := validateRecordAndParseOutput(client, recordJSON, 6) - - gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) - gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.8.0")) - - ginkgo.GinkgoWriter.Println("0.8.0 record validated successfully") - }) - - ginkgo.It("should return validation errors for invalid record", func() { - invalidJSON := `{ - "name": "test-agent", - "version": "1.0.0", - "schema_version": "0.7.0", - "description": "Test", - "authors": ["Test"], - "created_at": "2025-01-01T00:00:00Z" - }` - - toolOutput := validateRecordAndParseOutput(client, invalidJSON, 7) - - gomega.Expect(toolOutput["valid"]).To(gomega.BeFalse()) - gomega.Expect(toolOutput["validation_errors"]).NotTo(gomega.BeEmpty()) - - errors, ok := toolOutput["validation_errors"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - ginkgo.GinkgoWriter.Printf("Validation errors returned: %v\n", errors) - }) - - ginkgo.It("should push a valid record to Directory server", func() { - recordJSON := string(testdata.ExpectedRecordV070JSON) - - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/call", - Params: map[string]interface{}{ - "name": "agntcy_dir_push_record", - "arguments": map[string]interface{}{ - "record_json": recordJSON, - }, - }, - ID: 8, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - var result map[string]interface{} - - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - content, ok := result["content"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(content).To(gomega.HaveLen(1)) - - output, ok := content[0].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(output["type"]).To(gomega.Equal("text")) - - textOutput, ok := output["text"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - var toolOutput map[string]interface{} - - err = json.Unmarshal([]byte(textOutput), &toolOutput) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Check for errors first - if errorMsg, hasError := toolOutput["error_message"]; hasError && errorMsg != nil && errorMsg != "" { - ginkgo.GinkgoWriter.Printf("Tool returned error: %v\n", errorMsg) - 
gomega.Expect(errorMsg).To(gomega.BeEmpty(), "Push should succeed without errors") - } - - // Verify the push response - gomega.Expect(toolOutput["cid"]).NotTo(gomega.BeEmpty()) - gomega.Expect(toolOutput["server_address"]).NotTo(gomega.BeEmpty()) - - cid, ok := toolOutput["cid"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(cid).To(gomega.HavePrefix("ba")) // CIDv1 starts with 'ba' - gomega.Expect(len(cid)).To(gomega.BeNumerically(">", 10)) - - serverAddress, ok := toolOutput["server_address"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(serverAddress).To(gomega.Equal("0.0.0.0:8888")) - - ginkgo.GinkgoWriter.Printf("Record pushed successfully with CID: %s to server: %s\n", cid, serverAddress) - }) - }) - - ginkgo.Context("Schema Tools", func() { - ginkgo.BeforeEach(func() { - // Initialize session - initReq := MCPRequest{ - JSONRPC: "2.0", - Method: "initialize", - Params: map[string]interface{}{ - "protocolVersion": "2024-11-05", - "clientInfo": map[string]string{ - "name": "e2e-test-client", - "version": "1.0.0", - }, - "capabilities": map[string]interface{}{}, - }, - ID: 1, - } - - resp, err := client.SendRequest(initReq) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - }) - - ginkgo.It("should list available schema versions", func() { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/call", - Params: map[string]interface{}{ - "name": "agntcy_oasf_list_versions", - "arguments": map[string]interface{}{}, - }, - ID: 2, - } - - resp, err := client.SendRequest(req) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(resp.Error).To(gomega.BeNil()) - - // Parse result - var result map[string]interface{} - err = json.Unmarshal(resp.Result, &result) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - content, ok := result["content"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(content).To(gomega.HaveLen(1)) - - output, ok := content[0].(map[string]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(output["type"]).To(gomega.Equal("text")) - - textOutput, ok := output["text"].(string) - gomega.Expect(ok).To(gomega.BeTrue()) - - var toolOutput map[string]interface{} - err = json.Unmarshal([]byte(textOutput), &toolOutput) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - availableVersions, ok := toolOutput["available_versions"].([]interface{}) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(availableVersions).To(gomega.ContainElement("0.3.1")) - gomega.Expect(availableVersions).To(gomega.ContainElement("0.7.0")) - - count, ok := toolOutput["count"].(float64) - gomega.Expect(ok).To(gomega.BeTrue()) - gomega.Expect(count).To(gomega.BeNumerically(">=", 2)) - - ginkgo.GinkgoWriter.Printf("Available versions: %v (count: %v)\n", availableVersions, count) - }) - - ginkgo.It("should get OASF 0.7.0 schema", func() { - getSchemaAndValidate(client, "0.7.0", 3) - ginkgo.GinkgoWriter.Println("OASF 0.7.0 schema retrieved successfully") - }) - - ginkgo.It("should get OASF 0.3.1 schema", func() { - getSchemaAndValidate(client, "0.3.1", 4) - ginkgo.GinkgoWriter.Println("OASF 0.3.1 schema retrieved successfully") - }) - - ginkgo.It("should return error for invalid schema version", func() { - req := MCPRequest{ - JSONRPC: "2.0", - Method: "tools/call", - Params: map[string]interface{}{ - "name": "agntcy_oasf_get_schema", - "arguments": map[string]interface{}{ - "version": "999.999.999", - }, - }, - ID: 5, - } - - resp, err := client.SendRequest(req) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred())
-			gomega.Expect(resp.Error).To(gomega.BeNil())
-
-			// Parse result
-			var result map[string]interface{}
-			err = json.Unmarshal(resp.Result, &result)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-			content, ok := result["content"].([]interface{})
-			gomega.Expect(ok).To(gomega.BeTrue())
-			gomega.Expect(content).To(gomega.HaveLen(1))
-
-			output, ok := content[0].(map[string]interface{})
-			gomega.Expect(ok).To(gomega.BeTrue())
-
-			textOutput, ok := output["text"].(string)
-			gomega.Expect(ok).To(gomega.BeTrue())
-
-			var toolOutput map[string]interface{}
-			err = json.Unmarshal([]byte(textOutput), &toolOutput)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred())
-
-			gomega.Expect(toolOutput["error_message"]).NotTo(gomega.BeEmpty())
-			gomega.Expect(toolOutput["available_versions"]).NotTo(gomega.BeEmpty())
-
-			ginkgo.GinkgoWriter.Printf("Error message: %v\n", toolOutput["error_message"])
-		})
-	})
-})
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package mcp
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os/exec"
+	"path/filepath"
+	"time"
+
+	"github.com/agntcy/dir/e2e/shared/testdata"
+	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+)
+
+// MCPRequest represents a JSON-RPC 2.0 request.
+type MCPRequest struct {
+	JSONRPC string      `json:"jsonrpc"`
+	Method  string      `json:"method"`
+	Params  interface{} `json:"params,omitempty"`
+	ID      interface{} `json:"id"`
+}
+
+// MCPResponse represents a JSON-RPC 2.0 response.
+type MCPResponse struct {
+	JSONRPC string          `json:"jsonrpc"`
+	ID      interface{}     `json:"id,omitempty"`
+	Result  json.RawMessage `json:"result,omitempty"`
+	Error   *MCPError       `json:"error,omitempty"`
+}
+
+// MCPError represents a JSON-RPC 2.0 error.
+type MCPError struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}
+
+// MCPClient manages the MCP server process and communication.
+type MCPClient struct {
+	cmd    *exec.Cmd
+	stdin  io.WriteCloser
+	stdout *bufio.Scanner
+	stderr *bufio.Scanner
+}
+
+// NewMCPClient starts an MCP server and returns a client to communicate with it.
+// The mcpDir parameter should be the directory containing the MCP server code.
+func NewMCPClient(mcpDir string) (*MCPClient, error) {
+	cmd := exec.CommandContext(context.Background(), "go", "run", ".")
+	cmd.Dir = mcpDir
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create stdin pipe: %w", err)
+	}
+
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create stdout pipe: %w", err)
+	}
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create stderr pipe: %w", err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, fmt.Errorf("failed to start MCP server: %w", err)
+	}
+
+	// Give the server a moment to start
+	time.Sleep(100 * time.Millisecond)
+
+	// Create scanner with a larger buffer for large responses (e.g., schema resources);
+	// bufio.Scanner's default 64KB token limit is too small for full schema payloads.
+	stdoutScanner := bufio.NewScanner(stdout)
+
+	const maxTokenSize = 10 * 1024 * 1024 // 10MB
+
+	buf := make([]byte, maxTokenSize)
+	stdoutScanner.Buffer(buf, maxTokenSize)
+
+	return &MCPClient{
+		cmd:    cmd,
+		stdin:  stdin,
+		stdout: stdoutScanner,
+		stderr: bufio.NewScanner(stderr),
+	}, nil
+}
+
+// SendRequest sends a JSON-RPC request and returns the response.
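+// Requests and responses are framed as newline-delimited JSON: the request is
+// written as a single line to the server's stdin, and the call blocks until one
+// response line is read back from stdout, so the client is not safe for
+// concurrent use. Illustrative usage (the method and ID here are examples):
+//
+//	resp, err := client.SendRequest(MCPRequest{JSONRPC: "2.0", Method: "tools/list", ID: 1})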
+func (c *MCPClient) SendRequest(req MCPRequest) (*MCPResponse, error) { + // Marshal request + reqBytes, err := json.Marshal(req) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + // Send request with newline + if _, err := c.stdin.Write(append(reqBytes, '\n')); err != nil { + return nil, fmt.Errorf("failed to write request: %w", err) + } + + // Read response + if !c.stdout.Scan() { + if err := c.stdout.Err(); err != nil { + return nil, fmt.Errorf("failed to read response: %w", err) + } + + return nil, errors.New("no response received") + } + + // Parse response + var resp MCPResponse + if err := json.Unmarshal(c.stdout.Bytes(), &resp); err != nil { + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + return &resp, nil +} + +// Close stops the MCP server and cleans up. +func (c *MCPClient) Close() error { + if c.stdin != nil { + _ = c.stdin.Close() + } + + if c.cmd != nil && c.cmd.Process != nil { + _ = c.cmd.Process.Kill() + _ = c.cmd.Wait() + } + + return nil +} + +// GetStderrOutput reads any stderr output from the server. +func (c *MCPClient) GetStderrOutput() string { + var buf bytes.Buffer + for c.stderr.Scan() { + buf.WriteString(c.stderr.Text()) + buf.WriteString("\n") + } + + return buf.String() +} + +// Helper function to get OASF schema and validate it. +func getSchemaAndValidate(client *MCPClient, version string, requestID int) { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/call", + Params: map[string]interface{}{ + "name": "agntcy_oasf_get_schema", + "arguments": map[string]interface{}{ + "version": version, + }, + }, + ID: requestID, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Parse result + var result map[string]interface{} + + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + content, ok := result["content"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(content).To(gomega.HaveLen(1)) + + output, ok := content[0].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(output["type"]).To(gomega.Equal("text")) + + textOutput, ok := output["text"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var toolOutput map[string]interface{} + + err = json.Unmarshal([]byte(textOutput), &toolOutput) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Expect(toolOutput["version"]).To(gomega.Equal(version)) + gomega.Expect(toolOutput["schema"]).NotTo(gomega.BeEmpty()) + + // Verify it's valid JSON + schemaStr, ok := toolOutput["schema"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var schema map[string]interface{} + + err = json.Unmarshal([]byte(schemaStr), &schema) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(schema).To(gomega.HaveKey("$defs")) +} + +// Helper function to validate a record and parse the output. 
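+// It invokes the agntcy_oasf_validate_record tool with the given record JSON,
+// asserts the JSON-RPC call itself succeeded, and returns the tool's text
+// content decoded into a map (keys such as "valid", "schema_version", and
+// "validation_errors" are what the specs below assert on).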
+func validateRecordAndParseOutput(client *MCPClient, recordJSON string, requestID int) map[string]interface{} { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/call", + Params: map[string]interface{}{ + "name": "agntcy_oasf_validate_record", + "arguments": map[string]interface{}{ + "record_json": recordJSON, + }, + }, + ID: requestID, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + var result map[string]interface{} + + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + content, ok := result["content"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(content).To(gomega.HaveLen(1)) + + output, ok := content[0].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(output["type"]).To(gomega.Equal("text")) + + textOutput, ok := output["text"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var toolOutput map[string]interface{} + + err = json.Unmarshal([]byte(textOutput), &toolOutput) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return toolOutput +} + +var _ = ginkgo.Describe("MCP Server Protocol Tests", func() { + var client *MCPClient + var mcpDir string + + ginkgo.BeforeEach(func() { + // Get the MCP directory (relative to e2e/mcp) + repoRoot := filepath.Join("..", "..") + mcpDir = filepath.Join(repoRoot, "mcp") + + // Start MCP server using go run + var err error + client, err = NewMCPClient(mcpDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.AfterEach(func() { + if client != nil { + client.Close() + } + }) + + ginkgo.Context("MCP Initialization", func() { + ginkgo.It("should successfully initialize with proper capabilities", func() { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "initialize", + Params: map[string]interface{}{ + "protocolVersion": "2024-11-05", + "clientInfo": map[string]string{ + "name": "e2e-test-client", + "version": "1.0.0", + }, + "capabilities": map[string]interface{}{}, + }, + ID: 1, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Parse result + var result map[string]interface{} + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Verify server info + serverInfo, ok := result["serverInfo"].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(serverInfo["name"]).To(gomega.Equal("dir-mcp-server")) + gomega.Expect(serverInfo["version"]).To(gomega.Equal("v0.1.0")) + + // Verify capabilities + capabilities, ok := result["capabilities"].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(capabilities).To(gomega.HaveKey("tools")) + + ginkgo.GinkgoWriter.Printf("Server initialized successfully: %s %s\n", + serverInfo["name"], serverInfo["version"]) + }) + + ginkgo.It("should send initialized notification", func() { + // First initialize + initReq := MCPRequest{ + JSONRPC: "2.0", + Method: "initialize", + Params: map[string]interface{}{ + "protocolVersion": "2024-11-05", + "clientInfo": map[string]string{ + "name": "e2e-test-client", + "version": "1.0.0", + }, + "capabilities": map[string]interface{}{}, + }, + ID: 1, + } + + resp, err := client.SendRequest(initReq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Send initialized notification (no response expected) + notifReq := 
MCPRequest{ + JSONRPC: "2.0", + Method: "initialized", + Params: map[string]interface{}{}, + } + + notifBytes, err := json.Marshal(notifReq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = client.stdin.Write(append(notifBytes, '\n')) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.GinkgoWriter.Println("Initialized notification sent successfully") + }) + }) + + ginkgo.Context("Tools Listing and Calling", func() { + ginkgo.BeforeEach(func() { + // Initialize session + initReq := MCPRequest{ + JSONRPC: "2.0", + Method: "initialize", + Params: map[string]interface{}{ + "protocolVersion": "2024-11-05", + "clientInfo": map[string]string{ + "name": "e2e-test-client", + "version": "1.0.0", + }, + "capabilities": map[string]interface{}{}, + }, + ID: 1, + } + + resp, err := client.SendRequest(initReq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + }) + + ginkgo.It("should list all available tools", func() { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/list", + Params: map[string]interface{}{}, + ID: 2, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Parse result + var result map[string]interface{} + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + tools, ok := result["tools"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(tools).To(gomega.HaveLen(4)) + + // Verify tool names + toolNames := make(map[string]bool) + for _, tool := range tools { + t, ok := tool.(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + + name, ok := t["name"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + toolNames[name] = true + ginkgo.GinkgoWriter.Printf(" - %s: %s\n", t["name"], t["description"]) + } + + gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_list_versions")) + gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_get_schema")) + gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_oasf_validate_record")) + gomega.Expect(toolNames).To(gomega.HaveKey("agntcy_dir_push_record")) + + ginkgo.GinkgoWriter.Println("All tools listed successfully") + }) + + ginkgo.It("should validate a valid 0.7.0 record", func() { + recordJSON := string(testdata.ExpectedRecordV070JSON) + toolOutput := validateRecordAndParseOutput(client, recordJSON, 4) + + gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) + gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.7.0")) + + ginkgo.GinkgoWriter.Println("Record validated successfully") + }) + + ginkgo.It("should validate a valid 0.3.1 record", func() { + recordJSON := string(testdata.ExpectedRecordV031JSON) + toolOutput := validateRecordAndParseOutput(client, recordJSON, 5) + + gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) + gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.3.1")) + + ginkgo.GinkgoWriter.Println("0.3.1 record validated successfully") + }) + + ginkgo.It("should validate a valid 0.8.0 record", func() { + recordJSON := string(testdata.ExpectedRecordV080JSON) + toolOutput := validateRecordAndParseOutput(client, recordJSON, 6) + + gomega.Expect(toolOutput["valid"]).To(gomega.BeTrue()) + gomega.Expect(toolOutput["schema_version"]).To(gomega.Equal("0.8.0")) + + ginkgo.GinkgoWriter.Println("0.8.0 record validated successfully") + }) + + ginkgo.It("should return validation errors for invalid record", func() { + invalidJSON := `{ + "name": "test-agent", + "version": 
"1.0.0", + "schema_version": "0.7.0", + "description": "Test", + "authors": ["Test"], + "created_at": "2025-01-01T00:00:00Z" + }` + + toolOutput := validateRecordAndParseOutput(client, invalidJSON, 7) + + gomega.Expect(toolOutput["valid"]).To(gomega.BeFalse()) + gomega.Expect(toolOutput["validation_errors"]).NotTo(gomega.BeEmpty()) + + errors, ok := toolOutput["validation_errors"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + ginkgo.GinkgoWriter.Printf("Validation errors returned: %v\n", errors) + }) + + ginkgo.It("should push a valid record to Directory server", func() { + recordJSON := string(testdata.ExpectedRecordV070JSON) + + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/call", + Params: map[string]interface{}{ + "name": "agntcy_dir_push_record", + "arguments": map[string]interface{}{ + "record_json": recordJSON, + }, + }, + ID: 8, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + var result map[string]interface{} + + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + content, ok := result["content"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(content).To(gomega.HaveLen(1)) + + output, ok := content[0].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(output["type"]).To(gomega.Equal("text")) + + textOutput, ok := output["text"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var toolOutput map[string]interface{} + + err = json.Unmarshal([]byte(textOutput), &toolOutput) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Check for errors first + if errorMsg, hasError := toolOutput["error_message"]; hasError && errorMsg != nil && errorMsg != "" { + ginkgo.GinkgoWriter.Printf("Tool returned error: %v\n", errorMsg) + gomega.Expect(errorMsg).To(gomega.BeEmpty(), "Push should succeed without errors") + } + + // Verify the push response + gomega.Expect(toolOutput["cid"]).NotTo(gomega.BeEmpty()) + gomega.Expect(toolOutput["server_address"]).NotTo(gomega.BeEmpty()) + + cid, ok := toolOutput["cid"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(cid).To(gomega.HavePrefix("ba")) // CIDv1 starts with 'ba' + gomega.Expect(len(cid)).To(gomega.BeNumerically(">", 10)) + + serverAddress, ok := toolOutput["server_address"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(serverAddress).To(gomega.Equal("0.0.0.0:8888")) + + ginkgo.GinkgoWriter.Printf("Record pushed successfully with CID: %s to server: %s\n", cid, serverAddress) + }) + }) + + ginkgo.Context("Schema Tools", func() { + ginkgo.BeforeEach(func() { + // Initialize session + initReq := MCPRequest{ + JSONRPC: "2.0", + Method: "initialize", + Params: map[string]interface{}{ + "protocolVersion": "2024-11-05", + "clientInfo": map[string]string{ + "name": "e2e-test-client", + "version": "1.0.0", + }, + "capabilities": map[string]interface{}{}, + }, + ID: 1, + } + + resp, err := client.SendRequest(initReq) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + }) + + ginkgo.It("should list available schema versions", func() { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/call", + Params: map[string]interface{}{ + "name": "agntcy_oasf_list_versions", + "arguments": map[string]interface{}{}, + }, + ID: 2, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Parse result + var result map[string]interface{} + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + content, ok := result["content"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(content).To(gomega.HaveLen(1)) + + output, ok := content[0].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(output["type"]).To(gomega.Equal("text")) + + textOutput, ok := output["text"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var toolOutput map[string]interface{} + err = json.Unmarshal([]byte(textOutput), &toolOutput) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + availableVersions, ok := toolOutput["available_versions"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(availableVersions).To(gomega.ContainElement("0.3.1")) + gomega.Expect(availableVersions).To(gomega.ContainElement("0.7.0")) + + count, ok := toolOutput["count"].(float64) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(count).To(gomega.BeNumerically(">=", 2)) + + ginkgo.GinkgoWriter.Printf("Available versions: %v (count: %v)\n", availableVersions, count) + }) + + ginkgo.It("should get OASF 0.7.0 schema", func() { + getSchemaAndValidate(client, "0.7.0", 3) + ginkgo.GinkgoWriter.Println("OASF 0.7.0 schema retrieved successfully") + }) + + ginkgo.It("should get OASF 0.3.1 schema", func() { + getSchemaAndValidate(client, "0.3.1", 4) + ginkgo.GinkgoWriter.Println("OASF 0.3.1 schema retrieved successfully") + }) + + ginkgo.It("should return error for invalid schema version", func() { + req := MCPRequest{ + JSONRPC: "2.0", + Method: "tools/call", + Params: map[string]interface{}{ + "name": "agntcy_oasf_get_schema", + "arguments": map[string]interface{}{ + "version": "999.999.999", + }, + }, + ID: 5, + } + + resp, err := client.SendRequest(req) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(resp.Error).To(gomega.BeNil()) + + // Parse result + var result map[string]interface{} + err = json.Unmarshal(resp.Result, &result) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + content, ok := result["content"].([]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + gomega.Expect(content).To(gomega.HaveLen(1)) + + output, ok := content[0].(map[string]interface{}) + gomega.Expect(ok).To(gomega.BeTrue()) + + textOutput, ok := output["text"].(string) + gomega.Expect(ok).To(gomega.BeTrue()) + + var toolOutput map[string]interface{} + err = json.Unmarshal([]byte(textOutput), &toolOutput) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Expect(toolOutput["error_message"]).NotTo(gomega.BeEmpty()) + gomega.Expect(toolOutput["available_versions"]).NotTo(gomega.BeEmpty()) + + ginkgo.GinkgoWriter.Printf("Error message: %v\n", toolOutput["error_message"]) + }) + }) +}) diff --git a/e2e/mcp/mcp_suite_test.go b/e2e/mcp/mcp_suite_test.go index adeb51288..66418ea46 100644 --- a/e2e/mcp/mcp_suite_test.go +++ b/e2e/mcp/mcp_suite_test.go @@ -1,16 +1,16 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "testing" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -func TestMCPE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - ginkgo.RunSpecs(t, "MCP E2E Test Suite") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "testing" + + ginkgo 
"github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +func TestMCPE2E(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "MCP E2E Test Suite") +} diff --git a/e2e/network/01_deploy_test.go b/e2e/network/01_deploy_test.go index 0ee3010ef..a2a4feb56 100644 --- a/e2e/network/01_deploy_test.go +++ b/e2e/network/01_deploy_test.go @@ -1,165 +1,165 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "os" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Using peer addresses from utils.constants - -// Package-level variables for cleanup (accessible by AfterSuite) -// CIDs are now tracked in network_suite_test.go - -var _ = ginkgo.Describe("Running dirctl end-to-end tests using a network multi peer deployment", ginkgo.Ordered, func() { - var cli *utils.CLI - var cid string - - // Setup temp record file - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - tempPath := filepath.Join(tempDir, "record_070_network_test.json") - - // Create directory and write record data - _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) - _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeNetwork { - ginkgo.Skip("Skipping test, not in network mode") - } - - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.It("should push record_070.json to peer 1", func() { - cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Track CID for cleanup - RegisterCIDForCleanup(cid, "deploy") - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, tempPath) - }) - - ginkgo.It("should pull record_070.json from peer 1", func() { - cli.Pull(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - }) - - ginkgo.It("should fail to pull record_070.json from peer 2", func() { - _ = cli.Pull(cid).OnServer(utils.Peer2Addr).ShouldFail() - }) - - ginkgo.It("should publish record_070.json to the network on peer 1", func() { - cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - - // Wait at least 10 seconds to ensure the record is published. 
- time.Sleep(15 * time.Second) - }) - - ginkgo.It("should fail publish record_070.json to the network on peer 2 that does not store the record", func() { - _ = cli.Routing().Publish(cid).OnServer(utils.Peer2Addr).ShouldFail() - }) - - ginkgo.It("should list local records correctly (List is local-only)", func() { - // Reset CLI state to ensure clean test environment - utils.ResetCLIState() - - // Test that List only returns records on the peer that published them - // Peer1 published the record, so it should find it locally - output := cli.Routing().List().WithCid(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - - // Should find the local record - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - gomega.Expect(output).To(gomega.ContainSubstring("Local records")) - - // Reset CLI state before testing Peer2 - utils.ResetCLIState() - - // Peer2 did NOT publish the record, so List should not find it locally - // (even though it might be available via DHT/network) - output2 := cli.Routing().List().WithCid(cid).OnServer(utils.Peer2Addr).ShouldSucceed() - - // Should NOT find the record locally on Peer2 - gomega.Expect(output2).To(gomega.ContainSubstring("No local records found")) - }) - - ginkgo.It("should list by skill correctly on local vs remote peers", func() { - // Reset CLI state to ensure clean test environment - utils.ResetCLIState() - - // Test Peer1 (published the record) - should find it locally - output1 := cli.Routing().List().WithSkill("natural_language_processing").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Should find the local record with expected labels - gomega.Expect(output1).To(gomega.ContainSubstring(cid)) - gomega.Expect(output1).To(gomega.ContainSubstring("Local records")) - gomega.Expect(output1).To(gomega.ContainSubstring("/skills/natural_language_processing/natural_language_generation/text_completion")) - gomega.Expect(output1).To(gomega.ContainSubstring("/skills/natural_language_processing/analytical_reasoning/problem_solving")) - - // Reset CLI state again before testing Peer2 - utils.ResetCLIState() - - // Test Peer2 (did NOT publish the record) - should not find it locally - output2 := cli.Routing().List().WithSkill("natural_language_processing").OnServer(utils.Peer2Addr).ShouldSucceed() - - // Should NOT find the record locally, but should show helpful message - gomega.Expect(output2).NotTo(gomega.ContainSubstring(cid)) - // Note: If no local records match, CLI might show empty results or no records message - }) - - ginkgo.It("should show routing info statistics", func() { - // Reset CLI state to ensure clean test environment - utils.ResetCLIState() - - // Test routing info on Peer1 (has published records) - output1 := cli.Routing().Info().OnServer(utils.Peer1Addr).ShouldSucceed() - - // Should show local routing statistics - gomega.Expect(output1).To(gomega.ContainSubstring("Local Routing Summary")) - gomega.Expect(output1).To(gomega.ContainSubstring("Total Records:")) - gomega.Expect(output1).To(gomega.ContainSubstring("Skills Distribution")) - - // Reset CLI state before testing Peer2 - utils.ResetCLIState() - - // Test routing info on Peer2 (no published records) - output2 := cli.Routing().Info().OnServer(utils.Peer2Addr).ShouldSucceed() - - // Should show empty statistics or no records message - gomega.Expect(output2).To(gomega.ContainSubstring("Local Routing Summary")) - // Peer2 might have 0 records or show "No local records found" - }) - - ginkgo.It("should discover remote records via routing search", func() { - // Reset CLI state to ensure 
clean test environment - utils.ResetCLIState() - - // Test routing search from Peer2 to discover records published by Peer1 - // This tests whether DHT propagation is working in the e2e environment - output := cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer2Addr).ShouldEventuallyContain(cid, 60*time.Second) - - ginkgo.GinkgoWriter.Printf("=== DHT DISCOVERY TEST OUTPUT ===\n%s", output) - - // CLEANUP: This is the last test in this Describe block - // Clean up deploy test records to ensure isolation from subsequent test files - ginkgo.DeferCleanup(func() { - CleanupNetworkRecords(deployTestCIDs, "deploy tests") - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + "os" + "path/filepath" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Using peer addresses from utils.constants + +// Package-level variables for cleanup (accessible by AfterSuite) +// CIDs are now tracked in network_suite_test.go + +var _ = ginkgo.Describe("Running dirctl end-to-end tests using a network multi peer deployment", ginkgo.Ordered, func() { + var cli *utils.CLI + var cid string + + // Setup temp record file + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + tempPath := filepath.Join(tempDir, "record_070_network_test.json") + + // Create directory and write record data + _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) + _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeNetwork { + ginkgo.Skip("Skipping test, not in network mode") + } + + // Initialize CLI helper + cli = utils.NewCLI() + }) + + ginkgo.It("should push record_070.json to peer 1", func() { + cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + + // Track CID for cleanup + RegisterCIDForCleanup(cid, "deploy") + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, tempPath) + }) + + ginkgo.It("should pull record_070.json from peer 1", func() { + cli.Pull(cid).OnServer(utils.Peer1Addr).ShouldSucceed() + }) + + ginkgo.It("should fail to pull record_070.json from peer 2", func() { + _ = cli.Pull(cid).OnServer(utils.Peer2Addr).ShouldFail() + }) + + ginkgo.It("should publish record_070.json to the network on peer 1", func() { + cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() + + // Wait at least 10 seconds to ensure the record is published. 
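+		// Sleeping 15 seconds (rather than the 10-second minimum) adds margin for DHT propagation and reduces flakiness.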
+		time.Sleep(15 * time.Second)
+	})
+
+	ginkgo.It("should fail to publish record_070.json to the network on peer 2 that does not store the record", func() {
+		_ = cli.Routing().Publish(cid).OnServer(utils.Peer2Addr).ShouldFail()
+	})
+
+	ginkgo.It("should list local records correctly (List is local-only)", func() {
+		// Reset CLI state to ensure clean test environment
+		utils.ResetCLIState()
+
+		// Test that List only returns records on the peer that published them
+		// Peer1 published the record, so it should find it locally
+		output := cli.Routing().List().WithCid(cid).OnServer(utils.Peer1Addr).ShouldSucceed()
+
+		// Should find the local record
+		gomega.Expect(output).To(gomega.ContainSubstring(cid))
+		gomega.Expect(output).To(gomega.ContainSubstring("Local records"))
+
+		// Reset CLI state before testing Peer2
+		utils.ResetCLIState()
+
+		// Peer2 did NOT publish the record, so List should not find it locally
+		// (even though it might be available via DHT/network)
+		output2 := cli.Routing().List().WithCid(cid).OnServer(utils.Peer2Addr).ShouldSucceed()
+
+		// Should NOT find the record locally on Peer2
+		gomega.Expect(output2).To(gomega.ContainSubstring("No local records found"))
+	})
+
+	ginkgo.It("should list by skill correctly on local vs remote peers", func() {
+		// Reset CLI state to ensure clean test environment
+		utils.ResetCLIState()
+
+		// Test Peer1 (published the record) - should find it locally
+		output1 := cli.Routing().List().WithSkill("natural_language_processing").OnServer(utils.Peer1Addr).ShouldSucceed()
+
+		// Should find the local record with expected labels
+		gomega.Expect(output1).To(gomega.ContainSubstring(cid))
+		gomega.Expect(output1).To(gomega.ContainSubstring("Local records"))
+		gomega.Expect(output1).To(gomega.ContainSubstring("/skills/natural_language_processing/natural_language_generation/text_completion"))
+		gomega.Expect(output1).To(gomega.ContainSubstring("/skills/natural_language_processing/analytical_reasoning/problem_solving"))
+
+		// Reset CLI state again before testing Peer2
+		utils.ResetCLIState()
+
+		// Test Peer2 (did NOT publish the record) - should not find it locally
+		output2 := cli.Routing().List().WithSkill("natural_language_processing").OnServer(utils.Peer2Addr).ShouldSucceed()
+
+		// Should NOT find the record locally, but should show a helpful message
+		gomega.Expect(output2).NotTo(gomega.ContainSubstring(cid))
+		// Note: If no local records match, CLI might show empty results or a no-records message
+	})
+
+	ginkgo.It("should show routing info statistics", func() {
+		// Reset CLI state to ensure clean test environment
+		utils.ResetCLIState()
+
+		// Test routing info on Peer1 (has published records)
+		output1 := cli.Routing().Info().OnServer(utils.Peer1Addr).ShouldSucceed()
+
+		// Should show local routing statistics
+		gomega.Expect(output1).To(gomega.ContainSubstring("Local Routing Summary"))
+		gomega.Expect(output1).To(gomega.ContainSubstring("Total Records:"))
+		gomega.Expect(output1).To(gomega.ContainSubstring("Skills Distribution"))
+
+		// Reset CLI state before testing Peer2
+		utils.ResetCLIState()
+
+		// Test routing info on Peer2 (no published records)
+		output2 := cli.Routing().Info().OnServer(utils.Peer2Addr).ShouldSucceed()
+
+		// Should show empty statistics or a no-records message
+		gomega.Expect(output2).To(gomega.ContainSubstring("Local Routing Summary"))
+		// Peer2 might have 0 records or show "No local records found"
+	})
+
+	ginkgo.It("should discover remote records via routing search", func() {
+		// Reset CLI state to ensure
clean test environment + utils.ResetCLIState() + + // Test routing search from Peer2 to discover records published by Peer1 + // This tests whether DHT propagation is working in the e2e environment + output := cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer2Addr).ShouldEventuallyContain(cid, 60*time.Second) + + ginkgo.GinkgoWriter.Printf("=== DHT DISCOVERY TEST OUTPUT ===\n%s", output) + + // CLEANUP: This is the last test in this Describe block + // Clean up deploy test records to ensure isolation from subsequent test files + ginkgo.DeferCleanup(func() { + CleanupNetworkRecords(deployTestCIDs, "deploy tests") + }) + }) +}) diff --git a/e2e/network/02_sync_test.go b/e2e/network/02_sync_test.go index 22cb31b62..3ee33a9bc 100644 --- a/e2e/network/02_sync_test.go +++ b/e2e/network/02_sync_test.go @@ -1,291 +1,291 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "os" - "path/filepath" - "strings" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Using peer addresses from utils.constants - -// Package-level variables for cleanup (accessible by AfterSuite) -// CIDs are now tracked in network_suite_test.go - -var _ = ginkgo.Describe("Running dirctl end-to-end tests for sync commands", func() { - var cli *utils.CLI - var syncID string - var privateKeyPath string - var tempKeyDir string - - // Setup temp files for CLI commands (CLI needs actual files on disk) - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - recordV4Path := filepath.Join(tempDir, "record_070_sync_v4_test.json") - recordV5Path := filepath.Join(tempDir, "record_070_sync_v5_test.json") - - // Create directory and write record data - _ = os.MkdirAll(filepath.Dir(recordV4Path), 0o755) - _ = os.WriteFile(recordV4Path, testdata.ExpectedRecordV070SyncV4JSON, 0o600) - _ = os.WriteFile(recordV5Path, testdata.ExpectedRecordV070SyncV5JSON, 0o600) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeNetwork { - ginkgo.Skip("Skipping test, not in network mode") - } - - utils.ResetCLIState() - - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.Context("create command", func() { - ginkgo.It("should accept valid remote URL format", func() { - output := cli.Sync().Create("https://directory.example.com").OnServer(utils.Peer1Addr).ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("Sync created with ID: ")) - syncID = strings.TrimPrefix(output, "Sync created with ID: ") - }) - }) - - ginkgo.Context("list command", func() { - ginkgo.It("should execute without arguments and return a list with the created sync", func() { - output := cli.Sync().List().OnServer(utils.Peer1Addr).ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring(syncID)) - gomega.Expect(output).To(gomega.ContainSubstring("https://directory.example.com")) - }) - }) - - ginkgo.Context("status command", func() { - ginkgo.It("should accept a sync ID argument and return the sync status", func() { - output := cli.Sync().Status(syncID).OnServer(utils.Peer1Addr).ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("PENDING")) - }) - }) - - ginkgo.Context("delete command", func() { - ginkgo.It("should accept a sync ID argument and delete the sync", func() 
{ - // Command may fail due to network/auth issues, but argument parsing should work - _, err := cli.Sync().Delete(syncID).OnServer(utils.Peer1Addr).Execute() - if err != nil { - gomega.Expect(err.Error()).NotTo(gomega.ContainSubstring("required")) - } - }) - - ginkgo.It("should return deleted status", func() { - cli.Sync().Status(syncID).OnServer(utils.Peer1Addr).ShouldContain("DELETE") - }) - }) - - ginkgo.Context("sync functionality", ginkgo.Ordered, func() { - var cid string - var cidV5 string - - // Setup cosign key pair for signing tests - ginkgo.BeforeAll(func() { - var err error - tempKeyDir, err = os.MkdirTemp("", "sync-test-keys") - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Generate cosign key pair - utils.GenerateCosignKeyPair(tempKeyDir) - privateKeyPath = filepath.Join(tempKeyDir, "cosign.key") - - // Verify key file was created - gomega.Expect(privateKeyPath).To(gomega.BeAnExistingFile()) - - // Set cosign password for signing - err = os.Setenv("COSIGN_PASSWORD", utils.TestPassword) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - // Cleanup cosign keys after all tests - ginkgo.AfterAll(func() { - os.Unsetenv("COSIGN_PASSWORD") - if tempKeyDir != "" { - err := os.RemoveAll(tempKeyDir) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - }) - - ginkgo.It("should push record_070_sync_v4.json to peer 1", func() { - cid = cli.Push(recordV4Path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Track CID for cleanup - RegisterCIDForCleanup(cid, "sync") - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, recordV4Path) - - // Sign the record - output := cli.Sign(cid, privateKeyPath).OnServer(utils.Peer1Addr).ShouldSucceed() - ginkgo.GinkgoWriter.Printf("Sign output: %s", output) - }) - - ginkgo.It("should publish record_070_sync_v4.json", func() { - cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - }) - - ginkgo.It("should push record_070_sync_v5.json to peer 1", func() { - cidV5 = cli.Push(recordV5Path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Track CID for cleanup - RegisterCIDForCleanup(cidV5, "sync") - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cidV5, recordV5Path) - - // Sign the record - output := cli.Sign(cidV5, privateKeyPath).OnServer(utils.Peer1Addr).ShouldSucceed() - ginkgo.GinkgoWriter.Printf("Sign output: %s", output) - }) - - ginkgo.It("should publish record_070_sync_v5.json", func() { - cli.Routing().Publish(cidV5).OnServer(utils.Peer1Addr).ShouldSucceed() - }) - - ginkgo.It("should fail to pull record_070_sync_v4.json from peer 2", func() { - _ = cli.Pull(cid).OnServer(utils.Peer2Addr).ShouldFail() - }) - - ginkgo.It("should create sync from peer 1 to peer 2", func() { - output := cli.Sync().Create(utils.Peer1InternalAddr).OnServer(utils.Peer2Addr).ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring("Sync created with ID: ")) - syncID = strings.TrimPrefix(output, "Sync created with ID: ") - }) - - ginkgo.It("should list the sync", func() { - output := cli.Sync().List().OnServer(utils.Peer2Addr).ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring(syncID)) - gomega.Expect(output).To(gomega.ContainSubstring(utils.Peer1InternalAddr)) - }) - - // Wait for sync to complete - ginkgo.It("should wait for sync to complete", func() { - // Poll sync status until it changes from PENDING to IN_PROGRESS - output := 
cli.Sync().Status(syncID).OnServer(utils.Peer2Addr).ShouldEventuallyContain("IN_PROGRESS", 120*time.Second) - ginkgo.GinkgoWriter.Printf("Current sync status: %s", output) - - // Wait for 60 seconds to ensure the sync is complete (reduce flakiness) - time.Sleep(60 * time.Second) - }) - - ginkgo.It("should succeed to pull record_070_sync_v4.json from peer 2 after sync", func() { - output := cli.Pull(cid).WithArgs("--output", "json").OnServer(utils.Peer2Addr).ShouldSucceed() - - // Compare the output with the expected JSON - equal, err := utils.CompareOASFRecords([]byte(output), testdata.ExpectedRecordV070SyncV4JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(equal).To(gomega.BeTrue()) - }) - - ginkgo.It("should succeed to search for record_070_sync_v4.json from peer 2 after sync", func() { - // Search should eventually return the cid in peer 2 (retry until monitor indexes the record) - output := cli.Search().WithName("directory.agntcy.org/cisco/marketing-strategy-v4").OnServer(utils.Peer2Addr).ShouldEventuallyContain(cid, 240*time.Second) - - ginkgo.GinkgoWriter.Printf("Search found cid: %s", output) - }) - - ginkgo.It("should verify the record_070_sync_v4.json from peer 2 after sync", func() { - output := cli.Verify(cid).OnServer(utils.Peer2Addr).ShouldSucceed() - - // Verify the output - gomega.Expect(output).To(gomega.ContainSubstring("Record signature is: trusted")) - }) - - // Delete sync from peer 2 - ginkgo.It("should delete sync from peer 2", func() { - cli.Sync().Delete(syncID).OnServer(utils.Peer2Addr).ShouldSucceed() - }) - - // Wait for sync to complete - ginkgo.It("should wait for delete to complete", func() { - // Poll sync status until it changes from DELETE_PENDING to DELETED - output := cli.Sync().Status(syncID).OnServer(utils.Peer2Addr).ShouldEventuallyContain("DELETED", 120*time.Second) - ginkgo.GinkgoWriter.Printf("Current sync status: %s", output) - }) - - ginkgo.It("should create sync from peer 1 to peer 3 using routing search piped to sync create", func() { - ginkgo.GinkgoWriter.Printf("Verifying initial state - peer 3 should not have any records\n") - _ = cli.Pull(cid).OnServer(utils.Peer3Addr).ShouldFail() // v4 (NLP) should not exist - _ = cli.Pull(cidV5).OnServer(utils.Peer3Addr).ShouldFail() // v5 (Audio) should not exist - - ginkgo.GinkgoWriter.Printf("Running routing search for 'audio' skill\n") - searchOutput := cli.Routing().Search().WithArgs("--skill", "audio").WithArgs("--output", "json").OnServer(utils.Peer3Addr).ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Routing search output: %s\n", searchOutput) - gomega.Expect(searchOutput).To(gomega.ContainSubstring(cidV5)) - - ginkgo.GinkgoWriter.Printf("Creating sync by tag with 'audio' search output\n") - output := cli.Sync().CreateFromStdin(searchOutput).OnServer(utils.Peer3Addr).ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring("Sync IDs created:")) - - // Extract sync ID using simple string methods - // Find the quoted UUID in the output - start := strings.Index(output, `[`) - end := strings.LastIndex(output, `]`) - gomega.Expect(start).To(gomega.BeNumerically(">", -1), "Expected to find opening quote") - gomega.Expect(end).To(gomega.BeNumerically(">", start), "Expected to find closing quote") - syncID = output[start+1 : end] - - ginkgo.GinkgoWriter.Printf("Sync ID: %s", syncID) - }) - - // Wait for sync to complete - ginkgo.It("should wait for sync to complete", func() { - _ = cli.Sync().Status(syncID).OnServer(utils.Peer3Addr).ShouldEventuallyContain("IN_PROGRESS", 
120*time.Second) - - // Wait for 60 seconds to ensure the sync is complete (reduce flakiness) - time.Sleep(60 * time.Second) - }) - - ginkgo.It("should succeed to pull record_070_sync_v5.json from peer 3 after sync", func() { - output := cli.Pull(cidV5).WithArgs("--output", "json").OnServer(utils.Peer3Addr).ShouldSucceed() - - // Compare the output with the expected JSON - equal, err := utils.CompareOASFRecords([]byte(output), testdata.ExpectedRecordV070SyncV5JSON) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(equal).To(gomega.BeTrue()) - }) - - ginkgo.It("should succeed to search for record_070_sync_v5.json from peer 3 after sync", func() { - // Search should eventually return the cid in peer 2 (retry until monitor indexes the record) - output := cli.Search().WithName("directory.agntcy.org/cisco/marketing-strategy-v5").OnServer(utils.Peer3Addr).ShouldEventuallyContain(cidV5, 240*time.Second) - - ginkgo.GinkgoWriter.Printf("Search found cid: %s", output) - }) - - ginkgo.It("should verify the record_070_sync_v5.json from peer 3 after sync", func() { - output := cli.Verify(cidV5).OnServer(utils.Peer3Addr).ShouldSucceed() - - // Verify the output - gomega.Expect(output).To(gomega.ContainSubstring("Record signature is: trusted")) - }) - - ginkgo.It("should fail to pull record_070_sync_v4.json from peer 3 after sync", func() { - _ = cli.Pull(cid).OnServer(utils.Peer3Addr).ShouldFail() - - // CLEANUP: This is the last test in the sync functionality Context - // Clean up sync test records to ensure isolation from subsequent test files - ginkgo.DeferCleanup(func() { - CleanupNetworkRecords(syncTestCIDs, "sync tests") - }) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + "os" + "path/filepath" + "strings" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Using peer addresses from utils.constants + +// Package-level variables for cleanup (accessible by AfterSuite) +// CIDs are now tracked in network_suite_test.go + +var _ = ginkgo.Describe("Running dirctl end-to-end tests for sync commands", func() { + var cli *utils.CLI + var syncID string + var privateKeyPath string + var tempKeyDir string + + // Setup temp files for CLI commands (CLI needs actual files on disk) + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + recordV4Path := filepath.Join(tempDir, "record_070_sync_v4_test.json") + recordV5Path := filepath.Join(tempDir, "record_070_sync_v5_test.json") + + // Create directory and write record data + _ = os.MkdirAll(filepath.Dir(recordV4Path), 0o755) + _ = os.WriteFile(recordV4Path, testdata.ExpectedRecordV070SyncV4JSON, 0o600) + _ = os.WriteFile(recordV5Path, testdata.ExpectedRecordV070SyncV5JSON, 0o600) + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeNetwork { + ginkgo.Skip("Skipping test, not in network mode") + } + + utils.ResetCLIState() + + // Initialize CLI helper + cli = utils.NewCLI() + }) + + ginkgo.Context("create command", func() { + ginkgo.It("should accept valid remote URL format", func() { + output := cli.Sync().Create("https://directory.example.com").OnServer(utils.Peer1Addr).ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("Sync created with ID: ")) + syncID = strings.TrimPrefix(output, "Sync created with 
ID: ") + }) + }) + + ginkgo.Context("list command", func() { + ginkgo.It("should execute without arguments and return a list with the created sync", func() { + output := cli.Sync().List().OnServer(utils.Peer1Addr).ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring(syncID)) + gomega.Expect(output).To(gomega.ContainSubstring("https://directory.example.com")) + }) + }) + + ginkgo.Context("status command", func() { + ginkgo.It("should accept a sync ID argument and return the sync status", func() { + output := cli.Sync().Status(syncID).OnServer(utils.Peer1Addr).ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("PENDING")) + }) + }) + + ginkgo.Context("delete command", func() { + ginkgo.It("should accept a sync ID argument and delete the sync", func() { + // Command may fail due to network/auth issues, but argument parsing should work + _, err := cli.Sync().Delete(syncID).OnServer(utils.Peer1Addr).Execute() + if err != nil { + gomega.Expect(err.Error()).NotTo(gomega.ContainSubstring("required")) + } + }) + + ginkgo.It("should return deleted status", func() { + cli.Sync().Status(syncID).OnServer(utils.Peer1Addr).ShouldContain("DELETE") + }) + }) + + ginkgo.Context("sync functionality", ginkgo.Ordered, func() { + var cid string + var cidV5 string + + // Setup cosign key pair for signing tests + ginkgo.BeforeAll(func() { + var err error + tempKeyDir, err = os.MkdirTemp("", "sync-test-keys") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Generate cosign key pair + utils.GenerateCosignKeyPair(tempKeyDir) + privateKeyPath = filepath.Join(tempKeyDir, "cosign.key") + + // Verify key file was created + gomega.Expect(privateKeyPath).To(gomega.BeAnExistingFile()) + + // Set cosign password for signing + err = os.Setenv("COSIGN_PASSWORD", utils.TestPassword) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + // Cleanup cosign keys after all tests + ginkgo.AfterAll(func() { + os.Unsetenv("COSIGN_PASSWORD") + if tempKeyDir != "" { + err := os.RemoveAll(tempKeyDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + ginkgo.It("should push record_070_sync_v4.json to peer 1", func() { + cid = cli.Push(recordV4Path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + + // Track CID for cleanup + RegisterCIDForCleanup(cid, "sync") + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, recordV4Path) + + // Sign the record + output := cli.Sign(cid, privateKeyPath).OnServer(utils.Peer1Addr).ShouldSucceed() + ginkgo.GinkgoWriter.Printf("Sign output: %s", output) + }) + + ginkgo.It("should publish record_070_sync_v4.json", func() { + cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() + }) + + ginkgo.It("should push record_070_sync_v5.json to peer 1", func() { + cidV5 = cli.Push(recordV5Path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + + // Track CID for cleanup + RegisterCIDForCleanup(cidV5, "sync") + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cidV5, recordV5Path) + + // Sign the record + output := cli.Sign(cidV5, privateKeyPath).OnServer(utils.Peer1Addr).ShouldSucceed() + ginkgo.GinkgoWriter.Printf("Sign output: %s", output) + }) + + ginkgo.It("should publish record_070_sync_v5.json", func() { + cli.Routing().Publish(cidV5).OnServer(utils.Peer1Addr).ShouldSucceed() + }) + + ginkgo.It("should fail to pull record_070_sync_v4.json from peer 2", func() { + _ = 
cli.Pull(cid).OnServer(utils.Peer2Addr).ShouldFail() + }) + + ginkgo.It("should create sync from peer 1 to peer 2", func() { + output := cli.Sync().Create(utils.Peer1InternalAddr).OnServer(utils.Peer2Addr).ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring("Sync created with ID: ")) + syncID = strings.TrimPrefix(output, "Sync created with ID: ") + }) + + ginkgo.It("should list the sync", func() { + output := cli.Sync().List().OnServer(utils.Peer2Addr).ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring(syncID)) + gomega.Expect(output).To(gomega.ContainSubstring(utils.Peer1InternalAddr)) + }) + + // Wait for sync to complete + ginkgo.It("should wait for sync to complete", func() { + // Poll sync status until it changes from PENDING to IN_PROGRESS + output := cli.Sync().Status(syncID).OnServer(utils.Peer2Addr).ShouldEventuallyContain("IN_PROGRESS", 120*time.Second) + ginkgo.GinkgoWriter.Printf("Current sync status: %s", output) + + // Wait for 60 seconds to ensure the sync is complete (reduce flakiness) + time.Sleep(60 * time.Second) + }) + + ginkgo.It("should succeed to pull record_070_sync_v4.json from peer 2 after sync", func() { + output := cli.Pull(cid).WithArgs("--output", "json").OnServer(utils.Peer2Addr).ShouldSucceed() + + // Compare the output with the expected JSON + equal, err := utils.CompareOASFRecords([]byte(output), testdata.ExpectedRecordV070SyncV4JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(equal).To(gomega.BeTrue()) + }) + + ginkgo.It("should succeed to search for record_070_sync_v4.json from peer 2 after sync", func() { + // Search should eventually return the cid in peer 2 (retry until monitor indexes the record) + output := cli.Search().WithName("directory.agntcy.org/cisco/marketing-strategy-v4").OnServer(utils.Peer2Addr).ShouldEventuallyContain(cid, 240*time.Second) + + ginkgo.GinkgoWriter.Printf("Search found cid: %s", output) + }) + + ginkgo.It("should verify the record_070_sync_v4.json from peer 2 after sync", func() { + output := cli.Verify(cid).OnServer(utils.Peer2Addr).ShouldSucceed() + + // Verify the output + gomega.Expect(output).To(gomega.ContainSubstring("Record signature is: trusted")) + }) + + // Delete sync from peer 2 + ginkgo.It("should delete sync from peer 2", func() { + cli.Sync().Delete(syncID).OnServer(utils.Peer2Addr).ShouldSucceed() + }) + + // Wait for sync to complete + ginkgo.It("should wait for delete to complete", func() { + // Poll sync status until it changes from DELETE_PENDING to DELETED + output := cli.Sync().Status(syncID).OnServer(utils.Peer2Addr).ShouldEventuallyContain("DELETED", 120*time.Second) + ginkgo.GinkgoWriter.Printf("Current sync status: %s", output) + }) + + ginkgo.It("should create sync from peer 1 to peer 3 using routing search piped to sync create", func() { + ginkgo.GinkgoWriter.Printf("Verifying initial state - peer 3 should not have any records\n") + _ = cli.Pull(cid).OnServer(utils.Peer3Addr).ShouldFail() // v4 (NLP) should not exist + _ = cli.Pull(cidV5).OnServer(utils.Peer3Addr).ShouldFail() // v5 (Audio) should not exist + + ginkgo.GinkgoWriter.Printf("Running routing search for 'audio' skill\n") + searchOutput := cli.Routing().Search().WithArgs("--skill", "audio").WithArgs("--output", "json").OnServer(utils.Peer3Addr).ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Routing search output: %s\n", searchOutput) + gomega.Expect(searchOutput).To(gomega.ContainSubstring(cidV5)) + + ginkgo.GinkgoWriter.Printf("Creating sync by tag with 'audio' search 
output\n") + output := cli.Sync().CreateFromStdin(searchOutput).OnServer(utils.Peer3Addr).ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring("Sync IDs created:")) + + // Extract sync ID using simple string methods + // Find the quoted UUID in the output + start := strings.Index(output, `[`) + end := strings.LastIndex(output, `]`) + gomega.Expect(start).To(gomega.BeNumerically(">", -1), "Expected to find opening quote") + gomega.Expect(end).To(gomega.BeNumerically(">", start), "Expected to find closing quote") + syncID = output[start+1 : end] + + ginkgo.GinkgoWriter.Printf("Sync ID: %s", syncID) + }) + + // Wait for sync to complete + ginkgo.It("should wait for sync to complete", func() { + _ = cli.Sync().Status(syncID).OnServer(utils.Peer3Addr).ShouldEventuallyContain("IN_PROGRESS", 120*time.Second) + + // Wait for 60 seconds to ensure the sync is complete (reduce flakiness) + time.Sleep(60 * time.Second) + }) + + ginkgo.It("should succeed to pull record_070_sync_v5.json from peer 3 after sync", func() { + output := cli.Pull(cidV5).WithArgs("--output", "json").OnServer(utils.Peer3Addr).ShouldSucceed() + + // Compare the output with the expected JSON + equal, err := utils.CompareOASFRecords([]byte(output), testdata.ExpectedRecordV070SyncV5JSON) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(equal).To(gomega.BeTrue()) + }) + + ginkgo.It("should succeed to search for record_070_sync_v5.json from peer 3 after sync", func() { + // Search should eventually return the cid in peer 2 (retry until monitor indexes the record) + output := cli.Search().WithName("directory.agntcy.org/cisco/marketing-strategy-v5").OnServer(utils.Peer3Addr).ShouldEventuallyContain(cidV5, 240*time.Second) + + ginkgo.GinkgoWriter.Printf("Search found cid: %s", output) + }) + + ginkgo.It("should verify the record_070_sync_v5.json from peer 3 after sync", func() { + output := cli.Verify(cidV5).OnServer(utils.Peer3Addr).ShouldSucceed() + + // Verify the output + gomega.Expect(output).To(gomega.ContainSubstring("Record signature is: trusted")) + }) + + ginkgo.It("should fail to pull record_070_sync_v4.json from peer 3 after sync", func() { + _ = cli.Pull(cid).OnServer(utils.Peer3Addr).ShouldFail() + + // CLEANUP: This is the last test in the sync functionality Context + // Clean up sync test records to ensure isolation from subsequent test files + ginkgo.DeferCleanup(func() { + CleanupNetworkRecords(syncTestCIDs, "sync tests") + }) + }) + }) +}) diff --git a/e2e/network/03_search_test.go b/e2e/network/03_search_test.go index 30825f451..615eb7ae8 100644 --- a/e2e/network/03_search_test.go +++ b/e2e/network/03_search_test.go @@ -1,245 +1,245 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "os" - "path/filepath" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Test file dedicated to testing remote routing search functionality with OR logic and minMatchScore - -// Package-level variables for cleanup (accessible by AfterSuite) -// CIDs are now tracked in network_suite_test.go - -var _ = ginkgo.Describe("Running dirctl end-to-end tests for remote routing search with OR logic", func() { - var cli *utils.CLI - var cid string - - // Setup temp record file - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - 
tempPath := filepath.Join(tempDir, "record_v1alpha1_remote_search_test.json") - - // Create directory and write V1Alpha1 record data - _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) - _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeNetwork { - ginkgo.Skip("Skipping test, not in network mode") - } - - // ✅ CRITICAL: Reset CLI state to prevent flag accumulation across test executions - utils.ResetCLIState() - - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.Context("setup for remote search testing", func() { - ginkgo.It("should push record_070.json to peer 1", func() { - cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Track CID for cleanup - RegisterCIDForCleanup(cid, "search") - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, tempPath) - }) - - ginkgo.It("should publish record_070.json to routing on peer 1 only", func() { - // ONLY publish on Peer 1 - this creates the scenario: - // - Peer 1: has record locally (published) - // - Peer 2: will see it as remote via DHT - cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - - // Wait for DHT propagation (same timing as working network deploy test) - time.Sleep(15 * time.Second) - ginkgo.GinkgoWriter.Printf("Published CID to routing on Peer 1: %s", cid) - }) - - ginkgo.It("should verify setup - peer 1 has local record, peer 2 does not", func() { - // Debug: Check local records on both peers - peer1LocalRecords := cli.Routing().List().OnServer(utils.Peer1Addr).ShouldSucceed() - ginkgo.GinkgoWriter.Printf("=== PEER 1 LOCAL RECORDS ===\n%s", peer1LocalRecords) - - peer2LocalRecords := cli.Routing().List().OnServer(utils.Peer2Addr).ShouldSucceed() - ginkgo.GinkgoWriter.Printf("=== PEER 2 LOCAL RECORDS ===\n%s", peer2LocalRecords) - - // Peer 1 should have the record locally - gomega.Expect(peer1LocalRecords).To(gomega.ContainSubstring(cid)) - - // Peer 2 should NOT have the record locally (will see it as remote) - gomega.Expect(peer2LocalRecords).To(gomega.ContainSubstring("No local records found")) - }) - }) - - ginkgo.Context("OR logic with minMatchScore tests", func() { - ginkgo.It("should debug: test working pattern first (minScore=1)", func() { - // First, let's replicate the WORKING test pattern from dirctl_network_deploy_test.go - // This should work since the original test works - output := cli.Routing().Search(). - WithSkill("natural_language_processing"). // Same as working test - should match via prefix - WithMinScore(1). // Explicit minScore=1 (same as default) - WithLimit(10). - OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records - ShouldSucceed() // Don't wait - should be immediate since working test works - - ginkgo.GinkgoWriter.Printf("=== DEBUG: Working pattern with explicit minScore=1 ===\n%s", output) - - // Should find the record like the working test does - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Working pattern with explicit minScore=1 found record") - }) - - ginkgo.It("should debug: test exact skill matching (minScore=1)", func() { - // Test exact skill matching with minScore=1 - output := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Exact match - should work - WithMinScore(1). // Only need 1 match - WithLimit(10). 
- OnServer(utils.Peer2Addr). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("=== DEBUG: Exact skill with minScore=1 ===\n%s", output) - - // Should find the record - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Exact skill matching with minScore=1 found record") - }) - - ginkgo.It("should debug: test two skills with minScore=2", func() { - // Test two exact skills with minScore=2 (should match both and pass threshold) - output := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match - WithMinScore(2). // Need both queries to match - WithLimit(10). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("=== DEBUG: Two exact skills with minScore=2 ===\n%s", output) - - // Should find the record since both skills should match - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Two skills with minScore=2 found record") - }) - - ginkgo.It("should demonstrate OR logic success - minScore=2 finds record", func() { - // Now test the full OR logic: 2 real skills + 1 fake skill, requiring minScore=2 - output := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match - WithSkill("NonexistentSkill"). // Query 3 - ❌ won't match - WithMinScore(2). // Only need 2/3 queries to match - WithLimit(10). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("=== DEBUG: Full OR logic test (minScore=2) ===\n%s", output) - - // Should find the record since 2/3 queries match - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: OR logic test found record with minScore=2 (2/3 queries matched)") - }) - - ginkgo.It("should demonstrate threshold filtering - minScore=3 filters out record", func() { - // Test threshold filtering: same queries but higher minScore should find NO records - // Same 2/3 queries match, but now we require minScore=3 (all queries must match) - output := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match - WithSkill("NonexistentSkill"). // Query 3 - ❌ doesn't match - WithMinScore(3). // Require ALL 3 queries to match - WithLimit(10). - OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records - ShouldSucceed() // Should succeed but return "No remote records found" - - ginkgo.GinkgoWriter.Printf("=== THRESHOLD TEST RESULT (minScore=3) ===\n%s", output) - - // Should find NO records because minScore=3 but record only matches 2/3 queries - gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) - gomega.Expect(output).NotTo(gomega.ContainSubstring(cid)) // Should NOT contain the CID - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Threshold filtering worked - no records found with minScore=3 (only 2/3 queries matched)") - }) - - ginkgo.It("should demonstrate single query match - minScore=1 finds record", func() { - // Test with single query to verify basic functionality - cli.Routing().Search(). 
- WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithMinScore(1). // Only need 1 query to match - WithLimit(10). - OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records - ShouldEventuallyContain(cid, 60*time.Second) // Shorter timeout since DHT is already propagated - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Single query search found record with minScore=1") - }) - - ginkgo.It("should demonstrate all queries match - minScore=2 with 2 real queries", func() { - // Test with 2 real queries that should both match, requiring both (minScore=2) - cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match - WithMinScore(2). // Need both queries to match - WithLimit(10). - OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records - ShouldEventuallyContain(cid, 60*time.Second) // Shorter timeout since DHT is already propagated - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: All matching queries search found record with minScore=2") - }) - }) - - ginkgo.Context("edge case tests", func() { - ginkgo.It("should handle minScore=0 (should default to minScore=1)", func() { - // Test edge case: minScore=0 should default to minScore=1 per proto specification - // Proto: "If not set, it will return records that match at least one query" - output := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match - WithMinScore(0). // Should default to 1 - WithLimit(10). - WithArgs("--output", "json"). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - // With minScore=0 defaulting to 1, should find record since query matches - gomega.Expect(output).To(gomega.ContainSubstring(cid)) - gomega.Expect(output).To(gomega.ContainSubstring("\"match_score\": 1")) - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: minScore=0 correctly defaults to minScore=1 per proto spec") - }) - - ginkgo.It("should handle empty queries with appropriate error", func() { - // Test edge case: no queries should return helpful error message - // This is the correct production behavior to prevent expensive full scans - output := cli.Routing().Search(). - WithMinScore(0). - WithLimit(10). - OnServer(utils.Peer2Addr). 
- ShouldSucceed() // Command succeeds but returns error message
-
- // Should get helpful error message, not crash or return all records
- gomega.Expect(output).To(gomega.ContainSubstring("No search criteria specified"))
- gomega.Expect(output).To(gomega.ContainSubstring("Use --skill, --locator, --domain, or --module flags"))
-
- ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Empty queries properly rejected with helpful error message")
-
- // CLEANUP: This is the last test in this Describe block
- // Clean up search test records to ensure isolation from subsequent test files
- ginkgo.DeferCleanup(func() {
- CleanupNetworkRecords(remoteSearchTestCIDs, "search tests")
- })
- })
- })
-})
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package network
+
+import (
+ "os"
+ "path/filepath"
+ "time"
+
+ "github.com/agntcy/dir/e2e/shared/config"
+ "github.com/agntcy/dir/e2e/shared/testdata"
+ "github.com/agntcy/dir/e2e/shared/utils"
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
+)
+
+// Test file dedicated to testing remote routing search functionality with OR logic and minMatchScore
+
+// Package-level variables for cleanup (accessible by AfterSuite)
+// CIDs are now tracked in network_suite_test.go
+
+var _ = ginkgo.Describe("Running dirctl end-to-end tests for remote routing search with OR logic", func() {
+ var cli *utils.CLI
+ var cid string
+
+ // Setup temp record file
+ tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR")
+ if tempDir == "" {
+ tempDir = os.TempDir()
+ }
+ tempPath := filepath.Join(tempDir, "record_v1alpha1_remote_search_test.json")
+
+ // Create directory and write V0.7.0 record data (the temp file name is historical)
+ _ = os.MkdirAll(filepath.Dir(tempPath), 0o755)
+ _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600)
+
+ ginkgo.BeforeEach(func() {
+ if cfg.DeploymentMode != config.DeploymentModeNetwork {
+ ginkgo.Skip("Skipping test, not in network mode")
+ }
+
+ // ✅ CRITICAL: Reset CLI state to prevent flag accumulation across test executions
+ utils.ResetCLIState()
+
+ // Initialize CLI helper
+ cli = utils.NewCLI()
+ })
+
+ ginkgo.Context("setup for remote search testing", func() {
+ ginkgo.It("should push record_070.json to peer 1", func() {
+ cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed()
+
+ // Track CID for cleanup
+ RegisterCIDForCleanup(cid, "search")
+
+ // Validate that the returned CID correctly represents the pushed data
+ utils.LoadAndValidateCID(cid, tempPath)
+ })
+
+ ginkgo.It("should publish record_070.json to routing on peer 1 only", func() {
+ // ONLY publish on Peer 1 - this creates the scenario:
+ // - Peer 1: has record locally (published)
+ // - Peer 2: will see it as remote via DHT
+ cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed()
+
+ // Wait for DHT propagation (same timing as working network deploy test)
+ time.Sleep(15 * time.Second)
+ ginkgo.GinkgoWriter.Printf("Published CID to routing on Peer 1: %s", cid)
+ })
+
+ ginkgo.It("should verify setup - peer 1 has local record, peer 2 does not", func() {
+ // Debug: Check local records on both peers
+ peer1LocalRecords := cli.Routing().List().OnServer(utils.Peer1Addr).ShouldSucceed()
+ ginkgo.GinkgoWriter.Printf("=== PEER 1 LOCAL RECORDS ===\n%s", peer1LocalRecords)
+
+ peer2LocalRecords := cli.Routing().List().OnServer(utils.Peer2Addr).ShouldSucceed()
+ ginkgo.GinkgoWriter.Printf("=== PEER 2 LOCAL RECORDS ===\n%s", peer2LocalRecords)
+
+ // Peer 1 should have the record locally
+ 
gomega.Expect(peer1LocalRecords).To(gomega.ContainSubstring(cid)) + + // Peer 2 should NOT have the record locally (will see it as remote) + gomega.Expect(peer2LocalRecords).To(gomega.ContainSubstring("No local records found")) + }) + }) + + ginkgo.Context("OR logic with minMatchScore tests", func() { + ginkgo.It("should debug: test working pattern first (minScore=1)", func() { + // First, let's replicate the WORKING test pattern from dirctl_network_deploy_test.go + // This should work since the original test works + output := cli.Routing().Search(). + WithSkill("natural_language_processing"). // Same as working test - should match via prefix + WithMinScore(1). // Explicit minScore=1 (same as default) + WithLimit(10). + OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records + ShouldSucceed() // Don't wait - should be immediate since working test works + + ginkgo.GinkgoWriter.Printf("=== DEBUG: Working pattern with explicit minScore=1 ===\n%s", output) + + // Should find the record like the working test does + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Working pattern with explicit minScore=1 found record") + }) + + ginkgo.It("should debug: test exact skill matching (minScore=1)", func() { + // Test exact skill matching with minScore=1 + output := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Exact match - should work + WithMinScore(1). // Only need 1 match + WithLimit(10). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("=== DEBUG: Exact skill with minScore=1 ===\n%s", output) + + // Should find the record + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Exact skill matching with minScore=1 found record") + }) + + ginkgo.It("should debug: test two skills with minScore=2", func() { + // Test two exact skills with minScore=2 (should match both and pass threshold) + output := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match + WithMinScore(2). // Need both queries to match + WithLimit(10). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("=== DEBUG: Two exact skills with minScore=2 ===\n%s", output) + + // Should find the record since both skills should match + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Two skills with minScore=2 found record") + }) + + ginkgo.It("should demonstrate OR logic success - minScore=2 finds record", func() { + // Now test the full OR logic: 2 real skills + 1 fake skill, requiring minScore=2 + output := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match + WithSkill("NonexistentSkill"). // Query 3 - ❌ won't match + WithMinScore(2). // Only need 2/3 queries to match + WithLimit(10). + OnServer(utils.Peer2Addr). 
+ ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("=== DEBUG: Full OR logic test (minScore=2) ===\n%s", output) + + // Should find the record since 2/3 queries match + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: OR logic test found record with minScore=2 (2/3 queries matched)") + }) + + ginkgo.It("should demonstrate threshold filtering - minScore=3 filters out record", func() { + // Test threshold filtering: same queries but higher minScore should find NO records + // Same 2/3 queries match, but now we require minScore=3 (all queries must match) + output := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match + WithSkill("NonexistentSkill"). // Query 3 - ❌ doesn't match + WithMinScore(3). // Require ALL 3 queries to match + WithLimit(10). + OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records + ShouldSucceed() // Should succeed but return "No remote records found" + + ginkgo.GinkgoWriter.Printf("=== THRESHOLD TEST RESULT (minScore=3) ===\n%s", output) + + // Should find NO records because minScore=3 but record only matches 2/3 queries + gomega.Expect(output).To(gomega.ContainSubstring("No remote records found")) + gomega.Expect(output).NotTo(gomega.ContainSubstring(cid)) // Should NOT contain the CID + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Threshold filtering worked - no records found with minScore=3 (only 2/3 queries matched)") + }) + + ginkgo.It("should demonstrate single query match - minScore=1 finds record", func() { + // Test with single query to verify basic functionality + cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithMinScore(1). // Only need 1 query to match + WithLimit(10). + OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records + ShouldEventuallyContain(cid, 60*time.Second) // Shorter timeout since DHT is already propagated + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Single query search found record with minScore=1") + }) + + ginkgo.It("should demonstrate all queries match - minScore=2 with 2 real queries", func() { + // Test with 2 real queries that should both match, requiring both (minScore=2) + cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). // Query 2 - ✅ should match + WithMinScore(2). // Need both queries to match + WithLimit(10). + OnServer(utils.Peer2Addr). // Search FROM Peer 2 to find Peer 1's records + ShouldEventuallyContain(cid, 60*time.Second) // Shorter timeout since DHT is already propagated + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: All matching queries search found record with minScore=2") + }) + }) + + ginkgo.Context("edge case tests", func() { + ginkgo.It("should handle minScore=0 (should default to minScore=1)", func() { + // Test edge case: minScore=0 should default to minScore=1 per proto specification + // Proto: "If not set, it will return records that match at least one query" + output := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). // Query 1 - ✅ should match + WithMinScore(0). // Should default to 1 + WithLimit(10). 
+ WithArgs("--output", "json"). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + // With minScore=0 defaulting to 1, should find record since query matches + gomega.Expect(output).To(gomega.ContainSubstring(cid)) + gomega.Expect(output).To(gomega.ContainSubstring("\"match_score\": 1")) + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: minScore=0 correctly defaults to minScore=1 per proto spec") + }) + + ginkgo.It("should handle empty queries with appropriate error", func() { + // Test edge case: no queries should return helpful error message + // This is the correct production behavior to prevent expensive full scans + output := cli.Routing().Search(). + WithMinScore(0). + WithLimit(10). + OnServer(utils.Peer2Addr). + ShouldSucceed() // Command succeeds but returns error message + + // Should get helpful error message, not crash or return all records + gomega.Expect(output).To(gomega.ContainSubstring("No search criteria specified")) + gomega.Expect(output).To(gomega.ContainSubstring("Use --skill, --locator, --domain, or --module flags")) + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: Empty queries properly rejected with helpful error message") + + // CLEANUP: This is the last test in this Describe block + // Clean up search test records to ensure isolation from subsequent test files + ginkgo.DeferCleanup(func() { + CleanupNetworkRecords(remoteSearchTestCIDs, "search tests") + }) + }) + }) +}) diff --git a/e2e/network/04_gossipsub_test.go b/e2e/network/04_gossipsub_test.go index f3a47ef38..0b827ffc6 100644 --- a/e2e/network/04_gossipsub_test.go +++ b/e2e/network/04_gossipsub_test.go @@ -1,367 +1,367 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/agntcy/dir/e2e/shared/config" - "github.com/agntcy/dir/e2e/shared/testdata" - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// Test file dedicated to testing GossipSub label announcement functionality. -// This verifies that labels are efficiently propagated via GossipSub mesh to ALL subscribed peers. 
- -// Package-level variables for cleanup (accessible by AfterSuite) -// CIDs are now tracked in network_suite_test.go - -var _ = ginkgo.Describe("Running GossipSub label announcement tests", ginkgo.Ordered, func() { - var cli *utils.CLI - var cid string - - // Setup temp record file - tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") - if tempDir == "" { - tempDir = os.TempDir() - } - tempPath := filepath.Join(tempDir, "record_v070_gossipsub_test.json") - - // Create directory and write record data - _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) - _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) - - ginkgo.BeforeEach(func() { - if cfg.DeploymentMode != config.DeploymentModeNetwork { - ginkgo.Skip("Skipping test, not in network mode") - } - - // Reset CLI state to ensure clean test environment - utils.ResetCLIState() - - // Initialize CLI helper - cli = utils.NewCLI() - }) - - ginkgo.Context("GossipSub wide propagation to all peers", func() { - ginkgo.It("should push record_v070.json to peer 1", func() { - cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - - // Track CID for cleanup - RegisterCIDForCleanup(cid, "gossipsub") - - // Validate that the returned CID correctly represents the pushed data - utils.LoadAndValidateCID(cid, tempPath) - }) - - ginkgo.It("should publish record to routing on peer 1", func() { - // Publish triggers both DHT.Provide() and GossipSub.PublishLabels() - cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() - - ginkgo.GinkgoWriter.Printf("Published CID to routing with GossipSub: %s", cid) - }) - - ginkgo.It("should propagate labels via GossipSub to all subscribed peers", func() { - // GossipSub propagates much faster than DHT alone - // Expected: ~5 seconds vs 15 seconds for DHT-only propagation - ginkgo.GinkgoWriter.Printf("Waiting 5 seconds for GossipSub label propagation...") - time.Sleep(5 * time.Second) - - // Verify Peer2 received labels via GossipSub - ginkgo.GinkgoWriter.Printf("Testing label discovery on Peer2...") - utils.ResetCLIState() - output2 := cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - gomega.Expect(output2).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ Peer2 discovered labels via GossipSub") - - // Verify Peer3 also received labels via GossipSub - ginkgo.GinkgoWriter.Printf("Testing label discovery on Peer3...") - utils.ResetCLIState() - output3 := cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer3Addr). - ShouldSucceed() - - gomega.Expect(output3).To(gomega.ContainSubstring(cid)) - ginkgo.GinkgoWriter.Printf("✅ Peer3 discovered labels via GossipSub") - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub propagated labels to ALL 3 peers (not just k-closest)") - }) - - ginkgo.It("should verify labels are discoverable from both remote peers", func() { - // Additional verification with different skill query - utils.ResetCLIState() - output2 := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation/text_completion"). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - gomega.Expect(output2).To(gomega.ContainSubstring(cid)) - gomega.Expect(output2).To(gomega.ContainSubstring("match_score")) - - utils.ResetCLIState() - output3 := cli.Routing().Search(). - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). - OnServer(utils.Peer3Addr). 
- ShouldSucceed() - - gomega.Expect(output3).To(gomega.ContainSubstring(cid)) - gomega.Expect(output3).To(gomega.ContainSubstring("match_score")) - - ginkgo.GinkgoWriter.Printf("✅ Both peers can search with specific skill queries") - }) - }) - - ginkgo.Context("GossipSub performance and timing", func() { - var perfCID string - var perfPath string - - ginkgo.BeforeAll(func() { - // Setup separate record for performance testing - perfPath = filepath.Join(tempDir, "record_v070_gossipsub_perf_test.json") - _ = os.WriteFile(perfPath, testdata.ExpectedRecordV070JSON, 0o600) - }) - - ginkgo.It("should push performance test record to peer 1", func() { - perfCID = cli.Push(perfPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - RegisterCIDForCleanup(perfCID, "gossipsub") - }) - - ginkgo.It("should discover labels in under 7 seconds via GossipSub", func() { - // Publish the record - cli.Routing().Publish(perfCID).OnServer(utils.Peer1Addr).ShouldSucceed() - - startTime := time.Now() - ginkgo.GinkgoWriter.Printf("Starting timing test at %s", startTime.Format("15:04:05")) - - // Poll for label discovery with short intervals - // GossipSub should propagate in ~2-5 seconds - utils.ResetCLIState() - output := cli.Routing().Search(). - WithSkill("natural_language_processing"). - OnServer(utils.Peer2Addr). - ShouldEventuallyContain(perfCID, 10*time.Second) // Max 10s timeout - - discoveryTime := time.Since(startTime) - ginkgo.GinkgoWriter.Printf("✅ Labels discovered in %v", discoveryTime) - - // Verify it's faster than baseline DHT propagation (15s) - gomega.Expect(discoveryTime).To(gomega.BeNumerically("<", 7*time.Second), - "GossipSub should propagate faster than DHT-only baseline") - - gomega.Expect(output).To(gomega.ContainSubstring(perfCID)) - }) - }) - - ginkgo.Context("GossipSub bulk record propagation", func() { - var bulkCIDs []string - var bulkPaths []string - - ginkgo.BeforeAll(func() { - // Prepare 5 test records for bulk testing - // Note: Reusing same record content but treating as separate for propagation test - bulkPaths = make([]string, 5) - for i := range 5 { - bulkPaths[i] = filepath.Join(tempDir, fmt.Sprintf("record_v070_gossipsub_bulk_%d_test.json", i)) - _ = os.WriteFile(bulkPaths[i], testdata.ExpectedRecordV070JSON, 0o600) - } - }) - - ginkgo.It("should push 5 records to peer 1", func() { - bulkCIDs = make([]string, 5) - for i, path := range bulkPaths { - cid := cli.Push(path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - bulkCIDs[i] = cid - RegisterCIDForCleanup(cid, "gossipsub") - ginkgo.GinkgoWriter.Printf("Pushed bulk record %d/%d: %s", i+1, 5, cid) - } - }) - - ginkgo.It("should publish all 5 records sequentially", func() { - for i, bulkCID := range bulkCIDs { - cli.Routing().Publish(bulkCID).OnServer(utils.Peer1Addr).ShouldSucceed() - ginkgo.GinkgoWriter.Printf("Published bulk record %d/%d via GossipSub", i+1, 5) - } - }) - - ginkgo.It("should propagate all 5 records' labels via GossipSub", func() { - // Verify all 5 records are discoverable from Peer2 - // Wait at least 10 seconds for GossipSub propagation of all announcements - utils.ResetCLIState() - successCount := 0 - for i, bulkCID := range bulkCIDs { - cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer2Addr). 
- ShouldEventuallyContain(bulkCID, 15*time.Second) - - successCount++ - ginkgo.GinkgoWriter.Printf("✅ Bulk record %d/%d discovered on Peer2", i+1, 5) - utils.ResetCLIState() - } - - // All 5 should be discoverable - gomega.Expect(successCount).To(gomega.Equal(5), - "All 5 records should be discoverable via GossipSub") - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub propagated all 5 records efficiently") - }) - - ginkgo.It("should verify bulk records are also discoverable from peer 3", func() { - // Verify propagation to Peer3 as well (proves mesh propagation) - utils.ResetCLIState() - successCount := 0 - for i, bulkCID := range bulkCIDs { - cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer3Addr). - ShouldEventuallyContain(bulkCID, 15*time.Second) - - successCount++ - ginkgo.GinkgoWriter.Printf("✅ Bulk record %d/%d discovered on Peer3", i+1, 5) - utils.ResetCLIState() - } - - gomega.Expect(successCount).To(gomega.Equal(5), - "All 5 records should be discoverable on Peer3 via GossipSub") - - ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub mesh propagated to all peers") - }) - }) - - ginkgo.Context("GossipSub edge cases and validation", func() { - var edgeCID string - - ginkgo.It("should push edge case test record to peer 1", func() { - edgePath := filepath.Join(tempDir, "record_v070_gossipsub_edge_test.json") - _ = os.WriteFile(edgePath, testdata.ExpectedRecordV070JSON, 0o600) - - edgeCID = cli.Push(edgePath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - RegisterCIDForCleanup(edgeCID, "gossipsub") - }) - - ginkgo.It("should handle search with multiple label types via GossipSub", func() { - // Publish record - cli.Routing().Publish(edgeCID).OnServer(utils.Peer1Addr).ShouldSucceed() - - // Wait for GossipSub propagation - time.Sleep(5 * time.Second) - - // Test search with OR logic across multiple label types - utils.ResetCLIState() - output := cli.Routing().Search(). - WithSkill("natural_language_processing"). // Should match - WithDomain("life_science"). // Should match (record has life_science/biotechnology) - WithMinScore(2). // Both should match - WithLimit(10). - WithArgs("--output", "json"). - OnServer(utils.Peer2Addr). - ShouldSucceed() - - gomega.Expect(output).To(gomega.ContainSubstring(edgeCID)) - gomega.Expect(output).To(gomega.ContainSubstring("\"match_score\": 2")) - - ginkgo.GinkgoWriter.Printf("✅ GossipSub propagates all label types correctly") - }) - - ginkgo.It("should verify labels persist across multiple searches", func() { - // Test that cached labels from GossipSub remain available - // This ensures the fallback to pull is NOT triggered on subsequent searches - - // First search - utils.ResetCLIState() - output1 := cli.Routing().Search(). - WithSkill("natural_language_processing"). - OnServer(utils.Peer2Addr). - ShouldSucceed() - gomega.Expect(output1).To(gomega.ContainSubstring(edgeCID)) - - // Second search (should use cached labels, not pull again) - utils.ResetCLIState() - output2 := cli.Routing().Search(). - WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). - OnServer(utils.Peer2Addr). - ShouldSucceed() - gomega.Expect(output2).To(gomega.ContainSubstring(edgeCID)) - - // Third search with different peer - utils.ResetCLIState() - output3 := cli.Routing().Search(). - WithSkill("natural_language_processing/natural_language_generation"). - OnServer(utils.Peer3Addr). 
- ShouldSucceed() - gomega.Expect(output3).To(gomega.ContainSubstring(edgeCID)) - - ginkgo.GinkgoWriter.Printf("✅ Cached labels from GossipSub persist across multiple searches") - }) - }) - - ginkgo.Context("GossipSub comparison with baseline", func() { - ginkgo.It("should demonstrate faster propagation compared to DHT-only baseline", func() { - // This test compares against the known baseline from 01_deploy_test.go - // Baseline: 15 seconds wait for DHT propagation - // GossipSub: Should work in ~5 seconds - - baselinePath := filepath.Join(tempDir, "record_v070_gossipsub_baseline_test.json") - _ = os.WriteFile(baselinePath, testdata.ExpectedRecordV070JSON, 0o600) - - baselineCID := cli.Push(baselinePath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() - RegisterCIDForCleanup(baselineCID, "gossipsub") - - // Publish and start timing - cli.Routing().Publish(baselineCID).OnServer(utils.Peer1Addr).ShouldSucceed() - startTime := time.Now() - - // Poll for discovery with 1-second intervals - ginkgo.GinkgoWriter.Printf("Polling for label discovery (max 10 seconds)...") - utils.ResetCLIState() - - found := false - maxAttempts := 10 - for attempt := 1; attempt <= maxAttempts; attempt++ { - output, err := cli.Routing().Search(). - WithSkill("natural_language_processing"). - WithLimit(10). - OnServer(utils.Peer2Addr). - Execute() - - if err == nil && strings.Contains(output, baselineCID) { - discoveryTime := time.Since(startTime) - ginkgo.GinkgoWriter.Printf("✅ Labels discovered in %v (attempt %d/%d)", discoveryTime, attempt, maxAttempts) - found = true - - // Verify it's faster than DHT baseline - gomega.Expect(discoveryTime).To(gomega.BeNumerically("<", 7*time.Second), - "GossipSub should be significantly faster than DHT-only baseline (15s)") - - break - } - - time.Sleep(1 * time.Second) - utils.ResetCLIState() - } - - gomega.Expect(found).To(gomega.BeTrue(), "Labels should be discovered within 10 seconds via GossipSub") - - // CLEANUP: This is the last test in this Describe block - ginkgo.DeferCleanup(func() { - CleanupNetworkRecords(gossipsubTestCIDs, "gossipsub tests") - }) - }) - }) -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/agntcy/dir/e2e/shared/config" + "github.com/agntcy/dir/e2e/shared/testdata" + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +// Test file dedicated to testing GossipSub label announcement functionality. +// This verifies that labels are efficiently propagated via GossipSub mesh to ALL subscribed peers. 
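+//
+// A note on scope (assumed from the tests below): these specs run against the
+// three-peer network deployment (Peer1-Peer3) and compare discovery timing
+// against the ~15s DHT-only propagation baseline from 01_deploy_test.go.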
+ +// Package-level variables for cleanup (accessible by AfterSuite) +// CIDs are now tracked in network_suite_test.go + +var _ = ginkgo.Describe("Running GossipSub label announcement tests", ginkgo.Ordered, func() { + var cli *utils.CLI + var cid string + + // Setup temp record file + tempDir := os.Getenv("E2E_COMPILE_OUTPUT_DIR") + if tempDir == "" { + tempDir = os.TempDir() + } + tempPath := filepath.Join(tempDir, "record_v070_gossipsub_test.json") + + // Create directory and write record data + _ = os.MkdirAll(filepath.Dir(tempPath), 0o755) + _ = os.WriteFile(tempPath, testdata.ExpectedRecordV070JSON, 0o600) + + ginkgo.BeforeEach(func() { + if cfg.DeploymentMode != config.DeploymentModeNetwork { + ginkgo.Skip("Skipping test, not in network mode") + } + + // Reset CLI state to ensure clean test environment + utils.ResetCLIState() + + // Initialize CLI helper + cli = utils.NewCLI() + }) + + ginkgo.Context("GossipSub wide propagation to all peers", func() { + ginkgo.It("should push record_v070.json to peer 1", func() { + cid = cli.Push(tempPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + + // Track CID for cleanup + RegisterCIDForCleanup(cid, "gossipsub") + + // Validate that the returned CID correctly represents the pushed data + utils.LoadAndValidateCID(cid, tempPath) + }) + + ginkgo.It("should publish record to routing on peer 1", func() { + // Publish triggers both DHT.Provide() and GossipSub.PublishLabels() + cli.Routing().Publish(cid).OnServer(utils.Peer1Addr).ShouldSucceed() + + ginkgo.GinkgoWriter.Printf("Published CID to routing with GossipSub: %s", cid) + }) + + ginkgo.It("should propagate labels via GossipSub to all subscribed peers", func() { + // GossipSub propagates much faster than DHT alone + // Expected: ~5 seconds vs 15 seconds for DHT-only propagation + ginkgo.GinkgoWriter.Printf("Waiting 5 seconds for GossipSub label propagation...") + time.Sleep(5 * time.Second) + + // Verify Peer2 received labels via GossipSub + ginkgo.GinkgoWriter.Printf("Testing label discovery on Peer2...") + utils.ResetCLIState() + output2 := cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + gomega.Expect(output2).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ Peer2 discovered labels via GossipSub") + + // Verify Peer3 also received labels via GossipSub + ginkgo.GinkgoWriter.Printf("Testing label discovery on Peer3...") + utils.ResetCLIState() + output3 := cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer3Addr). + ShouldSucceed() + + gomega.Expect(output3).To(gomega.ContainSubstring(cid)) + ginkgo.GinkgoWriter.Printf("✅ Peer3 discovered labels via GossipSub") + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub propagated labels to ALL 3 peers (not just k-closest)") + }) + + ginkgo.It("should verify labels are discoverable from both remote peers", func() { + // Additional verification with different skill query + utils.ResetCLIState() + output2 := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation/text_completion"). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + gomega.Expect(output2).To(gomega.ContainSubstring(cid)) + gomega.Expect(output2).To(gomega.ContainSubstring("match_score")) + + utils.ResetCLIState() + output3 := cli.Routing().Search(). + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). + OnServer(utils.Peer3Addr). 
+ ShouldSucceed() + + gomega.Expect(output3).To(gomega.ContainSubstring(cid)) + gomega.Expect(output3).To(gomega.ContainSubstring("match_score")) + + ginkgo.GinkgoWriter.Printf("✅ Both peers can search with specific skill queries") + }) + }) + + ginkgo.Context("GossipSub performance and timing", func() { + var perfCID string + var perfPath string + + ginkgo.BeforeAll(func() { + // Setup separate record for performance testing + perfPath = filepath.Join(tempDir, "record_v070_gossipsub_perf_test.json") + _ = os.WriteFile(perfPath, testdata.ExpectedRecordV070JSON, 0o600) + }) + + ginkgo.It("should push performance test record to peer 1", func() { + perfCID = cli.Push(perfPath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + RegisterCIDForCleanup(perfCID, "gossipsub") + }) + + ginkgo.It("should discover labels in under 7 seconds via GossipSub", func() { + // Publish the record + cli.Routing().Publish(perfCID).OnServer(utils.Peer1Addr).ShouldSucceed() + + startTime := time.Now() + ginkgo.GinkgoWriter.Printf("Starting timing test at %s", startTime.Format("15:04:05")) + + // Poll for label discovery with short intervals + // GossipSub should propagate in ~2-5 seconds + utils.ResetCLIState() + output := cli.Routing().Search(). + WithSkill("natural_language_processing"). + OnServer(utils.Peer2Addr). + ShouldEventuallyContain(perfCID, 10*time.Second) // Max 10s timeout + + discoveryTime := time.Since(startTime) + ginkgo.GinkgoWriter.Printf("✅ Labels discovered in %v", discoveryTime) + + // Verify it's faster than baseline DHT propagation (15s) + gomega.Expect(discoveryTime).To(gomega.BeNumerically("<", 7*time.Second), + "GossipSub should propagate faster than DHT-only baseline") + + gomega.Expect(output).To(gomega.ContainSubstring(perfCID)) + }) + }) + + ginkgo.Context("GossipSub bulk record propagation", func() { + var bulkCIDs []string + var bulkPaths []string + + ginkgo.BeforeAll(func() { + // Prepare 5 test records for bulk testing + // Note: Reusing same record content but treating as separate for propagation test + bulkPaths = make([]string, 5) + for i := range 5 { + bulkPaths[i] = filepath.Join(tempDir, fmt.Sprintf("record_v070_gossipsub_bulk_%d_test.json", i)) + _ = os.WriteFile(bulkPaths[i], testdata.ExpectedRecordV070JSON, 0o600) + } + }) + + ginkgo.It("should push 5 records to peer 1", func() { + bulkCIDs = make([]string, 5) + for i, path := range bulkPaths { + cid := cli.Push(path).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + bulkCIDs[i] = cid + RegisterCIDForCleanup(cid, "gossipsub") + ginkgo.GinkgoWriter.Printf("Pushed bulk record %d/%d: %s", i+1, 5, cid) + } + }) + + ginkgo.It("should publish all 5 records sequentially", func() { + for i, bulkCID := range bulkCIDs { + cli.Routing().Publish(bulkCID).OnServer(utils.Peer1Addr).ShouldSucceed() + ginkgo.GinkgoWriter.Printf("Published bulk record %d/%d via GossipSub", i+1, 5) + } + }) + + ginkgo.It("should propagate all 5 records' labels via GossipSub", func() { + // Verify all 5 records are discoverable from Peer2 + // Wait at least 10 seconds for GossipSub propagation of all announcements + utils.ResetCLIState() + successCount := 0 + for i, bulkCID := range bulkCIDs { + cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer2Addr). 
+ ShouldEventuallyContain(bulkCID, 15*time.Second) + + successCount++ + ginkgo.GinkgoWriter.Printf("✅ Bulk record %d/%d discovered on Peer2", i+1, 5) + utils.ResetCLIState() + } + + // All 5 should be discoverable + gomega.Expect(successCount).To(gomega.Equal(5), + "All 5 records should be discoverable via GossipSub") + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub propagated all 5 records efficiently") + }) + + ginkgo.It("should verify bulk records are also discoverable from peer 3", func() { + // Verify propagation to Peer3 as well (proves mesh propagation) + utils.ResetCLIState() + successCount := 0 + for i, bulkCID := range bulkCIDs { + cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer3Addr). + ShouldEventuallyContain(bulkCID, 15*time.Second) + + successCount++ + ginkgo.GinkgoWriter.Printf("✅ Bulk record %d/%d discovered on Peer3", i+1, 5) + utils.ResetCLIState() + } + + gomega.Expect(successCount).To(gomega.Equal(5), + "All 5 records should be discoverable on Peer3 via GossipSub") + + ginkgo.GinkgoWriter.Printf("✅ SUCCESS: GossipSub mesh propagated to all peers") + }) + }) + + ginkgo.Context("GossipSub edge cases and validation", func() { + var edgeCID string + + ginkgo.It("should push edge case test record to peer 1", func() { + edgePath := filepath.Join(tempDir, "record_v070_gossipsub_edge_test.json") + _ = os.WriteFile(edgePath, testdata.ExpectedRecordV070JSON, 0o600) + + edgeCID = cli.Push(edgePath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + RegisterCIDForCleanup(edgeCID, "gossipsub") + }) + + ginkgo.It("should handle search with multiple label types via GossipSub", func() { + // Publish record + cli.Routing().Publish(edgeCID).OnServer(utils.Peer1Addr).ShouldSucceed() + + // Wait for GossipSub propagation + time.Sleep(5 * time.Second) + + // Test search with OR logic across multiple label types + utils.ResetCLIState() + output := cli.Routing().Search(). + WithSkill("natural_language_processing"). // Should match + WithDomain("life_science"). // Should match (record has life_science/biotechnology) + WithMinScore(2). // Both should match + WithLimit(10). + WithArgs("--output", "json"). + OnServer(utils.Peer2Addr). + ShouldSucceed() + + gomega.Expect(output).To(gomega.ContainSubstring(edgeCID)) + gomega.Expect(output).To(gomega.ContainSubstring("\"match_score\": 2")) + + ginkgo.GinkgoWriter.Printf("✅ GossipSub propagates all label types correctly") + }) + + ginkgo.It("should verify labels persist across multiple searches", func() { + // Test that cached labels from GossipSub remain available + // This ensures the fallback to pull is NOT triggered on subsequent searches + + // First search + utils.ResetCLIState() + output1 := cli.Routing().Search(). + WithSkill("natural_language_processing"). + OnServer(utils.Peer2Addr). + ShouldSucceed() + gomega.Expect(output1).To(gomega.ContainSubstring(edgeCID)) + + // Second search (should use cached labels, not pull again) + utils.ResetCLIState() + output2 := cli.Routing().Search(). + WithSkill("natural_language_processing/analytical_reasoning/problem_solving"). + OnServer(utils.Peer2Addr). + ShouldSucceed() + gomega.Expect(output2).To(gomega.ContainSubstring(edgeCID)) + + // Third search with different peer + utils.ResetCLIState() + output3 := cli.Routing().Search(). + WithSkill("natural_language_processing/natural_language_generation"). + OnServer(utils.Peer3Addr). 
+ ShouldSucceed() + gomega.Expect(output3).To(gomega.ContainSubstring(edgeCID)) + + ginkgo.GinkgoWriter.Printf("✅ Cached labels from GossipSub persist across multiple searches") + }) + }) + + ginkgo.Context("GossipSub comparison with baseline", func() { + ginkgo.It("should demonstrate faster propagation compared to DHT-only baseline", func() { + // This test compares against the known baseline from 01_deploy_test.go + // Baseline: 15 seconds wait for DHT propagation + // GossipSub: Should work in ~5 seconds + + baselinePath := filepath.Join(tempDir, "record_v070_gossipsub_baseline_test.json") + _ = os.WriteFile(baselinePath, testdata.ExpectedRecordV070JSON, 0o600) + + baselineCID := cli.Push(baselinePath).WithArgs("--output", "raw").OnServer(utils.Peer1Addr).ShouldSucceed() + RegisterCIDForCleanup(baselineCID, "gossipsub") + + // Publish and start timing + cli.Routing().Publish(baselineCID).OnServer(utils.Peer1Addr).ShouldSucceed() + startTime := time.Now() + + // Poll for discovery with 1-second intervals + ginkgo.GinkgoWriter.Printf("Polling for label discovery (max 10 seconds)...") + utils.ResetCLIState() + + found := false + maxAttempts := 10 + for attempt := 1; attempt <= maxAttempts; attempt++ { + output, err := cli.Routing().Search(). + WithSkill("natural_language_processing"). + WithLimit(10). + OnServer(utils.Peer2Addr). + Execute() + + if err == nil && strings.Contains(output, baselineCID) { + discoveryTime := time.Since(startTime) + ginkgo.GinkgoWriter.Printf("✅ Labels discovered in %v (attempt %d/%d)", discoveryTime, attempt, maxAttempts) + found = true + + // Verify it's faster than DHT baseline + gomega.Expect(discoveryTime).To(gomega.BeNumerically("<", 7*time.Second), + "GossipSub should be significantly faster than DHT-only baseline (15s)") + + break + } + + time.Sleep(1 * time.Second) + utils.ResetCLIState() + } + + gomega.Expect(found).To(gomega.BeTrue(), "Labels should be discovered within 10 seconds via GossipSub") + + // CLEANUP: This is the last test in this Describe block + ginkgo.DeferCleanup(func() { + CleanupNetworkRecords(gossipsubTestCIDs, "gossipsub tests") + }) + }) + }) +}) diff --git a/e2e/network/cleanup.go b/e2e/network/cleanup.go index 8aa66fba0..f48a58169 100644 --- a/e2e/network/cleanup.go +++ b/e2e/network/cleanup.go @@ -1,82 +1,82 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "github.com/agntcy/dir/e2e/shared/utils" - "github.com/onsi/ginkgo/v2" -) - -// Package-level variables for tracking CIDs across all network tests. -var ( - deployTestCIDs []string - syncTestCIDs []string - remoteSearchTestCIDs []string - gossipsubTestCIDs []string -) - -// This ensures clean state between different test files (Describe blocks). 
-func CleanupNetworkRecords(cids []string, testName string) { - if len(cids) == 0 { - ginkgo.GinkgoWriter.Printf("No CIDs to clean up for %s", testName) - - return - } - - cleanupCLI := utils.NewCLI() - - ginkgo.GinkgoWriter.Printf("Cleaning up %d test records from %s", len(cids), testName) - - for _, cid := range cids { - if cid == "" { - continue // Skip empty CIDs - } - - // Clean up from each peer to ensure complete isolation - for _, peerAddr := range utils.PeerAddrs { - ginkgo.GinkgoWriter.Printf(" Cleaning CID %s from peer %s", cid, peerAddr) - - // Try to unpublish from routing (may fail if not published, which is okay) - _, err := cleanupCLI.Routing().Unpublish(cid).OnServer(peerAddr).Execute() - if err != nil { - ginkgo.GinkgoWriter.Printf(" Unpublish warning: %v (may not have been published)", err) - } - - // Try to delete from storage (may fail if not stored, which is okay) - _, err = cleanupCLI.Delete(cid).OnServer(peerAddr).Execute() - if err != nil { - ginkgo.GinkgoWriter.Printf(" Delete warning: %v (may not have been stored)", err) - } - } - } - - ginkgo.GinkgoWriter.Printf("Cleanup completed for %s - all peers should be clean", testName) -} - -// RegisterCIDForCleanup adds a CID to the appropriate test file's tracking array. -func RegisterCIDForCleanup(cid, testFile string) { - switch testFile { - case "deploy": - deployTestCIDs = append(deployTestCIDs, cid) - case "sync": - syncTestCIDs = append(syncTestCIDs, cid) - case "search": - remoteSearchTestCIDs = append(remoteSearchTestCIDs, cid) - case "gossipsub": - gossipsubTestCIDs = append(gossipsubTestCIDs, cid) - default: - ginkgo.GinkgoWriter.Printf("Warning: Unknown test file %s for CID %s", testFile, cid) - } -} - -// CleanupAllNetworkTests removes all CIDs from all test files (used by AfterSuite). -func CleanupAllNetworkTests() { - allCIDs := []string{} - allCIDs = append(allCIDs, deployTestCIDs...) - allCIDs = append(allCIDs, syncTestCIDs...) - allCIDs = append(allCIDs, remoteSearchTestCIDs...) - allCIDs = append(allCIDs, gossipsubTestCIDs...) - - CleanupNetworkRecords(allCIDs, "all network tests") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + "github.com/agntcy/dir/e2e/shared/utils" + "github.com/onsi/ginkgo/v2" +) + +// Package-level variables for tracking CIDs across all network tests. +var ( + deployTestCIDs []string + syncTestCIDs []string + remoteSearchTestCIDs []string + gossipsubTestCIDs []string +) + +// This ensures clean state between different test files (Describe blocks). 
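+// CleanupNetworkRecords unpublishes and deletes the given CIDs on every peer,
+// so that later test files start from a clean network state.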
+func CleanupNetworkRecords(cids []string, testName string) { + if len(cids) == 0 { + ginkgo.GinkgoWriter.Printf("No CIDs to clean up for %s", testName) + + return + } + + cleanupCLI := utils.NewCLI() + + ginkgo.GinkgoWriter.Printf("Cleaning up %d test records from %s", len(cids), testName) + + for _, cid := range cids { + if cid == "" { + continue // Skip empty CIDs + } + + // Clean up from each peer to ensure complete isolation + for _, peerAddr := range utils.PeerAddrs { + ginkgo.GinkgoWriter.Printf(" Cleaning CID %s from peer %s", cid, peerAddr) + + // Try to unpublish from routing (may fail if not published, which is okay) + _, err := cleanupCLI.Routing().Unpublish(cid).OnServer(peerAddr).Execute() + if err != nil { + ginkgo.GinkgoWriter.Printf(" Unpublish warning: %v (may not have been published)", err) + } + + // Try to delete from storage (may fail if not stored, which is okay) + _, err = cleanupCLI.Delete(cid).OnServer(peerAddr).Execute() + if err != nil { + ginkgo.GinkgoWriter.Printf(" Delete warning: %v (may not have been stored)", err) + } + } + } + + ginkgo.GinkgoWriter.Printf("Cleanup completed for %s - all peers should be clean", testName) +} + +// RegisterCIDForCleanup adds a CID to the appropriate test file's tracking array. +func RegisterCIDForCleanup(cid, testFile string) { + switch testFile { + case "deploy": + deployTestCIDs = append(deployTestCIDs, cid) + case "sync": + syncTestCIDs = append(syncTestCIDs, cid) + case "search": + remoteSearchTestCIDs = append(remoteSearchTestCIDs, cid) + case "gossipsub": + gossipsubTestCIDs = append(gossipsubTestCIDs, cid) + default: + ginkgo.GinkgoWriter.Printf("Warning: Unknown test file %s for CID %s", testFile, cid) + } +} + +// CleanupAllNetworkTests removes all CIDs from all test files (used by AfterSuite). +func CleanupAllNetworkTests() { + allCIDs := []string{} + allCIDs = append(allCIDs, deployTestCIDs...) + allCIDs = append(allCIDs, syncTestCIDs...) + allCIDs = append(allCIDs, remoteSearchTestCIDs...) + allCIDs = append(allCIDs, gossipsubTestCIDs...) + + CleanupNetworkRecords(allCIDs, "all network tests") +} diff --git a/e2e/network/network_suite_test.go b/e2e/network/network_suite_test.go index 2b8364584..e638aa97b 100644 --- a/e2e/network/network_suite_test.go +++ b/e2e/network/network_suite_test.go @@ -1,37 +1,37 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package network - -import ( - "testing" - - "github.com/agntcy/dir/e2e/shared/config" - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var cfg *config.Config - -// CID tracking variables are now in cleanup.go - -func TestNetworkE2E(t *testing.T) { - gomega.RegisterFailHandler(ginkgo.Fail) - - var err error - - cfg, err = config.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - if cfg.DeploymentMode != config.DeploymentModeNetwork { - t.Skip("Skipping network tests - not in network mode") - } - - ginkgo.RunSpecs(t, "Network E2E Test Suite") -} - -// Final safety cleanup - runs after all network tests complete. 
-var _ = ginkgo.AfterSuite(func() { - ginkgo.GinkgoWriter.Printf("Final network test suite cleanup (safety net)") - CleanupAllNetworkTests() -}) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package network + +import ( + "testing" + + "github.com/agntcy/dir/e2e/shared/config" + ginkgo "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" +) + +var cfg *config.Config + +// CID tracking variables are now in cleanup.go + +func TestNetworkE2E(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + + var err error + + cfg, err = config.LoadConfig() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + if cfg.DeploymentMode != config.DeploymentModeNetwork { + t.Skip("Skipping network tests - not in network mode") + } + + ginkgo.RunSpecs(t, "Network E2E Test Suite") +} + +// Final safety cleanup - runs after all network tests complete. +var _ = ginkgo.AfterSuite(func() { + ginkgo.GinkgoWriter.Printf("Final network test suite cleanup (safety net)") + CleanupAllNetworkTests() +}) diff --git a/e2e/sdk/Dockerfile b/e2e/sdk/Dockerfile index 17075734c..dca52d831 100644 --- a/e2e/sdk/Dockerfile +++ b/e2e/sdk/Dockerfile @@ -1,34 +1,34 @@ -ARG IMAGE_TAG=latest - -FROM ghcr.io/agntcy/dir-ctl:${IMAGE_TAG} AS dirctl-bin -FROM ghcr.io/sigstore/cosign/cosign:v2.4.1 AS cosign-bin - -FROM astral/uv:0.8.23-python3.12-alpine - -COPY --from=dirctl-bin /dirctl /bin/dirctl -COPY --from=cosign-bin /ko-app/cosign /bin/cosign - -ENV DIRCTL_PATH="/bin/dirctl" -ENV DIRECTORY_CLIENT_SERVER_ADDRESS="dir-apiserver.dir-server.svc.cluster.local:8888" -ENV DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="" -ENV DIRECTORY_CLIENT_AUTH_MODE="x509" -ENV DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://dir.example/spire/server" - -WORKDIR /tmp/ -COPY ./sdk /tmp/ - -WORKDIR /tmp/dir-py - -RUN uv sync --all-packages - -RUN apk add --update --no-cache nodejs npm - -WORKDIR /tmp/dir-js -RUN npm install - -WORKDIR /tmp - -RUN printf "#!/bin/sh\n\ncd ./dir-py && uv run pytest\npy_status=\$?\ncd ..\ncd ./dir-js && npm run test\njs_status=\$?\n\nif [ \$py_status -ne 0 ] || [ \$js_status -ne 0 ]; then\n exit 1\nfi" >> entrypoint.sh && chmod +x entrypoint.sh - - -ENTRYPOINT [ "/tmp/entrypoint.sh" ] +ARG IMAGE_TAG=latest + +FROM ghcr.io/agntcy/dir-ctl:${IMAGE_TAG} AS dirctl-bin +FROM ghcr.io/sigstore/cosign/cosign:v2.4.1 AS cosign-bin + +FROM astral/uv:0.8.23-python3.12-alpine + +COPY --from=dirctl-bin /dirctl /bin/dirctl +COPY --from=cosign-bin /ko-app/cosign /bin/cosign + +ENV DIRCTL_PATH="/bin/dirctl" +ENV DIRECTORY_CLIENT_SERVER_ADDRESS="dir-apiserver.dir-server.svc.cluster.local:8888" +ENV DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="" +ENV DIRECTORY_CLIENT_AUTH_MODE="x509" +ENV DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://dir.example/spire/server" + +WORKDIR /tmp/ +COPY ./sdk /tmp/ + +WORKDIR /tmp/dir-py + +RUN uv sync --all-packages + +RUN apk add --update --no-cache nodejs npm + +WORKDIR /tmp/dir-js +RUN npm install + +WORKDIR /tmp + +RUN printf "#!/bin/sh\n\ncd ./dir-py && uv run pytest\npy_status=\$?\ncd ..\ncd ./dir-js && npm run test\njs_status=\$?\n\nif [ \$py_status -ne 0 ] || [ \$js_status -ne 0 ]; then\n exit 1\nfi" >> entrypoint.sh && chmod +x entrypoint.sh + + +ENTRYPOINT [ "/tmp/entrypoint.sh" ] diff --git a/e2e/sdk/chart/Chart.yaml b/e2e/sdk/chart/Chart.yaml index ea3ebb79f..a74956c90 100644 --- a/e2e/sdk/chart/Chart.yaml +++ b/e2e/sdk/chart/Chart.yaml @@ -1,27 +1,27 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v2 -name: 
sdks-test -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: '1.16.0' +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v2 +name: sdks-test +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: '1.16.0' diff --git a/e2e/sdk/chart/templates/_helpers.tpl b/e2e/sdk/chart/templates/_helpers.tpl index 7ba5edc27..4dff5d9f2 100644 --- a/e2e/sdk/chart/templates/_helpers.tpl +++ b/e2e/sdk/chart/templates/_helpers.tpl @@ -1,62 +1,62 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "chart.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. 
-*/}} -{{- define "chart.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "chart.labels" -}} -helm.sh/chart: {{ include "chart.chart" . }} -{{ include "chart.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "chart.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.chart" . }} +{{ include "chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chart.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/e2e/sdk/chart/templates/clusterspiffeids.yaml b/e2e/sdk/chart/templates/clusterspiffeids.yaml index e11b8f16a..3e6e8f706 100644 --- a/e2e/sdk/chart/templates/clusterspiffeids.yaml +++ b/e2e/sdk/chart/templates/clusterspiffeids.yaml @@ -1,14 +1,14 @@ -apiVersion: spire.spiffe.io/v1alpha1 -kind: ClusterSPIFFEID -metadata: - name: sdks-test -spec: - className: dir-spire - podSelector: - matchExpressions: - - key: batch.kubernetes.io/job-name - operator: In - values: - - sdks-test - spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterSPIFFEID +metadata: + name: sdks-test +spec: + className: dir-spire + podSelector: + matchExpressions: + - key: batch.kubernetes.io/job-name + operator: In + values: + - sdks-test + spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} autoPopulateDNSNames: true \ No newline at end of file diff --git a/e2e/sdk/chart/templates/job.yaml b/e2e/sdk/chart/templates/job.yaml index 7f17c008c..7dcbd95da 100644 --- a/e2e/sdk/chart/templates/job.yaml +++ b/e2e/sdk/chart/templates/job.yaml @@ -1,31 +1,31 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: sdks-test -spec: - template: - spec: - containers: - - name: sdks-test - image: '{{ .Values.image.repository }}:{{ .Values.image.tag }}' - imagePullPolicy: Never - env: - - name: DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH - value: '{{ .Values.config.spiffe_socket_path }}' - - name: DIRECTORY_CLIENT_AUTH_MODE - value: '{{ .Values.config.auth_mode }}' - - name: DIRECTORY_CLIENT_SERVER_ADDRESS - value: '{{ .Values.config.server_address }}' - - name: DIRECTORY_CLIENT_JWT_AUDIENCE - value: '{{ .Values.config.jwt_audience }}' - volumeMounts: - - name: spire-agent-socket - mountPath: /run/spire/agent-sockets - readOnly: false - volumes: - - name: spire-agent-socket - hostPath: - path: /run/spire/agent-sockets - type: Directory - restartPolicy: Never - backoffLimit: 0 +apiVersion: batch/v1 +kind: Job +metadata: + name: sdks-test +spec: + template: + spec: + containers: + - name: sdks-test + image: '{{ .Values.image.repository }}:{{ .Values.image.tag }}' + imagePullPolicy: Never + env: + - name: DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH + value: '{{ .Values.config.spiffe_socket_path }}' + - name: DIRECTORY_CLIENT_AUTH_MODE + value: '{{ .Values.config.auth_mode }}' + - name: DIRECTORY_CLIENT_SERVER_ADDRESS + value: '{{ .Values.config.server_address }}' + - name: DIRECTORY_CLIENT_JWT_AUDIENCE + value: '{{ .Values.config.jwt_audience }}' + volumeMounts: + - name: spire-agent-socket + mountPath: /run/spire/agent-sockets + readOnly: false + volumes: + - name: spire-agent-socket + hostPath: + path: /run/spire/agent-sockets + type: Directory + restartPolicy: Never + backoffLimit: 0 diff --git a/e2e/sdk/chart/values.yaml b/e2e/sdk/chart/values.yaml index 9f8959609..d0fd215b5 100644 --- a/e2e/sdk/chart/values.yaml +++ b/e2e/sdk/chart/values.yaml @@ -1,9 +1,9 @@ -image: - repository: ghcr.io/agntcy/sdks-test - tag: latest - -config: - auth_mode: x509 - server_address: dir-apiserver.dir-server.svc.cluster.local:8888 - spiffe_socket_path: unix:/run/spire/agent-sockets/api.sock - jwt_audience: "spiffe://dir.example/spire/server" +image: + repository: ghcr.io/agntcy/sdks-test + tag: latest + +config: + auth_mode: x509 + server_address: 
dir-apiserver.dir-server.svc.cluster.local:8888 + spiffe_socket_path: unix:/run/spire/agent-sockets/api.sock + jwt_audience: "spiffe://dir.example/spire/server" diff --git a/e2e/shared/config/config.go b/e2e/shared/config/config.go index 0dba06658..c62d05568 100644 --- a/e2e/shared/config/config.go +++ b/e2e/shared/config/config.go @@ -1,57 +1,57 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" -) - -type DeploymentMode string - -const ( - DeploymentModeLocal DeploymentMode = "local" - DeploymentModeNetwork DeploymentMode = "network" -) - -const ( - DefaultEnvPrefix = "DIRECTORY_E2E" - - DefaultDeploymentMode = DeploymentModeLocal -) - -type Config struct { - DeploymentMode DeploymentMode `json:"deployment_mode,omitempty" mapstructure:"deployment_mode"` -} - -func LoadConfig() (*Config, error) { - v := viper.NewWithOptions( - viper.KeyDelimiter("."), - viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), - ) - - v.SetEnvPrefix(DefaultEnvPrefix) - v.AllowEmptyEnv(true) - v.AutomaticEnv() - - _ = v.BindEnv("deployment_mode") - v.SetDefault("deployment_mode", DefaultDeploymentMode) - - // Load configuration into struct - decodeHooks := mapstructure.ComposeDecodeHookFunc( - mapstructure.TextUnmarshallerHookFunc(), - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ) - - config := &Config{} - if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - return config, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "fmt" + "strings" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" +) + +type DeploymentMode string + +const ( + DeploymentModeLocal DeploymentMode = "local" + DeploymentModeNetwork DeploymentMode = "network" +) + +const ( + DefaultEnvPrefix = "DIRECTORY_E2E" + + DefaultDeploymentMode = DeploymentModeLocal +) + +type Config struct { + DeploymentMode DeploymentMode `json:"deployment_mode,omitempty" mapstructure:"deployment_mode"` +} + +func LoadConfig() (*Config, error) { + v := viper.NewWithOptions( + viper.KeyDelimiter("."), + viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), + ) + + v.SetEnvPrefix(DefaultEnvPrefix) + v.AllowEmptyEnv(true) + v.AutomaticEnv() + + _ = v.BindEnv("deployment_mode") + v.SetDefault("deployment_mode", DefaultDeploymentMode) + + // Load configuration into struct + decodeHooks := mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ) + + config := &Config{} + if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + + return config, nil +} diff --git a/e2e/shared/testdata/embed.go b/e2e/shared/testdata/embed.go index 8b298ede7..28e486529 100644 --- a/e2e/shared/testdata/embed.go +++ b/e2e/shared/testdata/embed.go @@ -1,27 +1,27 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package testdata - -import _ "embed" - -// Embedded test data files used across multiple test suites. -// This centralizes all test data to avoid duplication and ensure consistency. 
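// A sketch of how these embedded fixtures are typically consumed in a spec
// (helper names come from e2e/shared/utils in this change; the pull output
// containing the record name is assumed, not asserted by this diff):
//
//	expectedCID := utils.CalculateCIDFromData(testdata.ExpectedRecordV070JSON)
//	utils.NewCLI().Pull(expectedCID).ShouldContain("marketing-strategy-v3")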
- -//go:embed record_031.json -var ExpectedRecordV031JSON []byte - -//go:embed record_070.json -var ExpectedRecordV070JSON []byte - -//go:embed record_080.json -var ExpectedRecordV080JSON []byte - -//go:embed record_070_sync_v4.json -var ExpectedRecordV070SyncV4JSON []byte - -//go:embed record_070_sync_v5.json -var ExpectedRecordV070SyncV5JSON []byte - -//go:embed record_warnings_080.json -var ExpectedRecordWarningsV080JSON []byte +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import _ "embed" + +// Embedded test data files used across multiple test suites. +// This centralizes all test data to avoid duplication and ensure consistency. + +//go:embed record_031.json +var ExpectedRecordV031JSON []byte + +//go:embed record_070.json +var ExpectedRecordV070JSON []byte + +//go:embed record_080.json +var ExpectedRecordV080JSON []byte + +//go:embed record_070_sync_v4.json +var ExpectedRecordV070SyncV4JSON []byte + +//go:embed record_070_sync_v5.json +var ExpectedRecordV070SyncV5JSON []byte + +//go:embed record_warnings_080.json +var ExpectedRecordWarningsV080JSON []byte diff --git a/e2e/shared/testdata/record_031.json b/e2e/shared/testdata/record_031.json index 9fd033b33..78d6dadcb 100644 --- a/e2e/shared/testdata/record_031.json +++ b/e2e/shared/testdata/record_031.json @@ -1,45 +1,45 @@ -{ - "name": "directory.agntcy.org/cisco/marketing-strategy-v1", - "version": "v1.0.0", - "schema_version": "0.3.1", - "description": "Research agent for Cisco's marketing strategy.", - "authors": [ - "Cisco Systems" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - }, - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Problem Solving", - "class_uid": 10702 - } - ], - "locators": [ - { - "type": "docker-image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "signature": { - "algorithm": "ES256", - "certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t", - "content_bundle": "eyJ0ZXN0IjogInZhbHVlIn0=", - "content_type": "application/json", - "signature": "MEUCIQDTest123Signature456789", - "signed_at": "2025-09-11T10:00:00Z", - "annotations": { - "signer": "test-authority", - "purpose": "testing" - } - } -} +{ + "name": "directory.agntcy.org/cisco/marketing-strategy-v1", + "version": "v1.0.0", + "schema_version": "0.3.1", + "description": "Research agent for Cisco's marketing strategy.", + "authors": [ + "Cisco Systems" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + }, + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Problem Solving", + "class_uid": 10702 + } + ], + "locators": [ + { + "type": "docker-image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "signature": { + "algorithm": "ES256", + "certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t", + "content_bundle": "eyJ0ZXN0IjogInZhbHVlIn0=", + "content_type": "application/json", + "signature": "MEUCIQDTest123Signature456789", + "signed_at": "2025-09-11T10:00:00Z", + "annotations": { + "signer": "test-authority", + "purpose": "testing" + } + } +} diff --git a/e2e/shared/testdata/record_070.json 
b/e2e/shared/testdata/record_070.json index a2f467ca8..833f05b8f 100644 --- a/e2e/shared/testdata/record_070.json +++ b/e2e/shared/testdata/record_070.json @@ -1,63 +1,63 @@ -{ - "name": "directory.agntcy.org/cisco/marketing-strategy-v3", - "version": "v3.0.0", - "schema_version": "0.7.0", - "description": "Research agent for Cisco's marketing strategy.", - "authors": [ - "Cisco Systems" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "id": 301, - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "runtime/model", - "id": 303, - "data": { - "models": [ - { - "provider": "openai", - "model": "gpt-4", - "api_base": "https://api.openai.com/v1", - "env_vars": [ - { - "name": "OPENAI_API_KEY", - "description": "OpenAI API key for authentication", - "required": true - }, - { - "name": "OPENAI_ORG_ID", - "description": "OpenAI organization ID", - "required": false, - "default_value": "" - } - ] - } - ] - } - } - ] -} +{ + "name": "directory.agntcy.org/cisco/marketing-strategy-v3", + "version": "v3.0.0", + "schema_version": "0.7.0", + "description": "Research agent for Cisco's marketing strategy.", + "authors": [ + "Cisco Systems" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "domains": [ + { + "id": 301, + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "runtime/model", + "id": 303, + "data": { + "models": [ + { + "provider": "openai", + "model": "gpt-4", + "api_base": "https://api.openai.com/v1", + "env_vars": [ + { + "name": "OPENAI_API_KEY", + "description": "OpenAI API key for authentication", + "required": true + }, + { + "name": "OPENAI_ORG_ID", + "description": "OpenAI organization ID", + "required": false, + "default_value": "" + } + ] + } + ] + } + } + ] +} diff --git a/e2e/shared/testdata/record_070_sync_v4.json b/e2e/shared/testdata/record_070_sync_v4.json index d50f8c397..3e5693ea1 100644 --- a/e2e/shared/testdata/record_070_sync_v4.json +++ b/e2e/shared/testdata/record_070_sync_v4.json @@ -1,62 +1,62 @@ -{ - "name": "directory.agntcy.org/cisco/marketing-strategy-v4", - "version": "v4.0.0", - "schema_version": "0.7.0", - "description": "Research agent for Cisco's marketing strategy.", - "authors": [ - "Cisco Systems" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "runtime/model", - "id": 303, - "data": { - "models": [ 
- { - "provider": "openai", - "model": "gpt-4", - "api_base": "https://api.openai.com/v1", - "env_vars": [ - { - "name": "OPENAI_API_KEY", - "description": "OpenAI API key for authentication", - "required": true - }, - { - "name": "OPENAI_ORG_ID", - "description": "OpenAI organization ID", - "required": false, - "default_value": "" - } - ] - } - ] - } - } - ] -} +{ + "name": "directory.agntcy.org/cisco/marketing-strategy-v4", + "version": "v4.0.0", + "schema_version": "0.7.0", + "description": "Research agent for Cisco's marketing strategy.", + "authors": [ + "Cisco Systems" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "domains": [ + { + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "runtime/model", + "id": 303, + "data": { + "models": [ + { + "provider": "openai", + "model": "gpt-4", + "api_base": "https://api.openai.com/v1", + "env_vars": [ + { + "name": "OPENAI_API_KEY", + "description": "OpenAI API key for authentication", + "required": true + }, + { + "name": "OPENAI_ORG_ID", + "description": "OpenAI organization ID", + "required": false, + "default_value": "" + } + ] + } + ] + } + } + ] +} diff --git a/e2e/shared/testdata/record_070_sync_v5.json b/e2e/shared/testdata/record_070_sync_v5.json index b503c4479..4da8ffed1 100644 --- a/e2e/shared/testdata/record_070_sync_v5.json +++ b/e2e/shared/testdata/record_070_sync_v5.json @@ -1,58 +1,58 @@ -{ - "name": "directory.agntcy.org/cisco/marketing-strategy-v5", - "version": "v5.0.0", - "schema_version": "0.7.0", - "description": "Research agent for Cisco's marketing strategy.", - "authors": [ - "Cisco Systems" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "audio/audio_classification", - "id": 301 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "runtime/model", - "id": 303, - "data": { - "models": [ - { - "provider": "openai", - "model": "gpt-4", - "api_base": "https://api.openai.com/v1", - "env_vars": [ - { - "name": "OPENAI_API_KEY", - "description": "OpenAI API key for authentication", - "required": true - }, - { - "name": "OPENAI_ORG_ID", - "description": "OpenAI organization ID", - "required": false, - "default_value": "" - } - ] - } - ] - } - } - ] -} +{ + "name": "directory.agntcy.org/cisco/marketing-strategy-v5", + "version": "v5.0.0", + "schema_version": "0.7.0", + "description": "Research agent for Cisco's marketing strategy.", + "authors": [ + "Cisco Systems" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "audio/audio_classification", + "id": 301 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "domains": [ + { + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "runtime/model", + "id": 303, + "data": { + "models": [ + { + "provider": "openai", + "model": "gpt-4", + "api_base": "https://api.openai.com/v1", + "env_vars": [ + { + "name": 
"OPENAI_API_KEY", + "description": "OpenAI API key for authentication", + "required": true + }, + { + "name": "OPENAI_ORG_ID", + "description": "OpenAI organization ID", + "required": false, + "default_value": "" + } + ] + } + ] + } + } + ] +} diff --git a/e2e/shared/testdata/record_080.json b/e2e/shared/testdata/record_080.json index 9cf5b390e..bb1ba55f1 100644 --- a/e2e/shared/testdata/record_080.json +++ b/e2e/shared/testdata/record_080.json @@ -1,63 +1,63 @@ -{ - "name": "directory.agntcy.org/example/research-assistant-v4", - "version": "v4.0.0", - "schema_version": "0.8.0", - "description": "AI research assistant for data analysis and problem solving.", - "authors": [ - "AGNTCY Contributors" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/research-assistant" - } - ], - "domains": [ - { - "id": 301, - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "core/llm/model", - "id": 10201, - "data": { - "models": [ - { - "provider": "openai", - "model": "gpt-4", - "api_base": "https://api.openai.com/v1", - "env_vars": [ - { - "name": "OPENAI_API_KEY", - "description": "OpenAI API key for authentication", - "required": true - }, - { - "name": "OPENAI_ORG_ID", - "description": "OpenAI organization ID", - "required": false, - "default_value": "" - } - ] - } - ] - } - } - ] -} +{ + "name": "directory.agntcy.org/example/research-assistant-v4", + "version": "v4.0.0", + "schema_version": "0.8.0", + "description": "AI research assistant for data analysis and problem solving.", + "authors": [ + "AGNTCY Contributors" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/research-assistant" + } + ], + "domains": [ + { + "id": 301, + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "core/llm/model", + "id": 10201, + "data": { + "models": [ + { + "provider": "openai", + "model": "gpt-4", + "api_base": "https://api.openai.com/v1", + "env_vars": [ + { + "name": "OPENAI_API_KEY", + "description": "OpenAI API key for authentication", + "required": true + }, + { + "name": "OPENAI_ORG_ID", + "description": "OpenAI organization ID", + "required": false, + "default_value": "" + } + ] + } + ] + } + } + ] +} diff --git a/e2e/shared/testdata/record_warnings_080.json b/e2e/shared/testdata/record_warnings_080.json index 7b6fc7678..69eebb878 100644 --- a/e2e/shared/testdata/record_warnings_080.json +++ b/e2e/shared/testdata/record_warnings_080.json @@ -1,58 +1,58 @@ -{ - "name": "directory.agntcy.org/example/research-assistant-v4", - "version": "v4.0.0", - "schema_version": "0.8.0", - "description": "AI research assistant for data analysis and problem solving.", - "authors": [ - "AGNTCY Contributors" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 
- }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/research-assistant" - } - ], - "domains": [ - { - "id": 301, - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "license", - "data": { - "header": "Copyright (c) 2025 Cisco and/or its affiliates.", - "license": "Apache-2.0" - } - }, - { - "name": "runtime/framework", - "data": { - "name": "crewai", - "version": "0.55.2" - } - }, - { - "name": "runtime/language", - "data": { - "type": "python", - "version": ">=3.11,<3.13" - } - } - ] -} +{ + "name": "directory.agntcy.org/example/research-assistant-v4", + "version": "v4.0.0", + "schema_version": "0.8.0", + "description": "AI research assistant for data analysis and problem solving.", + "authors": [ + "AGNTCY Contributors" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/research-assistant" + } + ], + "domains": [ + { + "id": 301, + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "license", + "data": { + "header": "Copyright (c) 2025 Cisco and/or its affiliates.", + "license": "Apache-2.0" + } + }, + { + "name": "runtime/framework", + "data": { + "name": "crewai", + "version": "0.55.2" + } + }, + { + "name": "runtime/language", + "data": { + "type": "python", + "version": ">=3.11,<3.13" + } + } + ] +} diff --git a/e2e/shared/utils/cid.go b/e2e/shared/utils/cid.go index 554230ea7..96b0307a0 100644 --- a/e2e/shared/utils/cid.go +++ b/e2e/shared/utils/cid.go @@ -1,121 +1,121 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "encoding/json" - "errors" - "fmt" - "os" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/onsi/gomega" -) - -// ValidateCID validates that a string is a properly formatted CID. -// Uses the ConvertCIDToDigest function to ensure the CID can be decoded successfully. -func ValidateCID(cidString string) { - gomega.Expect(cidString).NotTo(gomega.BeEmpty(), "CID should not be empty") - - // Attempt to convert CID to digest - this validates the CID format - _, err := corev1.ConvertCIDToDigest(cidString) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "CID should be valid and decodable") -} - -// ValidateCIDFormat validates CID format and returns whether it's valid. -// This is a non-assertion version for conditional logic. -func ValidateCIDFormat(cidString string) bool { - if cidString == "" { - return false - } - - _, err := corev1.ConvertCIDToDigest(cidString) - - return err == nil -} - -// MarshalOASFCanonical marshals OASF JSON data using canonical JSON serialization. -// This ensures deterministic, cross-language compatible byte representation. -// Mirrors the logic from api/core/v1/oasf.go marshalOASFCanonical function. 
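// For intuition, key order does not affect the canonical bytes, since
// encoding/json sorts map keys when marshaling (illustrative check):
//
//	a, _ := MarshalOASFCanonical([]byte(`{"b":1,"a":2}`))
//	b, _ := MarshalOASFCanonical([]byte(`{"a":2,"b":1}`))
//	fmt.Println(bytes.Equal(a, b)) // true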
-func MarshalOASFCanonical(data []byte) ([]byte, error) { - if len(data) == 0 { - return nil, errors.New("cannot marshal empty data") - } - - // Step 1: Parse the JSON to ensure it's valid - var normalized interface{} - if err := json.Unmarshal(data, &normalized); err != nil { - return nil, fmt.Errorf("failed to parse JSON for canonical marshaling: %w", err) - } - - // Step 2: Marshal with sorted keys for deterministic output - // encoding/json.Marshal sorts map keys alphabetically - canonicalBytes, err := json.Marshal(normalized) - if err != nil { - return nil, fmt.Errorf("failed to marshal canonical JSON with sorted keys: %w", err) - } - - return canonicalBytes, nil -} - -// ValidateCIDAgainstData validates that a CID string correctly represents the given data. -// This performs comprehensive validation: format check + data integrity verification. -func ValidateCIDAgainstData(cidString string, originalData []byte) { - // First validate CID format - ValidateCID(cidString) - - expectedCID := CalculateCIDFromData(originalData) - gomega.Expect(expectedCID).NotTo(gomega.BeEmpty(), "Should be able to calculate CID from data") - - // Verify the CID matches what we expect - gomega.Expect(cidString).To(gomega.Equal(expectedCID), - "CID should match the calculated CID for the canonical data") -} - -// LoadAndValidateCID loads a JSON file, canonicalizes it, and validates the CID represents that data. -// This is a convenience function for test files. -func LoadAndValidateCID(cidString string, filePath string) { - // Load the file - data, err := os.ReadFile(filePath) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to read file "+filePath) - - // Validate CID against the file data - ValidateCIDAgainstData(cidString, data) -} - -// CalculateCIDFromFile calculates the CID for the record file. -func CalculateCIDFromFile(filePath string) string { - // Load the file - data, err := os.ReadFile(filePath) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to read file "+filePath) - - // Calculate the CID - return CalculateCIDFromData(data) -} - -// CalculateCIDFromData calculates the CID for the record data. -func CalculateCIDFromData(data []byte) string { - // Canonicalize the original data - canonicalData, err := MarshalOASFCanonical(data) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to canonicalize data") - - // Calculate what the CID should be for this canonical data - digest, err := corev1.CalculateDigest(canonicalData) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to calculate digest") - - cid, err := corev1.ConvertDigestToCID(digest) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to convert digest to CID") - - return cid -} - -// ValidateCIDPrefix validates that a CID starts with expected prefixes. -// CIDv1 with codec 1 typically starts with "baf" for base32 encoding. -func ValidateCIDPrefix(cidString string) { - ValidateCID(cidString) // First ensure it's a valid CID - - // CIDv1 with base32 encoding typically starts with "baf" - gomega.Expect(cidString).To(gomega.HavePrefix("baf"), - "CID should start with 'baf' prefix for CIDv1 base32 encoding") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "encoding/json" + "errors" + "fmt" + "os" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/onsi/gomega" +) + +// ValidateCID validates that a string is a properly formatted CID. 
+// Uses the ConvertCIDToDigest function to ensure the CID can be decoded successfully. +func ValidateCID(cidString string) { + gomega.Expect(cidString).NotTo(gomega.BeEmpty(), "CID should not be empty") + + // Attempt to convert CID to digest - this validates the CID format + _, err := corev1.ConvertCIDToDigest(cidString) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "CID should be valid and decodable") +} + +// ValidateCIDFormat validates CID format and returns whether it's valid. +// This is a non-assertion version for conditional logic. +func ValidateCIDFormat(cidString string) bool { + if cidString == "" { + return false + } + + _, err := corev1.ConvertCIDToDigest(cidString) + + return err == nil +} + +// MarshalOASFCanonical marshals OASF JSON data using canonical JSON serialization. +// This ensures deterministic, cross-language compatible byte representation. +// Mirrors the logic from api/core/v1/oasf.go marshalOASFCanonical function. +func MarshalOASFCanonical(data []byte) ([]byte, error) { + if len(data) == 0 { + return nil, errors.New("cannot marshal empty data") + } + + // Step 1: Parse the JSON to ensure it's valid + var normalized interface{} + if err := json.Unmarshal(data, &normalized); err != nil { + return nil, fmt.Errorf("failed to parse JSON for canonical marshaling: %w", err) + } + + // Step 2: Marshal with sorted keys for deterministic output + // encoding/json.Marshal sorts map keys alphabetically + canonicalBytes, err := json.Marshal(normalized) + if err != nil { + return nil, fmt.Errorf("failed to marshal canonical JSON with sorted keys: %w", err) + } + + return canonicalBytes, nil +} + +// ValidateCIDAgainstData validates that a CID string correctly represents the given data. +// This performs comprehensive validation: format check + data integrity verification. +func ValidateCIDAgainstData(cidString string, originalData []byte) { + // First validate CID format + ValidateCID(cidString) + + expectedCID := CalculateCIDFromData(originalData) + gomega.Expect(expectedCID).NotTo(gomega.BeEmpty(), "Should be able to calculate CID from data") + + // Verify the CID matches what we expect + gomega.Expect(cidString).To(gomega.Equal(expectedCID), + "CID should match the calculated CID for the canonical data") +} + +// LoadAndValidateCID loads a JSON file, canonicalizes it, and validates the CID represents that data. +// This is a convenience function for test files. +func LoadAndValidateCID(cidString string, filePath string) { + // Load the file + data, err := os.ReadFile(filePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to read file "+filePath) + + // Validate CID against the file data + ValidateCIDAgainstData(cidString, data) +} + +// CalculateCIDFromFile calculates the CID for the record file. +func CalculateCIDFromFile(filePath string) string { + // Load the file + data, err := os.ReadFile(filePath) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to read file "+filePath) + + // Calculate the CID + return CalculateCIDFromData(data) +} + +// CalculateCIDFromData calculates the CID for the record data. 
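// Taken together, the CID helpers in this file compose like this (sketch; the
// "baf" prefix is the CIDv1 base32 convention asserted by ValidateCIDPrefix):
//
//	cid := CalculateCIDFromData(data)
//	ValidateCIDPrefix(cid)
//	ValidateCIDAgainstData(cid, data)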
+func CalculateCIDFromData(data []byte) string { + // Canonicalize the original data + canonicalData, err := MarshalOASFCanonical(data) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to canonicalize data") + + // Calculate what the CID should be for this canonical data + digest, err := corev1.CalculateDigest(canonicalData) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to calculate digest") + + cid, err := corev1.ConvertDigestToCID(digest) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Should be able to convert digest to CID") + + return cid +} + +// ValidateCIDPrefix validates that a CID starts with expected prefixes. +// CIDv1 with codec 1 typically starts with "baf" for base32 encoding. +func ValidateCIDPrefix(cidString string) { + ValidateCID(cidString) // First ensure it's a valid CID + + // CIDv1 with base32 encoding typically starts with "baf" + gomega.Expect(cidString).To(gomega.HavePrefix("baf"), + "CID should start with 'baf' prefix for CIDv1 base32 encoding") +} diff --git a/e2e/shared/utils/cli.go b/e2e/shared/utils/cli.go index 96eb64ea1..438abe933 100644 --- a/e2e/shared/utils/cli.go +++ b/e2e/shared/utils/cli.go @@ -1,714 +1,714 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" - - clicmd "github.com/agntcy/dir/cli/cmd" - "github.com/onsi/gomega" -) - -const ( - // DefaultCommandTimeout is the default timeout for CLI command execution. - DefaultCommandTimeout = 30 * time.Second - // PollingInterval is the interval for Eventually polling operations. - PollingInterval = 5 * time.Second - // PublishProcessingDelay is the delay to allow asynchronous publish operations to complete. - PublishProcessingDelay = 15 * time.Second -) - -// CLI provides a fluent interface for executing CLI commands in tests. -type CLI struct{} - -// NewCLI creates a new CLI test helper. -func NewCLI() *CLI { - return &CLI{} -} - -// Command creates a new command builder. -func (c *CLI) Command(name string) *CommandBuilder { - return &CommandBuilder{ - command: name, - timeout: DefaultCommandTimeout, - } -} - -// Convenience methods for common commands. 
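// The builders read as one chained expression in specs, e.g. (sketch using a
// placeholder server address):
//
//	out := utils.NewCLI().
//		Pull(cid).
//		OnServer("127.0.0.1:8888").
//		WithTimeout(10 * time.Second).
//		ShouldSucceed()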
-func (c *CLI) Push(path string) *CommandBuilder { - return c.Command("push").WithArgs(path) -} - -func (c *CLI) Pull(cid string) *CommandBuilder { - return c.Command("pull").WithArgs(cid) -} - -func (c *CLI) Delete(cid string) *CommandBuilder { - return c.Command("delete").WithArgs(cid) -} - -func (c *CLI) Search() *SearchBuilder { - return &SearchBuilder{ - CommandBuilder: c.Command("search"), - format: "cid", // Default to cid format - names: []string{}, - versions: []string{}, - skillIDs: []string{}, - skillNames: []string{}, - locators: []string{}, - modules: []string{}, - domainIDs: []string{}, - domains: []string{}, - createdAts: []string{}, - authors: []string{}, - schemaVersions: []string{}, - moduleIDs: []string{}, - outputFormatArgs: []string{}, - limit: 0, - offset: 0, - } -} - -func (c *CLI) SearchRecords() *SearchBuilder { - return &SearchBuilder{ - CommandBuilder: c.Command("search"), - format: "record", // Use record format for full records - names: []string{}, - versions: []string{}, - skillIDs: []string{}, - skillNames: []string{}, - locators: []string{}, - modules: []string{}, - domainIDs: []string{}, - domains: []string{}, - createdAts: []string{}, - authors: []string{}, - schemaVersions: []string{}, - moduleIDs: []string{}, - outputFormatArgs: []string{}, - limit: 0, - offset: 0, - } -} - -func (c *CLI) Sign(recordCID, keyPath string) *CommandBuilder { - return c.Command("sign").WithArgs(recordCID, "--key", keyPath) -} - -// Routing commands - all routing operations are now under the routing subcommand. -func (c *CLI) Routing() *RoutingCommands { - return &RoutingCommands{cli: c} -} - -type RoutingCommands struct { - cli *CLI -} - -func (r *RoutingCommands) Publish(cid string) *CommandBuilder { - return r.cli.Command("routing").WithArgs("publish", cid) -} - -func (r *RoutingCommands) Unpublish(cid string) *CommandBuilder { - return r.cli.Command("routing").WithArgs("unpublish", cid) -} - -func (r *RoutingCommands) List() *RoutingListBuilder { - return &RoutingListBuilder{ - CommandBuilder: r.cli.Command("routing").WithArgs("list"), - } -} - -func (r *RoutingCommands) Search() *RoutingSearchBuilder { - return &RoutingSearchBuilder{ - CommandBuilder: r.cli.Command("routing").WithArgs("search"), - } -} - -func (r *RoutingCommands) Info() *CommandBuilder { - return r.cli.Command("routing").WithArgs("info") -} - -func (r *RoutingCommands) WithArgs(args ...string) *CommandBuilder { - return r.cli.Command("routing").WithArgs(args...) -} - -func (c *CLI) Verify(recordCID string) *CommandBuilder { - return c.Command("verify").WithArgs(recordCID) -} - -// Network commands. -func (c *CLI) Network() *NetworkCommands { - return &NetworkCommands{cli: c} -} - -type NetworkCommands struct { - cli *CLI -} - -func (n *NetworkCommands) Info(keyPath string) *CommandBuilder { - return n.cli.Command("network").WithArgs("info", keyPath) -} - -func (n *NetworkCommands) Init() *CommandBuilder { - return n.cli.Command("network").WithArgs("init") -} - -// Sync commands. 
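// Sync usage sketch (the peer URL and the expected status text are
// placeholders, not values defined in this change):
//
//	syncID := utils.NewCLI().Sync().Create("http://peer2:8888").ShouldSucceed()
//	utils.NewCLI().Sync().Status(syncID).ShouldEventuallyContain("completed", 60*time.Second)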
-func (c *CLI) Sync() *SyncCommands { - return &SyncCommands{cli: c} -} - -type SyncCommands struct { - cli *CLI -} - -func (s *SyncCommands) Create(url string) *CommandBuilder { - return s.cli.Command("sync").WithArgs("create", url) -} - -func (s *SyncCommands) CreateFromStdin(input string) *StdinCommandBuilder { - return &StdinCommandBuilder{ - CommandBuilder: s.cli.Command("sync").WithArgs("create", "--stdin"), - stdinInput: input, - } -} - -func (s *SyncCommands) List() *CommandBuilder { - return s.cli.Command("sync").WithArgs("list") -} - -func (s *SyncCommands) Status(syncID string) *CommandBuilder { - return s.cli.Command("sync").WithArgs("status", syncID) -} - -func (s *SyncCommands) Delete(syncID string) *CommandBuilder { - return s.cli.Command("sync").WithArgs("delete", syncID) -} - -// CommandBuilder provides a fluent interface for building and executing commands. -type CommandBuilder struct { - command string - args []string - serverAddr string - expectErr bool - timeout time.Duration - outputFile string - suppressErr bool -} - -// StdinCommandBuilder extends CommandBuilder to handle stdin input. -type StdinCommandBuilder struct { - *CommandBuilder - stdinInput string -} - -// OnServer sets the server address for StdinCommandBuilder. -func (s *StdinCommandBuilder) OnServer(addr string) *StdinCommandBuilder { - s.CommandBuilder.OnServer(addr) - - return s -} - -// WithTimeout sets the timeout for StdinCommandBuilder. -func (s *StdinCommandBuilder) WithTimeout(timeout time.Duration) *StdinCommandBuilder { - s.CommandBuilder.WithTimeout(timeout) - - return s -} - -// Execute runs the command with stdin input and returns output and error. -func (s *StdinCommandBuilder) Execute() (string, error) { - args := append([]string{s.command}, s.args...) - - if s.serverAddr != "" { - args = append(args, "--server-addr", s.serverAddr) - } - - if s.outputFile != "" { - args = append(args, "--output", s.outputFile) - } - - var outputBuffer bytes.Buffer - - var errorBuffer bytes.Buffer - - cmd := clicmd.RootCmd - - // Store original stdin to restore later - originalIn := cmd.InOrStdin() - - cmd.SetOut(&outputBuffer) - - if s.suppressErr { - cmd.SetErr(&errorBuffer) // Capture stderr to suppress it - } - - // Set stdin input - cmd.SetIn(strings.NewReader(s.stdinInput)) - cmd.SetArgs(args) - - err := cmd.Execute() - output := strings.TrimSpace(outputBuffer.String()) - - // Restore original stdin - cmd.SetIn(originalIn) - - if err != nil { - return output, fmt.Errorf("command execution failed: %w", err) - } - - return output, nil -} - -// ShouldSucceed executes the command with stdin and expects success. -func (s *StdinCommandBuilder) ShouldSucceed() string { - output, err := s.Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), - fmt.Sprintf("Command '%s %s' should succeed", s.command, strings.Join(s.args, " "))) - - return output -} - -func (c *CommandBuilder) WithArgs(args ...string) *CommandBuilder { - c.args = append(c.args, args...) 
- - return c -} - -func (c *CommandBuilder) OnServer(addr string) *CommandBuilder { - c.serverAddr = addr - - return c -} - -func (c *CommandBuilder) WithTimeout(timeout time.Duration) *CommandBuilder { - c.timeout = timeout - - return c -} - -func (c *CommandBuilder) WithOutput(path string) *CommandBuilder { - c.outputFile = path - - return c -} - -func (c *CommandBuilder) ExpectError() *CommandBuilder { - c.expectErr = true - - return c -} - -func (c *CommandBuilder) SuppressStderr() *CommandBuilder { - c.suppressErr = true - - return c -} - -// Execute runs the command and returns output and error. -func (c *CommandBuilder) Execute() (string, error) { - args := append([]string{c.command}, c.args...) - - if c.serverAddr != "" { - args = append(args, "--server-addr", c.serverAddr) - } - - if c.outputFile != "" { - args = append(args, "--output", c.outputFile) - } - - var outputBuffer bytes.Buffer - - var errorBuffer bytes.Buffer - - cmd := clicmd.RootCmd - cmd.SetOut(&outputBuffer) - - if c.suppressErr { - cmd.SetErr(&errorBuffer) // Capture stderr to suppress it - } - - cmd.SetArgs(args) - - err := cmd.Execute() - output := strings.TrimSpace(outputBuffer.String()) - - if err != nil { - return output, fmt.Errorf("command execution failed: %w", err) - } - - return output, nil -} - -// ShouldSucceed executes the command and expects success. -func (c *CommandBuilder) ShouldSucceed() string { - output, err := c.Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), - fmt.Sprintf("Command '%s %s' should succeed", c.command, strings.Join(c.args, " "))) - - return output -} - -// ShouldFail executes the command and expects failure. -func (c *CommandBuilder) ShouldFail() error { - // Automatically suppress stderr for expected failures to reduce noise - c.suppressErr = true - _, err := c.Execute() - gomega.Expect(err).To(gomega.HaveOccurred(), - fmt.Sprintf("Command '%s %s' should fail", c.command, strings.Join(c.args, " "))) - - return err -} - -// ShouldReturn executes the command and expects specific output. -func (c *CommandBuilder) ShouldReturn(expected string) { - output := c.ShouldSucceed() - gomega.Expect(output).To(gomega.Equal(expected)) -} - -// ShouldContain executes the command and expects output to contain substring. -func (c *CommandBuilder) ShouldContain(substring string) string { - output := c.ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(substring)) - - return output -} - -// ShouldEventuallyContain polls the command until output contains substring. -func (c *CommandBuilder) ShouldEventuallyContain(substring string, timeout time.Duration) string { - var finalOutput string - - gomega.Eventually(func() string { - output, err := c.Execute() - if err != nil { - return "" - } - - finalOutput = output - - return output - }, timeout, PollingInterval).Should(gomega.ContainSubstring(substring)) - - return finalOutput -} - -// ShouldEventuallySucceed polls the command until it succeeds. -func (c *CommandBuilder) ShouldEventuallySucceed(timeout time.Duration) string { - var finalOutput string - - gomega.Eventually(func() error { - output, err := c.Execute() - finalOutput = output - - return err - }, timeout, PollingInterval).Should(gomega.Succeed()) - - return finalOutput -} - -// SearchBuilder extends CommandBuilder with search-specific methods. 
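// Search usage sketch (the skill name comes from the testdata records in this
// change; flag wiring is shown in Execute below):
//
//	cids := utils.NewCLI().Search().
//		WithSkillName("natural_language_processing/analytical_reasoning/problem_solving").
//		WithLimit(10).
//		ShouldSucceed()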
-type SearchBuilder struct { - *CommandBuilder - format string // "cid" or "record" - names []string - versions []string - skillIDs []string - skillNames []string - locators []string - modules []string - domainIDs []string - domains []string - createdAts []string - authors []string - schemaVersions []string - moduleIDs []string - outputFormatArgs []string - limit int - offset int -} - -func (s *SearchBuilder) WithName(name string) *SearchBuilder { - s.names = append(s.names, name) - - return s -} - -func (s *SearchBuilder) WithVersion(version string) *SearchBuilder { - s.versions = append(s.versions, version) - - return s -} - -func (s *SearchBuilder) WithSkillID(skillID string) *SearchBuilder { - s.skillIDs = append(s.skillIDs, skillID) - - return s -} - -func (s *SearchBuilder) WithSkillName(skillName string) *SearchBuilder { - s.skillNames = append(s.skillNames, skillName) - - return s -} - -func (s *SearchBuilder) WithLocator(locator string) *SearchBuilder { - s.locators = append(s.locators, locator) - - return s -} - -func (s *SearchBuilder) WithModule(module string) *SearchBuilder { - s.modules = append(s.modules, module) - - return s -} - -func (s *SearchBuilder) WithDomainID(domainID string) *SearchBuilder { - s.domainIDs = append(s.domainIDs, domainID) - - return s -} - -func (s *SearchBuilder) WithDomain(domain string) *SearchBuilder { - s.domains = append(s.domains, domain) - - return s -} - -func (s *SearchBuilder) WithCreatedAt(createdAt string) *SearchBuilder { - s.createdAts = append(s.createdAts, createdAt) - - return s -} - -func (s *SearchBuilder) WithAuthor(author string) *SearchBuilder { - s.authors = append(s.authors, author) - - return s -} - -func (s *SearchBuilder) WithSchemaVersion(schemaVersion string) *SearchBuilder { - s.schemaVersions = append(s.schemaVersions, schemaVersion) - - return s -} - -func (s *SearchBuilder) WithModuleID(moduleID string) *SearchBuilder { - s.moduleIDs = append(s.moduleIDs, moduleID) - - return s -} - -func (s *SearchBuilder) WithLimit(limit int) *SearchBuilder { - s.limit = limit - - return s -} - -func (s *SearchBuilder) WithOffset(offset int) *SearchBuilder { - s.offset = offset - - return s -} - -func (s *SearchBuilder) WithArgs(args ...string) *SearchBuilder { - s.outputFormatArgs = append(s.outputFormatArgs, args...) 
- - return s -} - -func (s *SearchBuilder) Execute() (string, error) { - // Reset args and add format flag - s.args = []string{"--format", s.format} - - // Build search arguments using direct field flags - for _, name := range s.names { - s.args = append(s.args, "--name", name) - } - - for _, version := range s.versions { - s.args = append(s.args, "--version", version) - } - - for _, skillID := range s.skillIDs { - s.args = append(s.args, "--skill-id", skillID) - } - - for _, skillName := range s.skillNames { - s.args = append(s.args, "--skill", skillName) - } - - for _, locator := range s.locators { - s.args = append(s.args, "--locator", locator) - } - - for _, module := range s.modules { - s.args = append(s.args, "--module", module) - } - - for _, domainID := range s.domainIDs { - s.args = append(s.args, "--domain-id", domainID) - } - - for _, domain := range s.domains { - s.args = append(s.args, "--domain", domain) - } - - for _, createdAt := range s.createdAts { - s.args = append(s.args, "--created-at", createdAt) - } - - for _, author := range s.authors { - s.args = append(s.args, "--author", author) - } - - for _, schemaVersion := range s.schemaVersions { - s.args = append(s.args, "--schema-version", schemaVersion) - } - - for _, moduleID := range s.moduleIDs { - s.args = append(s.args, "--module-id", moduleID) - } - - if s.limit > 0 { - s.args = append(s.args, "--limit", strconv.Itoa(s.limit)) - } - - if s.offset > 0 { - s.args = append(s.args, "--offset", strconv.Itoa(s.offset)) - } - - // Append any additional args (like --output) at the end - s.args = append(s.args, s.outputFormatArgs...) - - return s.CommandBuilder.Execute() -} - -func (s *SearchBuilder) ShouldSucceed() string { - output, err := s.Execute() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - return output -} - -func (s *SearchBuilder) ShouldReturn(expectedCID string) { - output := s.ShouldSucceed() - gomega.Expect(output).To(gomega.Equal(expectedCID)) -} - -func (s *SearchBuilder) ShouldContain(substring string) string { - output := s.ShouldSucceed() - gomega.Expect(output).To(gomega.ContainSubstring(substring)) - - return output -} - -func (s *SearchBuilder) ShouldEventuallyContain(substring string, timeout time.Duration) string { - var finalOutput string - - gomega.Eventually(func() string { - output, err := s.Execute() - if err != nil { - return "" - } - - finalOutput = output - - return output - }, timeout, PollingInterval).Should(gomega.ContainSubstring(substring)) - - return finalOutput -} - -func (s *SearchBuilder) OnServer(addr string) *SearchBuilder { - s.serverAddr = addr - - return s -} - -// RoutingListBuilder extends CommandBuilder with routing list-specific methods. 
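// Routing list usage sketch (each With* helper below maps one-to-one onto a
// CLI flag; ShouldSucceed comes from the embedded CommandBuilder):
//
//	out := utils.NewCLI().Routing().List().
//		WithSkill("natural_language_processing/analytical_reasoning/problem_solving").
//		WithLimit(5).
//		ShouldSucceed()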
-type RoutingListBuilder struct { - *CommandBuilder -} - -func (l *RoutingListBuilder) WithCid(cid string) *RoutingListBuilder { - l.args = append(l.args, "--cid", cid) - - return l -} - -func (l *RoutingListBuilder) WithSkill(skill string) *RoutingListBuilder { - l.args = append(l.args, "--skill", skill) - - return l -} - -func (l *RoutingListBuilder) WithLocator(locator string) *RoutingListBuilder { - l.args = append(l.args, "--locator", locator) - - return l -} - -func (l *RoutingListBuilder) WithDomain(domain string) *RoutingListBuilder { - l.args = append(l.args, "--domain", domain) - - return l -} - -func (l *RoutingListBuilder) WithModule(module string) *RoutingListBuilder { - l.args = append(l.args, "--module", module) - - return l -} - -func (l *RoutingListBuilder) WithLimit(limit int) *RoutingListBuilder { - l.args = append(l.args, "--limit", strconv.Itoa(limit)) - - return l -} - -// RoutingSearchBuilder extends CommandBuilder with routing search-specific methods. -type RoutingSearchBuilder struct { - *CommandBuilder -} - -func (s *RoutingSearchBuilder) WithSkill(skill string) *RoutingSearchBuilder { - s.args = append(s.args, "--skill", skill) - - return s -} - -func (s *RoutingSearchBuilder) WithLocator(locator string) *RoutingSearchBuilder { - s.args = append(s.args, "--locator", locator) - - return s -} - -func (s *RoutingSearchBuilder) WithDomain(domain string) *RoutingSearchBuilder { - s.args = append(s.args, "--domain", domain) - - return s -} - -func (s *RoutingSearchBuilder) WithModule(module string) *RoutingSearchBuilder { - s.args = append(s.args, "--module", module) - - return s -} - -func (s *RoutingSearchBuilder) WithLimit(limit int) *RoutingSearchBuilder { - s.args = append(s.args, "--limit", strconv.Itoa(limit)) - - return s -} - -func (s *RoutingSearchBuilder) WithMinScore(minScore int) *RoutingSearchBuilder { - s.args = append(s.args, "--min-score", strconv.Itoa(minScore)) - - return s -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" + + clicmd "github.com/agntcy/dir/cli/cmd" + "github.com/onsi/gomega" +) + +const ( + // DefaultCommandTimeout is the default timeout for CLI command execution. + DefaultCommandTimeout = 30 * time.Second + // PollingInterval is the interval for Eventually polling operations. + PollingInterval = 5 * time.Second + // PublishProcessingDelay is the delay to allow asynchronous publish operations to complete. + PublishProcessingDelay = 15 * time.Second +) + +// CLI provides a fluent interface for executing CLI commands in tests. +type CLI struct{} + +// NewCLI creates a new CLI test helper. +func NewCLI() *CLI { + return &CLI{} +} + +// Command creates a new command builder. +func (c *CLI) Command(name string) *CommandBuilder { + return &CommandBuilder{ + command: name, + timeout: DefaultCommandTimeout, + } +} + +// Convenience methods for common commands. 
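+//
+// Illustrative usage (the record path and server address are placeholders;
+// push is assumed to print the record CID on success):
+//
+//	cli := NewCLI()
+//	cid := cli.Push("record.json").OnServer("127.0.0.1:8888").ShouldSucceed()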
+func (c *CLI) Push(path string) *CommandBuilder { + return c.Command("push").WithArgs(path) +} + +func (c *CLI) Pull(cid string) *CommandBuilder { + return c.Command("pull").WithArgs(cid) +} + +func (c *CLI) Delete(cid string) *CommandBuilder { + return c.Command("delete").WithArgs(cid) +} + +func (c *CLI) Search() *SearchBuilder { + return &SearchBuilder{ + CommandBuilder: c.Command("search"), + format: "cid", // Default to cid format + names: []string{}, + versions: []string{}, + skillIDs: []string{}, + skillNames: []string{}, + locators: []string{}, + modules: []string{}, + domainIDs: []string{}, + domains: []string{}, + createdAts: []string{}, + authors: []string{}, + schemaVersions: []string{}, + moduleIDs: []string{}, + outputFormatArgs: []string{}, + limit: 0, + offset: 0, + } +} + +func (c *CLI) SearchRecords() *SearchBuilder { + return &SearchBuilder{ + CommandBuilder: c.Command("search"), + format: "record", // Use record format for full records + names: []string{}, + versions: []string{}, + skillIDs: []string{}, + skillNames: []string{}, + locators: []string{}, + modules: []string{}, + domainIDs: []string{}, + domains: []string{}, + createdAts: []string{}, + authors: []string{}, + schemaVersions: []string{}, + moduleIDs: []string{}, + outputFormatArgs: []string{}, + limit: 0, + offset: 0, + } +} + +func (c *CLI) Sign(recordCID, keyPath string) *CommandBuilder { + return c.Command("sign").WithArgs(recordCID, "--key", keyPath) +} + +// Routing commands - all routing operations are now under the routing subcommand. +func (c *CLI) Routing() *RoutingCommands { + return &RoutingCommands{cli: c} +} + +type RoutingCommands struct { + cli *CLI +} + +func (r *RoutingCommands) Publish(cid string) *CommandBuilder { + return r.cli.Command("routing").WithArgs("publish", cid) +} + +func (r *RoutingCommands) Unpublish(cid string) *CommandBuilder { + return r.cli.Command("routing").WithArgs("unpublish", cid) +} + +func (r *RoutingCommands) List() *RoutingListBuilder { + return &RoutingListBuilder{ + CommandBuilder: r.cli.Command("routing").WithArgs("list"), + } +} + +func (r *RoutingCommands) Search() *RoutingSearchBuilder { + return &RoutingSearchBuilder{ + CommandBuilder: r.cli.Command("routing").WithArgs("search"), + } +} + +func (r *RoutingCommands) Info() *CommandBuilder { + return r.cli.Command("routing").WithArgs("info") +} + +func (r *RoutingCommands) WithArgs(args ...string) *CommandBuilder { + return r.cli.Command("routing").WithArgs(args...) +} + +func (c *CLI) Verify(recordCID string) *CommandBuilder { + return c.Command("verify").WithArgs(recordCID) +} + +// Network commands. +func (c *CLI) Network() *NetworkCommands { + return &NetworkCommands{cli: c} +} + +type NetworkCommands struct { + cli *CLI +} + +func (n *NetworkCommands) Info(keyPath string) *CommandBuilder { + return n.cli.Command("network").WithArgs("info", keyPath) +} + +func (n *NetworkCommands) Init() *CommandBuilder { + return n.cli.Command("network").WithArgs("init") +} + +// Sync commands. 
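+//
+// Illustrative usage (the remote URL is a placeholder; create is assumed to
+// print the new sync ID, which status/delete then consume):
+//
+//	syncID := cli.Sync().Create("https://remote.example:8888").ShouldSucceed()
+//	status := cli.Sync().Status(syncID).ShouldSucceed()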
+func (c *CLI) Sync() *SyncCommands { + return &SyncCommands{cli: c} +} + +type SyncCommands struct { + cli *CLI +} + +func (s *SyncCommands) Create(url string) *CommandBuilder { + return s.cli.Command("sync").WithArgs("create", url) +} + +func (s *SyncCommands) CreateFromStdin(input string) *StdinCommandBuilder { + return &StdinCommandBuilder{ + CommandBuilder: s.cli.Command("sync").WithArgs("create", "--stdin"), + stdinInput: input, + } +} + +func (s *SyncCommands) List() *CommandBuilder { + return s.cli.Command("sync").WithArgs("list") +} + +func (s *SyncCommands) Status(syncID string) *CommandBuilder { + return s.cli.Command("sync").WithArgs("status", syncID) +} + +func (s *SyncCommands) Delete(syncID string) *CommandBuilder { + return s.cli.Command("sync").WithArgs("delete", syncID) +} + +// CommandBuilder provides a fluent interface for building and executing commands. +type CommandBuilder struct { + command string + args []string + serverAddr string + expectErr bool + timeout time.Duration + outputFile string + suppressErr bool +} + +// StdinCommandBuilder extends CommandBuilder to handle stdin input. +type StdinCommandBuilder struct { + *CommandBuilder + stdinInput string +} + +// OnServer sets the server address for StdinCommandBuilder. +func (s *StdinCommandBuilder) OnServer(addr string) *StdinCommandBuilder { + s.CommandBuilder.OnServer(addr) + + return s +} + +// WithTimeout sets the timeout for StdinCommandBuilder. +func (s *StdinCommandBuilder) WithTimeout(timeout time.Duration) *StdinCommandBuilder { + s.CommandBuilder.WithTimeout(timeout) + + return s +} + +// Execute runs the command with stdin input and returns output and error. +func (s *StdinCommandBuilder) Execute() (string, error) { + args := append([]string{s.command}, s.args...) + + if s.serverAddr != "" { + args = append(args, "--server-addr", s.serverAddr) + } + + if s.outputFile != "" { + args = append(args, "--output", s.outputFile) + } + + var outputBuffer bytes.Buffer + + var errorBuffer bytes.Buffer + + cmd := clicmd.RootCmd + + // Store original stdin to restore later + originalIn := cmd.InOrStdin() + + cmd.SetOut(&outputBuffer) + + if s.suppressErr { + cmd.SetErr(&errorBuffer) // Capture stderr to suppress it + } + + // Set stdin input + cmd.SetIn(strings.NewReader(s.stdinInput)) + cmd.SetArgs(args) + + err := cmd.Execute() + output := strings.TrimSpace(outputBuffer.String()) + + // Restore original stdin + cmd.SetIn(originalIn) + + if err != nil { + return output, fmt.Errorf("command execution failed: %w", err) + } + + return output, nil +} + +// ShouldSucceed executes the command with stdin and expects success. +func (s *StdinCommandBuilder) ShouldSucceed() string { + output, err := s.Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Command '%s %s' should succeed", s.command, strings.Join(s.args, " "))) + + return output +} + +func (c *CommandBuilder) WithArgs(args ...string) *CommandBuilder { + c.args = append(c.args, args...) 
+ + return c +} + +func (c *CommandBuilder) OnServer(addr string) *CommandBuilder { + c.serverAddr = addr + + return c +} + +func (c *CommandBuilder) WithTimeout(timeout time.Duration) *CommandBuilder { + c.timeout = timeout + + return c +} + +func (c *CommandBuilder) WithOutput(path string) *CommandBuilder { + c.outputFile = path + + return c +} + +func (c *CommandBuilder) ExpectError() *CommandBuilder { + c.expectErr = true + + return c +} + +func (c *CommandBuilder) SuppressStderr() *CommandBuilder { + c.suppressErr = true + + return c +} + +// Execute runs the command and returns output and error. +func (c *CommandBuilder) Execute() (string, error) { + args := append([]string{c.command}, c.args...) + + if c.serverAddr != "" { + args = append(args, "--server-addr", c.serverAddr) + } + + if c.outputFile != "" { + args = append(args, "--output", c.outputFile) + } + + var outputBuffer bytes.Buffer + + var errorBuffer bytes.Buffer + + cmd := clicmd.RootCmd + cmd.SetOut(&outputBuffer) + + if c.suppressErr { + cmd.SetErr(&errorBuffer) // Capture stderr to suppress it + } + + cmd.SetArgs(args) + + err := cmd.Execute() + output := strings.TrimSpace(outputBuffer.String()) + + if err != nil { + return output, fmt.Errorf("command execution failed: %w", err) + } + + return output, nil +} + +// ShouldSucceed executes the command and expects success. +func (c *CommandBuilder) ShouldSucceed() string { + output, err := c.Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred(), + fmt.Sprintf("Command '%s %s' should succeed", c.command, strings.Join(c.args, " "))) + + return output +} + +// ShouldFail executes the command and expects failure. +func (c *CommandBuilder) ShouldFail() error { + // Automatically suppress stderr for expected failures to reduce noise + c.suppressErr = true + _, err := c.Execute() + gomega.Expect(err).To(gomega.HaveOccurred(), + fmt.Sprintf("Command '%s %s' should fail", c.command, strings.Join(c.args, " "))) + + return err +} + +// ShouldReturn executes the command and expects specific output. +func (c *CommandBuilder) ShouldReturn(expected string) { + output := c.ShouldSucceed() + gomega.Expect(output).To(gomega.Equal(expected)) +} + +// ShouldContain executes the command and expects output to contain substring. +func (c *CommandBuilder) ShouldContain(substring string) string { + output := c.ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(substring)) + + return output +} + +// ShouldEventuallyContain polls the command until output contains substring. +func (c *CommandBuilder) ShouldEventuallyContain(substring string, timeout time.Duration) string { + var finalOutput string + + gomega.Eventually(func() string { + output, err := c.Execute() + if err != nil { + return "" + } + + finalOutput = output + + return output + }, timeout, PollingInterval).Should(gomega.ContainSubstring(substring)) + + return finalOutput +} + +// ShouldEventuallySucceed polls the command until it succeeds. +func (c *CommandBuilder) ShouldEventuallySucceed(timeout time.Duration) string { + var finalOutput string + + gomega.Eventually(func() error { + output, err := c.Execute() + finalOutput = output + + return err + }, timeout, PollingInterval).Should(gomega.Succeed()) + + return finalOutput +} + +// SearchBuilder extends CommandBuilder with search-specific methods. 
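+//
+// Illustrative usage (the name and address are placeholders; with the default
+// "cid" format the output is the matching record CID):
+//
+//	cid := cli.Search().WithName("my-agent").WithLimit(1).OnServer(addr).ShouldSucceed()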
+type SearchBuilder struct { + *CommandBuilder + format string // "cid" or "record" + names []string + versions []string + skillIDs []string + skillNames []string + locators []string + modules []string + domainIDs []string + domains []string + createdAts []string + authors []string + schemaVersions []string + moduleIDs []string + outputFormatArgs []string + limit int + offset int +} + +func (s *SearchBuilder) WithName(name string) *SearchBuilder { + s.names = append(s.names, name) + + return s +} + +func (s *SearchBuilder) WithVersion(version string) *SearchBuilder { + s.versions = append(s.versions, version) + + return s +} + +func (s *SearchBuilder) WithSkillID(skillID string) *SearchBuilder { + s.skillIDs = append(s.skillIDs, skillID) + + return s +} + +func (s *SearchBuilder) WithSkillName(skillName string) *SearchBuilder { + s.skillNames = append(s.skillNames, skillName) + + return s +} + +func (s *SearchBuilder) WithLocator(locator string) *SearchBuilder { + s.locators = append(s.locators, locator) + + return s +} + +func (s *SearchBuilder) WithModule(module string) *SearchBuilder { + s.modules = append(s.modules, module) + + return s +} + +func (s *SearchBuilder) WithDomainID(domainID string) *SearchBuilder { + s.domainIDs = append(s.domainIDs, domainID) + + return s +} + +func (s *SearchBuilder) WithDomain(domain string) *SearchBuilder { + s.domains = append(s.domains, domain) + + return s +} + +func (s *SearchBuilder) WithCreatedAt(createdAt string) *SearchBuilder { + s.createdAts = append(s.createdAts, createdAt) + + return s +} + +func (s *SearchBuilder) WithAuthor(author string) *SearchBuilder { + s.authors = append(s.authors, author) + + return s +} + +func (s *SearchBuilder) WithSchemaVersion(schemaVersion string) *SearchBuilder { + s.schemaVersions = append(s.schemaVersions, schemaVersion) + + return s +} + +func (s *SearchBuilder) WithModuleID(moduleID string) *SearchBuilder { + s.moduleIDs = append(s.moduleIDs, moduleID) + + return s +} + +func (s *SearchBuilder) WithLimit(limit int) *SearchBuilder { + s.limit = limit + + return s +} + +func (s *SearchBuilder) WithOffset(offset int) *SearchBuilder { + s.offset = offset + + return s +} + +func (s *SearchBuilder) WithArgs(args ...string) *SearchBuilder { + s.outputFormatArgs = append(s.outputFormatArgs, args...) 
+ + return s +} + +func (s *SearchBuilder) Execute() (string, error) { + // Reset args and add format flag + s.args = []string{"--format", s.format} + + // Build search arguments using direct field flags + for _, name := range s.names { + s.args = append(s.args, "--name", name) + } + + for _, version := range s.versions { + s.args = append(s.args, "--version", version) + } + + for _, skillID := range s.skillIDs { + s.args = append(s.args, "--skill-id", skillID) + } + + for _, skillName := range s.skillNames { + s.args = append(s.args, "--skill", skillName) + } + + for _, locator := range s.locators { + s.args = append(s.args, "--locator", locator) + } + + for _, module := range s.modules { + s.args = append(s.args, "--module", module) + } + + for _, domainID := range s.domainIDs { + s.args = append(s.args, "--domain-id", domainID) + } + + for _, domain := range s.domains { + s.args = append(s.args, "--domain", domain) + } + + for _, createdAt := range s.createdAts { + s.args = append(s.args, "--created-at", createdAt) + } + + for _, author := range s.authors { + s.args = append(s.args, "--author", author) + } + + for _, schemaVersion := range s.schemaVersions { + s.args = append(s.args, "--schema-version", schemaVersion) + } + + for _, moduleID := range s.moduleIDs { + s.args = append(s.args, "--module-id", moduleID) + } + + if s.limit > 0 { + s.args = append(s.args, "--limit", strconv.Itoa(s.limit)) + } + + if s.offset > 0 { + s.args = append(s.args, "--offset", strconv.Itoa(s.offset)) + } + + // Append any additional args (like --output) at the end + s.args = append(s.args, s.outputFormatArgs...) + + return s.CommandBuilder.Execute() +} + +func (s *SearchBuilder) ShouldSucceed() string { + output, err := s.Execute() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return output +} + +func (s *SearchBuilder) ShouldReturn(expectedCID string) { + output := s.ShouldSucceed() + gomega.Expect(output).To(gomega.Equal(expectedCID)) +} + +func (s *SearchBuilder) ShouldContain(substring string) string { + output := s.ShouldSucceed() + gomega.Expect(output).To(gomega.ContainSubstring(substring)) + + return output +} + +func (s *SearchBuilder) ShouldEventuallyContain(substring string, timeout time.Duration) string { + var finalOutput string + + gomega.Eventually(func() string { + output, err := s.Execute() + if err != nil { + return "" + } + + finalOutput = output + + return output + }, timeout, PollingInterval).Should(gomega.ContainSubstring(substring)) + + return finalOutput +} + +func (s *SearchBuilder) OnServer(addr string) *SearchBuilder { + s.serverAddr = addr + + return s +} + +// RoutingListBuilder extends CommandBuilder with routing list-specific methods. 
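+//
+// Illustrative usage (cid is a placeholder for a previously pushed record):
+//
+//	out := cli.Routing().List().WithCid(cid).ShouldSucceed()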
+type RoutingListBuilder struct { + *CommandBuilder +} + +func (l *RoutingListBuilder) WithCid(cid string) *RoutingListBuilder { + l.args = append(l.args, "--cid", cid) + + return l +} + +func (l *RoutingListBuilder) WithSkill(skill string) *RoutingListBuilder { + l.args = append(l.args, "--skill", skill) + + return l +} + +func (l *RoutingListBuilder) WithLocator(locator string) *RoutingListBuilder { + l.args = append(l.args, "--locator", locator) + + return l +} + +func (l *RoutingListBuilder) WithDomain(domain string) *RoutingListBuilder { + l.args = append(l.args, "--domain", domain) + + return l +} + +func (l *RoutingListBuilder) WithModule(module string) *RoutingListBuilder { + l.args = append(l.args, "--module", module) + + return l +} + +func (l *RoutingListBuilder) WithLimit(limit int) *RoutingListBuilder { + l.args = append(l.args, "--limit", strconv.Itoa(limit)) + + return l +} + +// RoutingSearchBuilder extends CommandBuilder with routing search-specific methods. +type RoutingSearchBuilder struct { + *CommandBuilder +} + +func (s *RoutingSearchBuilder) WithSkill(skill string) *RoutingSearchBuilder { + s.args = append(s.args, "--skill", skill) + + return s +} + +func (s *RoutingSearchBuilder) WithLocator(locator string) *RoutingSearchBuilder { + s.args = append(s.args, "--locator", locator) + + return s +} + +func (s *RoutingSearchBuilder) WithDomain(domain string) *RoutingSearchBuilder { + s.args = append(s.args, "--domain", domain) + + return s +} + +func (s *RoutingSearchBuilder) WithModule(module string) *RoutingSearchBuilder { + s.args = append(s.args, "--module", module) + + return s +} + +func (s *RoutingSearchBuilder) WithLimit(limit int) *RoutingSearchBuilder { + s.args = append(s.args, "--limit", strconv.Itoa(limit)) + + return s +} + +func (s *RoutingSearchBuilder) WithMinScore(minScore int) *RoutingSearchBuilder { + s.args = append(s.args, "--min-score", strconv.Itoa(minScore)) + + return s +} diff --git a/e2e/shared/utils/common.go b/e2e/shared/utils/common.go index 29dfaa420..77f72969c 100644 --- a/e2e/shared/utils/common.go +++ b/e2e/shared/utils/common.go @@ -1,235 +1,235 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "reflect" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - clicmd "github.com/agntcy/dir/cli/cmd" - searchcmd "github.com/agntcy/dir/cli/cmd/search" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -// Ptr creates a pointer to the given value. -func Ptr[T any](v T) *T { - return &v -} - -// CollectItems collects all items from a channel into a slice. -// This generic utility eliminates the repetitive pattern of iterating over channels -// and works with any channel type. -func CollectItems[T any](itemsChan <-chan T) []T { - //nolint:prealloc // Cannot pre-allocate when reading from channel - count is unknown - var items []T - for item := range itemsChan { - items = append(items, item) - } - - return items -} - -// CollectListItems collects all list items from a channel into a slice. -// Wrapper around generic CollectItems for routing list operations. -func CollectListItems(itemsChan <-chan *routingv1.ListResponse) []*routingv1.ListResponse { - return CollectItems(itemsChan) -} - -// CollectSearchItems collects all search items from a channel into a slice. -// Wrapper around generic CollectItems for routing search operations. 
-func CollectSearchItems(searchChan <-chan *routingv1.SearchResponse) []*routingv1.SearchResponse { - return CollectItems(searchChan) -} - -// CompareOASFRecords compares two OASF JSON records with version-aware logic. -// This function automatically detects OASF versions and uses appropriate comparison logic. -// -//nolint:wrapcheck -func CompareOASFRecords(json1, json2 []byte) (bool, error) { - record1, err := corev1.UnmarshalRecord(json1) - if err != nil { - return false, err - } - - record2, err := corev1.UnmarshalRecord(json2) - if err != nil { - return false, err - } - - return reflect.DeepEqual(record1, record2), nil -} - -// ResetCobraFlags resets all CLI command flags to their default values. -// This ensures clean state between test executions. -func ResetCobraFlags() { - // Reset root command flags - resetCommandFlags(clicmd.RootCmd) - - // Walk through all subcommands and reset their flags - for _, cmd := range clicmd.RootCmd.Commands() { - resetCommandFlags(cmd) - - // Also reset any nested subcommands - resetNestedCommandFlags(cmd) - } -} - -// resetCommandFlags resets flags for a specific command. -// -//nolint:errcheck -func resetCommandFlags(cmd *cobra.Command) { - if cmd.Flags() != nil { - // Reset local flags - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if flag.Value != nil { - // Reset to default value based on flag type - switch flag.Value.Type() { - case "string": - flag.Value.Set(flag.DefValue) - case "bool": - flag.Value.Set(flag.DefValue) - case "int", "int32", "int64": - flag.Value.Set(flag.DefValue) - case "uint", "uint32", "uint64": - flag.Value.Set(flag.DefValue) - case "float32", "float64": - flag.Value.Set(flag.DefValue) - case "stringArray", "stringSlice": - // For string arrays/slices, completely clear them - // Setting to empty string should clear the underlying slice - flag.Value.Set("") - // Also reset the default value to ensure clean state - flag.DefValue = "" - default: - // For custom types, try to set to default value - flag.Value.Set(flag.DefValue) - } - // Mark as not changed - flag.Changed = false - } - }) - } - - if cmd.PersistentFlags() != nil { - // Reset persistent flags - cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { - if flag.Value != nil { - // Handle string arrays specially for persistent flags too - if flag.Value.Type() == "stringArray" || flag.Value.Type() == "stringSlice" { - flag.Value.Set("") - flag.DefValue = "" - } else { - flag.Value.Set(flag.DefValue) - } - - flag.Changed = false - } - }) - } -} - -// resetNestedCommandFlags recursively resets flags for nested commands. -func resetNestedCommandFlags(cmd *cobra.Command) { - for _, subCmd := range cmd.Commands() { - resetCommandFlags(subCmd) - resetNestedCommandFlags(subCmd) - } -} - -// ResetCLIState provides a comprehensive reset of CLI state. -// This combines flag reset with any other state that needs to be cleared. -func ResetCLIState() { - ResetCobraFlags() - - // Reset command args - clicmd.RootCmd.SetArgs(nil) - - // Clear any output buffers by setting output to default - clicmd.RootCmd.SetOut(nil) - clicmd.RootCmd.SetErr(nil) - - // Reset search command global state - ResetSearchCommandState() - - // Force complete re-initialization of routing command flags to clear accumulated state - resetRoutingCommandFlags() -} - -// ResetSearchCommandState resets the global state in search command. 
-// -//nolint:errcheck -func ResetSearchCommandState() { - if cmd := searchcmd.Command; cmd != nil { - // Reset flags to default values - cmd.Flags().Set("format", "cid") - cmd.Flags().Set("limit", "100") - cmd.Flags().Set("offset", "0") - - // Reset all string array flags - resetStringArrayFlag(cmd, "name") - resetStringArrayFlag(cmd, "version") - resetStringArrayFlag(cmd, "skill-id") - resetStringArrayFlag(cmd, "skill") - resetStringArrayFlag(cmd, "locator") - resetStringArrayFlag(cmd, "module") - resetStringArrayFlag(cmd, "domain-id") - resetStringArrayFlag(cmd, "domain") - resetStringArrayFlag(cmd, "created-at") - resetStringArrayFlag(cmd, "author") - resetStringArrayFlag(cmd, "schema-version") - resetStringArrayFlag(cmd, "module-id") - } -} - -// resetRoutingCommandFlags aggressively resets routing command flags and their underlying variables. -// The key insight is that Cobra StringArrayVar flags are bound to Go slice variables that persist -// across command executions. We need to reset both the flag state AND the underlying variables. -func resetRoutingCommandFlags() { - // Import the routing package to access the global option variables - // Since we can't import the routing package directly (circular dependency), - // we need to reset the flags in a way that also clears the underlying slices - // Find the routing command - for _, cmd := range clicmd.RootCmd.Commands() { - if cmd.Name() == "routing" { - // Reset all routing subcommands - for _, subCmd := range cmd.Commands() { - switch subCmd.Name() { - case "list": - // Reset list command flags and underlying variables - resetStringArrayFlag(subCmd, "skill") - resetStringArrayFlag(subCmd, "locator") - resetStringArrayFlag(subCmd, "domain") - resetStringArrayFlag(subCmd, "module") - case "search": - // Reset search command flags and underlying variables - resetStringArrayFlag(subCmd, "skill") - resetStringArrayFlag(subCmd, "locator") - resetStringArrayFlag(subCmd, "domain") - resetStringArrayFlag(subCmd, "module") - } - } - } - } -} - -// resetStringArrayFlag completely resets a StringArrayVar flag by clearing its underlying slice. -func resetStringArrayFlag(cmd *cobra.Command, flagName string) { - if flag := cmd.Flags().Lookup(flagName); flag != nil { - // For StringArrayVar flags, we need to clear the underlying slice completely - // The flag.Value is a pointer to a stringArrayValue that wraps the actual slice - // Method 1: Set to empty string (should clear the slice) - _ = flag.Value.Set("") // Ignore error - flag reset is best effort - - // Method 2: Reset all flag metadata - flag.DefValue = "" - flag.Changed = false - - // Method 3: If the flag has a slice interface, try to clear it directly - if sliceValue, ok := flag.Value.(interface{ Replace([]string) error }); ok { - sliceValue.Replace([]string{}) //nolint:errcheck - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "reflect" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + clicmd "github.com/agntcy/dir/cli/cmd" + searchcmd "github.com/agntcy/dir/cli/cmd/search" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// Ptr creates a pointer to the given value. +func Ptr[T any](v T) *T { + return &v +} + +// CollectItems collects all items from a channel into a slice. +// This generic utility eliminates the repetitive pattern of iterating over channels +// and works with any channel type. 
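+//
+// Illustrative usage (blocks until the channel is closed):
+//
+//	items := CollectItems(resultCh)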
+func CollectItems[T any](itemsChan <-chan T) []T { + //nolint:prealloc // Cannot pre-allocate when reading from channel - count is unknown + var items []T + for item := range itemsChan { + items = append(items, item) + } + + return items +} + +// CollectListItems collects all list items from a channel into a slice. +// Wrapper around generic CollectItems for routing list operations. +func CollectListItems(itemsChan <-chan *routingv1.ListResponse) []*routingv1.ListResponse { + return CollectItems(itemsChan) +} + +// CollectSearchItems collects all search items from a channel into a slice. +// Wrapper around generic CollectItems for routing search operations. +func CollectSearchItems(searchChan <-chan *routingv1.SearchResponse) []*routingv1.SearchResponse { + return CollectItems(searchChan) +} + +// CompareOASFRecords compares two OASF JSON records with version-aware logic. +// This function automatically detects OASF versions and uses appropriate comparison logic. +// +//nolint:wrapcheck +func CompareOASFRecords(json1, json2 []byte) (bool, error) { + record1, err := corev1.UnmarshalRecord(json1) + if err != nil { + return false, err + } + + record2, err := corev1.UnmarshalRecord(json2) + if err != nil { + return false, err + } + + return reflect.DeepEqual(record1, record2), nil +} + +// ResetCobraFlags resets all CLI command flags to their default values. +// This ensures clean state between test executions. +func ResetCobraFlags() { + // Reset root command flags + resetCommandFlags(clicmd.RootCmd) + + // Walk through all subcommands and reset their flags + for _, cmd := range clicmd.RootCmd.Commands() { + resetCommandFlags(cmd) + + // Also reset any nested subcommands + resetNestedCommandFlags(cmd) + } +} + +// resetCommandFlags resets flags for a specific command. +// +//nolint:errcheck +func resetCommandFlags(cmd *cobra.Command) { + if cmd.Flags() != nil { + // Reset local flags + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + if flag.Value != nil { + // Reset to default value based on flag type + switch flag.Value.Type() { + case "string": + flag.Value.Set(flag.DefValue) + case "bool": + flag.Value.Set(flag.DefValue) + case "int", "int32", "int64": + flag.Value.Set(flag.DefValue) + case "uint", "uint32", "uint64": + flag.Value.Set(flag.DefValue) + case "float32", "float64": + flag.Value.Set(flag.DefValue) + case "stringArray", "stringSlice": + // For string arrays/slices, completely clear them + // Setting to empty string should clear the underlying slice + flag.Value.Set("") + // Also reset the default value to ensure clean state + flag.DefValue = "" + default: + // For custom types, try to set to default value + flag.Value.Set(flag.DefValue) + } + // Mark as not changed + flag.Changed = false + } + }) + } + + if cmd.PersistentFlags() != nil { + // Reset persistent flags + cmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { + if flag.Value != nil { + // Handle string arrays specially for persistent flags too + if flag.Value.Type() == "stringArray" || flag.Value.Type() == "stringSlice" { + flag.Value.Set("") + flag.DefValue = "" + } else { + flag.Value.Set(flag.DefValue) + } + + flag.Changed = false + } + }) + } +} + +// resetNestedCommandFlags recursively resets flags for nested commands. +func resetNestedCommandFlags(cmd *cobra.Command) { + for _, subCmd := range cmd.Commands() { + resetCommandFlags(subCmd) + resetNestedCommandFlags(subCmd) + } +} + +// ResetCLIState provides a comprehensive reset of CLI state. 
+// This combines flag reset with any other state that needs to be cleared. +func ResetCLIState() { + ResetCobraFlags() + + // Reset command args + clicmd.RootCmd.SetArgs(nil) + + // Clear any output buffers by setting output to default + clicmd.RootCmd.SetOut(nil) + clicmd.RootCmd.SetErr(nil) + + // Reset search command global state + ResetSearchCommandState() + + // Force complete re-initialization of routing command flags to clear accumulated state + resetRoutingCommandFlags() +} + +// ResetSearchCommandState resets the global state in search command. +// +//nolint:errcheck +func ResetSearchCommandState() { + if cmd := searchcmd.Command; cmd != nil { + // Reset flags to default values + cmd.Flags().Set("format", "cid") + cmd.Flags().Set("limit", "100") + cmd.Flags().Set("offset", "0") + + // Reset all string array flags + resetStringArrayFlag(cmd, "name") + resetStringArrayFlag(cmd, "version") + resetStringArrayFlag(cmd, "skill-id") + resetStringArrayFlag(cmd, "skill") + resetStringArrayFlag(cmd, "locator") + resetStringArrayFlag(cmd, "module") + resetStringArrayFlag(cmd, "domain-id") + resetStringArrayFlag(cmd, "domain") + resetStringArrayFlag(cmd, "created-at") + resetStringArrayFlag(cmd, "author") + resetStringArrayFlag(cmd, "schema-version") + resetStringArrayFlag(cmd, "module-id") + } +} + +// resetRoutingCommandFlags aggressively resets routing command flags and their underlying variables. +// The key insight is that Cobra StringArrayVar flags are bound to Go slice variables that persist +// across command executions. We need to reset both the flag state AND the underlying variables. +func resetRoutingCommandFlags() { + // Import the routing package to access the global option variables + // Since we can't import the routing package directly (circular dependency), + // we need to reset the flags in a way that also clears the underlying slices + // Find the routing command + for _, cmd := range clicmd.RootCmd.Commands() { + if cmd.Name() == "routing" { + // Reset all routing subcommands + for _, subCmd := range cmd.Commands() { + switch subCmd.Name() { + case "list": + // Reset list command flags and underlying variables + resetStringArrayFlag(subCmd, "skill") + resetStringArrayFlag(subCmd, "locator") + resetStringArrayFlag(subCmd, "domain") + resetStringArrayFlag(subCmd, "module") + case "search": + // Reset search command flags and underlying variables + resetStringArrayFlag(subCmd, "skill") + resetStringArrayFlag(subCmd, "locator") + resetStringArrayFlag(subCmd, "domain") + resetStringArrayFlag(subCmd, "module") + } + } + } + } +} + +// resetStringArrayFlag completely resets a StringArrayVar flag by clearing its underlying slice. 
+func resetStringArrayFlag(cmd *cobra.Command, flagName string) { + if flag := cmd.Flags().Lookup(flagName); flag != nil { + // For StringArrayVar flags, we need to clear the underlying slice completely + // The flag.Value is a pointer to a stringArrayValue that wraps the actual slice + // Method 1: Set to empty string (should clear the slice) + _ = flag.Value.Set("") // Ignore error - flag reset is best effort + + // Method 2: Reset all flag metadata + flag.DefValue = "" + flag.Changed = false + + // Method 3: If the flag has a slice interface, try to clear it directly + if sliceValue, ok := flag.Value.(interface{ Replace([]string) error }); ok { + sliceValue.Replace([]string{}) //nolint:errcheck + } + } +} diff --git a/e2e/shared/utils/constants.go b/e2e/shared/utils/constants.go index 1289117f6..7d70d8a53 100644 --- a/e2e/shared/utils/constants.go +++ b/e2e/shared/utils/constants.go @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -// Network test constants for peer addresses. -const ( - // Local peer addresses used in network deployment tests. - Peer1Addr = "0.0.0.0:8890" - Peer2Addr = "0.0.0.0:8891" - Peer3Addr = "0.0.0.0:8892" - - // Internal Kubernetes service address for peer1. - Peer1InternalAddr = "agntcy-dir-apiserver.peer1.svc.cluster.local:8888" - - // Test directory prefixes for temporary files. - NetworkTestDirPrefix = "network-test" - SignTestDirPrefix = "sign-test" -) - -// PeerAddrs contains all peer addresses for iteration in tests. -var PeerAddrs = []string{Peer1Addr, Peer2Addr, Peer3Addr} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +// Network test constants for peer addresses. +const ( + // Local peer addresses used in network deployment tests. + Peer1Addr = "0.0.0.0:8890" + Peer2Addr = "0.0.0.0:8891" + Peer3Addr = "0.0.0.0:8892" + + // Internal Kubernetes service address for peer1. + Peer1InternalAddr = "agntcy-dir-apiserver.peer1.svc.cluster.local:8888" + + // Test directory prefixes for temporary files. + NetworkTestDirPrefix = "network-test" + SignTestDirPrefix = "sign-test" +) + +// PeerAddrs contains all peer addresses for iteration in tests. +var PeerAddrs = []string{Peer1Addr, Peer2Addr, Peer3Addr} diff --git a/e2e/shared/utils/network.go b/e2e/shared/utils/network.go index 5d8470595..b4159804d 100644 --- a/e2e/shared/utils/network.go +++ b/e2e/shared/utils/network.go @@ -1,45 +1,45 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "os" - "path/filepath" - - initcmd "github.com/agntcy/dir/cli/cmd/network/init" - "github.com/onsi/gomega" -) - -const ( - // PrivateKeyFileMode is the file permission for private key files (read/write for owner only). - PrivateKeyFileMode = 0o0600 -) - -// GenerateNetworkKeyPair generates an ED25519 key pair for network tests. -// Returns the path to the private key file. -func GenerateNetworkKeyPair(tempDir string) string { - // Generate OpenSSL-style ED25519 key - _, privateKey, err := initcmd.GenerateED25519OpenSSLKey() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // Write the private key to a temporary file - keyPath := filepath.Join(tempDir, "test_key") - err = os.WriteFile(keyPath, privateKey, PrivateKeyFileMode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - return keyPath -} - -// SetupNetworkTestDir creates a temporary directory for network tests. 
-func SetupNetworkTestDir() (string, func()) { - tempDir, err := os.MkdirTemp("", NetworkTestDirPrefix) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - cleanup := func() { - err := os.RemoveAll(tempDir) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - } - - return tempDir, cleanup -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "os" + "path/filepath" + + initcmd "github.com/agntcy/dir/cli/cmd/network/init" + "github.com/onsi/gomega" +) + +const ( + // PrivateKeyFileMode is the file permission for private key files (read/write for owner only). + PrivateKeyFileMode = 0o0600 +) + +// GenerateNetworkKeyPair generates an ED25519 key pair for network tests. +// Returns the path to the private key file. +func GenerateNetworkKeyPair(tempDir string) string { + // Generate OpenSSL-style ED25519 key + _, privateKey, err := initcmd.GenerateED25519OpenSSLKey() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Write the private key to a temporary file + keyPath := filepath.Join(tempDir, "test_key") + err = os.WriteFile(keyPath, privateKey, PrivateKeyFileMode) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + return keyPath +} + +// SetupNetworkTestDir creates a temporary directory for network tests. +func SetupNetworkTestDir() (string, func()) { + tempDir, err := os.MkdirTemp("", NetworkTestDirPrefix) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + cleanup := func() { + err := os.RemoveAll(tempDir) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + return tempDir, cleanup +} diff --git a/e2e/shared/utils/sign.go b/e2e/shared/utils/sign.go index f8e392090..83246ee1b 100644 --- a/e2e/shared/utils/sign.go +++ b/e2e/shared/utils/sign.go @@ -1,29 +1,29 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "context" - - "github.com/agntcy/dir/utils/cosign" - "github.com/onsi/ginkgo/v2" -) - -// Test constants for signature operations. -const ( - TestPassword = "testpassword" -) - -// GenerateCosignKeyPair generates a cosign key pair in the specified directory. -// Helper function for signature testing. -func GenerateCosignKeyPair(dir string) { - opts := &cosign.GenerateKeyPairOptions{ - Directory: dir, - Password: TestPassword, - } - - if err := cosign.GenerateKeyPair(context.Background(), opts); err != nil { - ginkgo.Fail("cosign generate-key-pair failed: " + err.Error()) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "context" + + "github.com/agntcy/dir/utils/cosign" + "github.com/onsi/ginkgo/v2" +) + +// Test constants for signature operations. +const ( + TestPassword = "testpassword" +) + +// GenerateCosignKeyPair generates a cosign key pair in the specified directory. +// Helper function for signature testing. 
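+//
+// Illustrative usage (the directory is a placeholder; the generated key pair
+// is written into it, protected by TestPassword):
+//
+//	GenerateCosignKeyPair(tempDir)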
+func GenerateCosignKeyPair(dir string) { + opts := &cosign.GenerateKeyPairOptions{ + Directory: dir, + Password: TestPassword, + } + + if err := cosign.GenerateKeyPair(context.Background(), opts); err != nil { + ginkgo.Fail("cosign generate-key-pair failed: " + err.Error()) + } +} diff --git a/e2e/shared/utils/utils.go b/e2e/shared/utils/utils.go index 47bb0af2a..befa78e99 100644 --- a/e2e/shared/utils/utils.go +++ b/e2e/shared/utils/utils.go @@ -1,15 +1,15 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package utils provides test utilities for E2E tests. -package utils - -// Commonly used functions are re-exported for convenience -// -// This package provides: -// - CLI command builders with fluent API (cli.go) -// - Common test utilities, comparisons, and channel collection (common.go) -// - Signature testing utilities (sign.go) -// - Network test constants and addresses (constants.go) -// - Network key generation utilities (network.go) -// - CID validation utilities (cid.go) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package utils provides test utilities for E2E tests. +package utils + +// Commonly used functions are re-exported for convenience +// +// This package provides: +// - CLI command builders with fluent API (cli.go) +// - Common test utilities, comparisons, and channel collection (common.go) +// - Signature testing utilities (sign.go) +// - Network test constants and addresses (constants.go) +// - Network key generation utilities (network.go) +// - CID validation utilities (cid.go) diff --git a/examples/directory_sample.json b/examples/directory_sample.json new file mode 100644 index 000000000..a5fbd4d05 --- /dev/null +++ b/examples/directory_sample.json @@ -0,0 +1,131 @@ +{ + "agents": [ + { + "id": "agent_alpha_clean", + "name": "Alpha Services", + "url": "https://alpha.example", + "capabilities": ["book", "quote", "support"], + "contact": "ops@alpha.example", + "updated_at": "2025-12-20", + "domain_verified": true, + "key_present": true, + "handshake_fail_ratio": 0.01, + "rate_limit_violations": 0, + "complaint_flags": 0 + }, + { + "id": "agent_beta_clean", + "name": "Beta Concierge", + "url": "https://beta.example", + "capabilities": ["schedule", "cancel", "status"], + "contact": "support@beta.example", + "updated_at": "2025-12-10", + "domain_verified": true, + "key_present": true, + "handshake_fail_ratio": 0.03, + "rate_limit_violations": 1, + "complaint_flags": 0 + }, + { + "id": "agent_gamma_ok", + "name": "Gamma Helper", + "url": "https://gamma.example", + "capabilities": ["info"], + "contact": "hello@gamma.example", + "updated_at": "2025-10-01", + "domain_verified": false, + "key_present": true, + "handshake_fail_ratio": 0.08, + "rate_limit_violations": 2, + "complaint_flags": 1 + }, + { + "id": "agent_delta_sparse", + "name": "Delta Agent", + "url": "https://delta.example", + "capabilities": [], + "updated_at": "2025-07-15", + "domain_verified": false, + "key_present": false, + "handshake_fail_ratio": 0.12, + "rate_limit_violations": 3, + "complaint_flags": 0 + }, + { + "id": "agent_epsilon_missing_contact", + "name": "Epsilon Tools", + "url": "https://epsilon.example", + "capabilities": ["quote"], + "updated_at": "2025-11-05", + "domain_verified": true, + "key_present": false, + "handshake_fail_ratio": 0.06, + "rate_limit_violations": 0, + "complaint_flags": 0 + }, + { + "id": "agent_zeta_suspicious", + "name": "Zeta Fast-Track", + "url": 
"https://zeta.example", + "capabilities": ["book", "pay", "refund"], + "contact": "contact@zeta.example", + "updated_at": "2024-12-01", + "domain_verified": false, + "key_present": false, + "handshake_fail_ratio": 0.45, + "rate_limit_violations": 25, + "complaint_flags": 7 + }, + { + "id": "agent_eta_suspicious", + "name": "ETA Deals", + "url": "https://eta.example", + "capabilities": ["book"], + "contact": "admin@eta.example", + "updated_at": "2025-01-10", + "domain_verified": false, + "key_present": false, + "handshake_fail_ratio": 0.33, + "rate_limit_violations": 12, + "complaint_flags": 4 + }, + { + "id": "agent_theta_clean", + "name": "Theta Support", + "url": "https://theta.example", + "capabilities": ["support", "status"], + "contact": "help@theta.example", + "updated_at": "2025-12-28", + "domain_verified": true, + "key_present": true, + "handshake_fail_ratio": 0.00, + "rate_limit_violations": 0, + "complaint_flags": 0 + }, + { + "id": "agent_iota_ok", + "name": "Iota Scheduling", + "url": "https://iota.example", + "capabilities": ["schedule"], + "contact": "team@iota.example", + "updated_at": "2025-09-09", + "domain_verified": false, + "key_present": true, + "handshake_fail_ratio": 0.10, + "rate_limit_violations": 1, + "complaint_flags": 0 + }, + { + "id": "agent_kappa_broken", + "name": "Kappa Broken Link", + "url": "", + "capabilities": ["book"], + "updated_at": "2023-06-01", + "domain_verified": false, + "key_present": false, + "handshake_fail_ratio": 0.60, + "rate_limit_violations": 40, + "complaint_flags": 10 + } + ] +} diff --git a/extensions/trust_ranking/README.md b/extensions/trust_ranking/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/extensions/trust_ranking/REFERENCE.md b/extensions/trust_ranking/REFERENCE.md new file mode 100644 index 000000000..84541f3da --- /dev/null +++ b/extensions/trust_ranking/REFERENCE.md @@ -0,0 +1,238 @@ +# Trust Ranking Extension (Reference PoC) + +> **Status:** Reference-only implementation +> **Scope:** Demonstration and discussion +> **Non-goals:** Security guarantees, standards, production readiness + +This document describes a **reference trust-ranking extension** that can be used +*alongside* the AGNTCY directory. It is intentionally optional, additive, and +non-authoritative. + +## Overview + +The AGNTCY directory enables capability-based discovery: + +> “Find agents that can do X.” + +As ecosystems grow, this becomes insufficient on its own. +When many agents claim the same capability, consumers need additional signals +to decide *which* agent to try first. + +This reference extension demonstrates how **trust-related signals** could +influence ranking decisions without changing directory semantics or protocol +behavior. + +## Background + +Identity verification answers “who is this agent?” +Trust-related signals answer “how reliable does this agent appear to be?” + +Both dimensions matter. +This reference focuses on ranking based on the latter. + +## What this extension is + +- A **sidecar** ranking module +- A **toy scoring model** using simple heuristics +- A **runnable demo** that produces explainable results +- A way to explore *interfaces*, not prescribe policy + +## What this extension is not + +- Not a security system +- Not a standard +- Not a recommendation for production use +- Not a source of truth for trust decisions + +All trust logic here is local, subjective, and replaceable. 
+ +## Architecture + +``` +User / Client + | + v +Directory Search → [Capable Agents] + | + v + Trust Ranking (optional) + | + v + [Ranked Results] +``` + +- The AGNTCY directory remains unchanged. +- Ranking occurs **after** discovery. +- Consumers opt in by choosing to apply a ranker. + +## Scoring Model (Reference Only) + +The reference implementation evaluates four dimensions. +Weights are fixed and chosen for clarity, not optimality. + +### 1. Completeness (35%) + +**What:** Is the agent profile reasonably complete? + +**Signals:** + +- presence of `id`, `name`, `url` +- non-empty `capabilities` +- contact information +- `updated_at` timestamp + +**Rationale:** Complete profiles are easier to understand and maintain. + +### 2. Verification (25%) + +**What:** Are basic identity signals present? + +**Signals:** + +- `domain_verified` +- `key_present` + +**Rationale:** Verification raises the cost of impersonation, even if imperfect. + +### 3. Freshness (20%) + +**What:** Is the profile actively maintained? + +**Signals:** + +- recently updated +- moderately recent +- stale + +**Rationale:** Abandoned profiles correlate with broken integrations. + +### 4. Behavior (20%) + +**What:** Are there basic indicators of operational reliability? + +**Signals (simulated in demo):** + +- handshake failure ratio +- rate limit violations +- complaint flags + +**Rationale:** Past behavior is often predictive of future reliability. + +## Final Score + +Scores are combined into a 0–100 range and capped below 100 +to avoid implying certainty. + +**Trust bands:** + +- **Green:** high confidence +- **Yellow:** medium confidence +- **Red:** low confidence + +Scores are accompanied by **human-readable reasons** to make +ranking decisions inspectable. + +## Usage + +### Run the demo + +```bash +python scripts/run_trust_ranking.py --top 10 +``` + +### JSON Output + +```bash +python scripts/run_trust_ranking.py --json > ranked.json +``` + +Each returned agent may include: + +```json +"trust": { + "score": 77.0, + "band": "yellow", + "reasons": [ + "Profile is somewhat complete", + "Updated this quarter", + "No rate limit violations" + ] +} +``` + +### Example Output (Illustrative) + +``` +1. Theta Support + id: agent_theta_clean + url: https://theta.example + trust: 99.0 (green) + reason: Profile is complete; Recently updated; Low handshake failure rate + +2. Alpha Services + id: agent_alpha_clean + url: https://alpha.example + trust: 99.0 (green) + reason: Profile is complete; Recently updated; Domain verified +``` + +Output is dependent on local scoring logic and input data; results are not authoritative. + +## Integration Patterns + +### Pattern 1: Client-side ranking (recommended) + +```python +# Fetch capable agents from a directory +agents = directory_search(...) + +# Apply optional trust ranking locally +ranked_agents = rank_agents(agents) + +# Select a preferred agent +selected = ranked_agents[0] +``` + +- No server or protocol changes required +- Multiple ranking models can coexist +- Trust preferences remain local to the client + + + +### Pattern 2: Proxy service + +A proxy queries the directory, applies ranking, and returns ordered results. +Useful for shared logic, but introduces centralization tradeoffs. + +### Pattern 3: Directory plugin (future) + +Trust ranking as an optional server-side hook. +This requires community discussion and governance alignment. 
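+
+A purely hypothetical shape for such a hook (the name and signature below are
+invented for illustration; no such extension point exists in the directory
+today):
+
+```python
+def post_search_hook(results: list[dict], ranker=None) -> list[dict]:
+    """Optionally re-rank search results before returning them."""
+    return ranker(results) if ranker is not None else results
+```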
+ +## Limitations + +This PoC intentionally omits: + +- adversarial robustness and Sybil resistance +- cryptographic binding of behavior to identity +- adaptive or context-dependent weighting +- trust decay, recovery, or volatility +- cross-observer reputation aggregation +- production concerns (scale, abuse, monitoring) + +These omissions are deliberate. + +## Purpose + +This reference exists to support discussion around: + +- where trust-based ranking should live +- how ranking logic can remain optional +- how explanations improve transparency +- how ecosystems avoid a single “trust authority” + +Feedback and alternative approaches are encouraged. + +## License + +Apache 2.0 (same as AGNTCY dir) diff --git a/extensions/trust_ranking/interface.py b/extensions/trust_ranking/interface.py new file mode 100644 index 000000000..f716b45f2 --- /dev/null +++ b/extensions/trust_ranking/interface.py @@ -0,0 +1,28 @@ +""" +Trust ranking interface (reference only). + +This defines the minimal contract a trust ranker must implement. +It is intentionally simple and non-prescriptive. +""" + +from typing import List, Dict, Any + + +def rank_agents( + agents: List[Dict[str, Any]], + query: Dict[str, Any] | None = None, + context: Dict[str, Any] | None = None, +) -> List[Dict[str, Any]]: + """ + Rank a list of agent directory entries. + + Parameters: + agents: list of agent-like dicts from a directory + query: optional user or agent query context + context: optional execution or environment context + + Returns: + The same agents, ordered by preference. + Each agent MAY include a 'trust' field with scoring metadata. + """ + raise NotImplementedError("Trust ranker not implemented") diff --git a/extensions/trust_ranking/reference_ranker.py b/extensions/trust_ranking/reference_ranker.py new file mode 100644 index 000000000..bd65f6779 --- /dev/null +++ b/extensions/trust_ranking/reference_ranker.py @@ -0,0 +1,254 @@ +""" +Reference trust ranker (toy scoring). + +This is NOT a security system. +It is a demo of how directories could incorporate trust signals +into ranking decisions in an explainable way. +""" + +from __future__ import annotations + +from typing import List, Dict, Any, Tuple +from datetime import datetime, timezone + + +def _parse_date_yyyy_mm_dd(s: str | None) -> datetime | None: + if not s: + return None + try: + return datetime.strptime(s, "%Y-%m-%d").replace(tzinfo=timezone.utc) + except ValueError: + return None + + +def _clamp(x: float, lo: float, hi: float) -> float: + return max(lo, min(hi, x)) + + +def _completeness(agent: Dict[str, Any]) -> Tuple[float, List[str]]: + """ + Completeness based on presence of common directory fields. + Returns score 0..1 and reasons. + """ + required = ["id", "name", "url", "capabilities", "contact", "updated_at"] + present = 0 + missing = [] + + for k in required: + v = agent.get(k) + ok = v is not None and v != "" and (v != [] if k == "capabilities" else True) + if ok: + present += 1 + else: + missing.append(k) + + score = present / float(len(required)) + reasons = [] + if score >= 0.9: + reasons.append("Profile is complete") + elif score >= 0.6: + reasons.append("Profile is somewhat complete") + else: + reasons.append("Profile is missing key fields") + + if missing: + reasons.append("Missing: " + ", ".join(missing[:3]) + ("..." if len(missing) > 3 else "")) + + return score, reasons + + +def _freshness(agent: Dict[str, Any]) -> Tuple[float, List[str]]: + """ + Freshness score based on updated_at. 0..1. + Newer is better. Very old is bad. 
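+    Buckets (mirroring the code below): <=30 days -> 1.0, <=120 -> 0.7,
+    <=365 -> 0.4, otherwise 0.1; missing or invalid dates score 0.2.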
+ """ + dt = _parse_date_yyyy_mm_dd(agent.get("updated_at")) + if not dt: + return 0.2, ["No valid updated_at date"] + + now = datetime.now(timezone.utc) + days = (now - dt).days + + # Simple buckets + if days <= 30: + return 1.0, ["Recently updated"] + if days <= 120: + return 0.7, ["Updated this quarter"] + if days <= 365: + return 0.4, ["Updated within a year"] + + return 0.1, ["Stale profile"] + + +def _verification(agent: Dict[str, Any]) -> Tuple[float, List[str]]: + """ + Verification score based on flags. 0..1. + """ + domain_verified = bool(agent.get("domain_verified")) + key_present = bool(agent.get("key_present")) + + score = 0.0 + reasons = [] + + if domain_verified: + score += 0.6 + reasons.append("Domain verified") + else: + reasons.append("Domain not verified") + + if key_present: + score += 0.4 + reasons.append("Key present") + else: + reasons.append("No key") + + return score, reasons + + +def _behavior(agent: Dict[str, Any]) -> Tuple[float, List[str]]: + """ + Behavior score from simulated hints. 0..1. + Lower failures/violations/complaints is better. + """ + fail_ratio = agent.get("handshake_fail_ratio") + violations = agent.get("rate_limit_violations") + complaints = agent.get("complaint_flags") + + # defaults if absent + try: + fail_ratio = float(fail_ratio) if fail_ratio is not None else 0.10 + except (TypeError, ValueError): + fail_ratio = 0.10 + + try: + violations = int(violations) if violations is not None else 0 + except (TypeError, ValueError): + violations = 0 + + try: + complaints = int(complaints) if complaints is not None else 0 + except (TypeError, ValueError): + complaints = 0 + + # Convert to penalties (toy) + # Fail ratio: 0.0 -> 0 penalty, 0.5 -> heavy penalty + fail_pen = _clamp(fail_ratio / 0.5, 0.0, 1.0) + + # Violations: 0 -> 0 penalty, 25+ -> heavy penalty + viol_pen = _clamp(violations / 25.0, 0.0, 1.0) + + # Complaints: 0 -> 0 penalty, 10+ -> heavy penalty + comp_pen = _clamp(complaints / 10.0, 0.0, 1.0) + + penalty = 0.5 * fail_pen + 0.3 * viol_pen + 0.2 * comp_pen + score = 1.0 - _clamp(penalty, 0.0, 1.0) + + reasons = [] + if fail_ratio >= 0.30: + reasons.append("High handshake failure rate") + elif fail_ratio <= 0.05: + reasons.append("Low handshake failure rate") + + if violations >= 10: + reasons.append("Many rate limit violations") + elif violations == 0: + reasons.append("No rate limit violations") + + if complaints >= 3: + reasons.append("Multiple complaint flags") + elif complaints == 0: + reasons.append("No complaint flags") + + return score, reasons + + +def _band(score_0_100: float) -> str: + if score_0_100 >= 80: + return "green" + if score_0_100 >= 50: + return "yellow" + return "red" + + +def _top_reasons(reasons: List[str], limit: int = 3) -> List[str]: + # Keep unique, preserve order + out = [] + seen = set() + for r in reasons: + r = r.strip() + if not r or r in seen: + continue + out.append(r) + seen.add(r) + if len(out) >= limit: + break + return out + + +def rank_agents( + agents: List[Dict[str, Any]], + query: Dict[str, Any] | None = None, + context: Dict[str, Any] | None = None, +) -> List[Dict[str, Any]]: + """ + Toy ranker. Produces: + trust.score 0..100 + trust.band green|yellow|red + trust.reasons[] (top 3, human-readable) + + Returns ranked list (descending trust.score). + """ + scored: List[Tuple[float, Dict[str, Any]]] = [] + + for agent in agents: + a = dict(agent) + + comp, comp_r = _completeness(a) + fresh, fresh_r = _freshness(a) + ver, ver_r = _verification(a) + beh, beh_r = _behavior(a) + + # Weights (toy). 
Sum to 1.0. + score_0_1 = 0.35 * comp + 0.20 * fresh + 0.25 * ver + 0.20 * beh + score_0_100 = round(_clamp(score_0_1, 0.0, 1.0) * 100.0, 1) + # Avoid "perfect trust" optics in a PoC + score_0_100 = min(score_0_100, 99.0) + + # Build an explanation that covers different categories. + # We want: completeness, freshness, and either verification OR behavior, + # but behavior should show up when it has something to say. + comp_pick = comp_r[:1] + fresh_pick = fresh_r[:1] + ver_pick = ver_r[:1] + beh_pick = beh_r[:1] + + # Start with completeness + freshness + preferred = comp_pick + fresh_pick + + # Then prefer behavior if it's informative (not empty) + if beh_pick: + preferred += beh_pick + else: + preferred += ver_pick + + # Fill remaining slots from everything else, preserving uniqueness + reasons_all = comp_r + fresh_r + ver_r + beh_r + reasons = _top_reasons(preferred + reasons_all, limit=3) + + a["trust"] = { + "score": score_0_100, + "band": _band(score_0_100), + "reasons": reasons, + } + + scored.append((score_0_100, a)) + + # Stable ordering: score desc, then name/id asc + scored.sort( + key=lambda t: ( + -t[0], + (t[1].get("name") or "").lower(), + (t[1].get("id") or "").lower(), + ) + ) + return [a for _, a in scored] diff --git a/extensions/trust_ranking/tests/test_reference_ranker.py b/extensions/trust_ranking/tests/test_reference_ranker.py new file mode 100644 index 000000000..6d29ad3d7 --- /dev/null +++ b/extensions/trust_ranking/tests/test_reference_ranker.py @@ -0,0 +1,98 @@ +import unittest + +from extensions.trust_ranking.reference_ranker import rank_agents + + +class TestReferenceRanker(unittest.TestCase): + def setUp(self): + # Minimal "good" agent: complete + verified + fresh + clean behavior + self.good_agent = { + "id": "agent_good", + "name": "Good Agent", + "url": "https://good.example", + "capabilities": ["book"], + "contact": "ops@good.example", + "updated_at": "2025-12-28", + "domain_verified": True, + "key_present": True, + "handshake_fail_ratio": 0.0, + "rate_limit_violations": 0, + "complaint_flags": 0, + } + + # Minimal "bad" agent: missing fields + stale + unverified + bad behavior + self.bad_agent = { + "id": "agent_bad", + "name": "Bad Agent", + "url": "", + "capabilities": [], + "updated_at": "2023-01-01", + "domain_verified": False, + "key_present": False, + "handshake_fail_ratio": 0.60, + "rate_limit_violations": 40, + "complaint_flags": 10, + } + + # Two agents with identical score inputs except name/id to test tie-break stability + self.tie_a = { + "id": "agent_tie_a", + "name": "Alpha", + "url": "https://tie.example/a", + "capabilities": ["info"], + "contact": "a@tie.example", + "updated_at": "2025-12-28", + "domain_verified": True, + "key_present": True, + "handshake_fail_ratio": 0.01, + "rate_limit_violations": 0, + "complaint_flags": 0, + } + self.tie_b = dict(self.tie_a) + self.tie_b["id"] = "agent_tie_b" + self.tie_b["name"] = "Beta" + self.tie_b["url"] = "https://tie.example/b" + + def test_good_agent_scores_high(self): + ranked = rank_agents([self.good_agent]) + trust = ranked[0].get("trust", {}) + self.assertIn("score", trust) + self.assertIn("band", trust) + self.assertIn("reasons", trust) + + # "High" threshold. Adjust if you change weights later. 
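+        # 80.0 mirrors the green-band cutoff in _band(); a complete, fresh,
+        # verified, well-behaved agent should clear it comfortably (the
+        # ranker caps scores at 99.0, so never expect a perfect 100).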
+ self.assertGreaterEqual(trust["score"], 80.0) + self.assertEqual(trust["band"], "green") + self.assertTrue(isinstance(trust["reasons"], list)) + self.assertGreaterEqual(len(trust["reasons"]), 1) + + def test_bad_agent_scores_low(self): + ranked = rank_agents([self.bad_agent]) + trust = ranked[0].get("trust", {}) + self.assertLessEqual(trust["score"], 49.9) + self.assertEqual(trust["band"], "red") + + def test_ranking_orders_by_score_desc(self): + ranked = rank_agents([self.bad_agent, self.good_agent]) + self.assertEqual(ranked[0]["id"], "agent_good") + self.assertEqual(ranked[-1]["id"], "agent_bad") + + # Explicit score ordering check + top_score = ranked[0]["trust"]["score"] + bottom_score = ranked[-1]["trust"]["score"] + self.assertGreaterEqual(top_score, bottom_score) + + def test_deterministic_tie_break(self): + # With identical scores, we expect stable ordering: + # score desc, then name asc, then id asc (per your sort key). + ranked = rank_agents([self.tie_b, self.tie_a]) + self.assertEqual(ranked[0]["id"], "agent_tie_a") + self.assertEqual(ranked[1]["id"], "agent_tie_b") + + # Run again to ensure repeatability + ranked2 = rank_agents([self.tie_b, self.tie_a]) + self.assertEqual([a["id"] for a in ranked], [a["id"] for a in ranked2]) + + +if __name__ == "__main__": + unittest.main() diff --git a/importer/config/config.go b/importer/config/config.go index a42fc3b70..92df9d1d5 100644 --- a/importer/config/config.go +++ b/importer/config/config.go @@ -1,69 +1,69 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - - corev1 "github.com/agntcy/dir/api/core/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/client/streaming" -) - -// RegistryType represents the type of external registry to import from. -type RegistryType string - -const ( - // RegistryTypeMCP represents the Model Context Protocol registry. - RegistryTypeMCP RegistryType = "mcp" - - // FUTURE: RegistryTypeNANDA represents the NANDA registry. - // RegistryTypeNANDA RegistryType = "nanda". - - // FUTURE:RegistryTypeA2A represents the Agent-to-Agent protocol registry. - // RegistryTypeA2A RegistryType = "a2a". -) - -// ClientInterface defines the interface for the DIR client used by importers. -// This allows for easier testing and mocking. -type ClientInterface interface { - Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) - SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) - PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) -} - -// Config contains configuration for an import operation. 
-type Config struct { - RegistryType RegistryType // Registry type identifier - RegistryURL string // Base URL of the registry - Filters map[string]string // Registry-specific filters - Limit int // Number of records to import (default: 0 for all) - Concurrency int // Number of concurrent workers (default: 1) - DryRun bool // If true, preview without actually importing - - Enrich bool // If true, enrich the records with LLM - EnricherConfigFile string // Path to MCPHost configuration file (e.g., mcphost.json) - EnricherSkillsPromptTemplate string // Optional: path to custom skills prompt template or inline prompt (empty = use default) - EnricherDomainsPromptTemplate string // Optional: path to custom domains prompt template or inline prompt (empty = use default) - Force bool // If true, push even if record already exists - Debug bool // If true, enable verbose debug output -} - -// Validate checks if the configuration is valid. -func (c *Config) Validate() error { - if c.RegistryType == "" { - return errors.New("registry type is required") - } - - if c.RegistryURL == "" { - return errors.New("registry URL is required") - } - - if c.Concurrency <= 0 { - c.Concurrency = 1 // Set default concurrency - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "context" + "errors" + + corev1 "github.com/agntcy/dir/api/core/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/client/streaming" +) + +// RegistryType represents the type of external registry to import from. +type RegistryType string + +const ( + // RegistryTypeMCP represents the Model Context Protocol registry. + RegistryTypeMCP RegistryType = "mcp" + + // FUTURE: RegistryTypeNANDA represents the NANDA registry. + // RegistryTypeNANDA RegistryType = "nanda". + + // FUTURE:RegistryTypeA2A represents the Agent-to-Agent protocol registry. + // RegistryTypeA2A RegistryType = "a2a". +) + +// ClientInterface defines the interface for the DIR client used by importers. +// This allows for easier testing and mocking. +type ClientInterface interface { + Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) + SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) + PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) +} + +// Config contains configuration for an import operation. +type Config struct { + RegistryType RegistryType // Registry type identifier + RegistryURL string // Base URL of the registry + Filters map[string]string // Registry-specific filters + Limit int // Number of records to import (default: 0 for all) + Concurrency int // Number of concurrent workers (default: 1) + DryRun bool // If true, preview without actually importing + + Enrich bool // If true, enrich the records with LLM + EnricherConfigFile string // Path to MCPHost configuration file (e.g., mcphost.json) + EnricherSkillsPromptTemplate string // Optional: path to custom skills prompt template or inline prompt (empty = use default) + EnricherDomainsPromptTemplate string // Optional: path to custom domains prompt template or inline prompt (empty = use default) + Force bool // If true, push even if record already exists + Debug bool // If true, enable verbose debug output +} + +// Validate checks if the configuration is valid. 
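+// It returns an error when RegistryType or RegistryURL is empty, and it
+// normalizes a non-positive Concurrency to the default of 1 as a side effect.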
+func (c *Config) Validate() error { + if c.RegistryType == "" { + return errors.New("registry type is required") + } + + if c.RegistryURL == "" { + return errors.New("registry URL is required") + } + + if c.Concurrency <= 0 { + c.Concurrency = 1 // Set default concurrency + } + + return nil +} diff --git a/importer/config/config_test.go b/importer/config/config_test.go index 6c1e50f00..bccbc01f7 100644 --- a/importer/config/config_test.go +++ b/importer/config/config_test.go @@ -1,86 +1,86 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:nilnil -package config - -import ( - "testing" -) - -func TestConfig_Validate(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "valid config", - config: Config{ - RegistryType: RegistryTypeMCP, - RegistryURL: "https://registry.example.com", - Concurrency: 10, - }, - wantErr: false, - }, - { - name: "missing registry type", - config: Config{ - RegistryURL: "https://registry.example.com", - Concurrency: 10, - }, - wantErr: true, - errMsg: "registry type is required", - }, - { - name: "missing registry URL", - config: Config{ - RegistryType: RegistryTypeMCP, - Concurrency: 10, - }, - wantErr: true, - errMsg: "registry URL is required", - }, - { - name: "zero concurrency sets default", - config: Config{ - RegistryType: RegistryTypeMCP, - RegistryURL: "https://registry.example.com", - Concurrency: 0, - }, - wantErr: false, - }, - { - name: "negative concurrency sets default", - config: Config{ - RegistryType: RegistryTypeMCP, - RegistryURL: "https://registry.example.com", - Concurrency: -1, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - if (err != nil) != tt.wantErr { - t.Errorf("Config.Validate() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if tt.wantErr && err.Error() != tt.errMsg { - t.Errorf("Config.Validate() error message = %v, want %v", err.Error(), tt.errMsg) - } - - // Check that default concurrency is set when invalid - if !tt.wantErr && tt.config.Concurrency <= 0 { - if tt.config.Concurrency != 5 { - t.Errorf("Config.Validate() did not set default concurrency, got %d, want 5", tt.config.Concurrency) - } - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:nilnil +package config + +import ( + "testing" +) + +func TestConfig_Validate(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "valid config", + config: Config{ + RegistryType: RegistryTypeMCP, + RegistryURL: "https://registry.example.com", + Concurrency: 10, + }, + wantErr: false, + }, + { + name: "missing registry type", + config: Config{ + RegistryURL: "https://registry.example.com", + Concurrency: 10, + }, + wantErr: true, + errMsg: "registry type is required", + }, + { + name: "missing registry URL", + config: Config{ + RegistryType: RegistryTypeMCP, + Concurrency: 10, + }, + wantErr: true, + errMsg: "registry URL is required", + }, + { + name: "zero concurrency sets default", + config: Config{ + RegistryType: RegistryTypeMCP, + RegistryURL: "https://registry.example.com", + Concurrency: 0, + }, + wantErr: false, + }, + { + name: "negative concurrency sets default", + config: Config{ + RegistryType: RegistryTypeMCP, + RegistryURL: "https://registry.example.com", + Concurrency: -1, + }, + wantErr: false, + }, + } + + for 
_, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.config.Validate()
+			if (err != nil) != tt.wantErr {
+				t.Errorf("Config.Validate() error = %v, wantErr %v", err, tt.wantErr)
+
+				return
+			}
+
+			if tt.wantErr && err.Error() != tt.errMsg {
+				t.Errorf("Config.Validate() error message = %v, want %v", err.Error(), tt.errMsg)
+			}
+
+			// Check that Validate applied the default concurrency for non-positive inputs
+			if !tt.wantErr {
+				if tt.config.Concurrency < 1 {
+					t.Errorf("Config.Validate() did not set default concurrency, got %d, want 1", tt.config.Concurrency)
+				}
+			}
+		})
+	}
+}
diff --git a/importer/enricher/enricher.domains.prompt.md b/importer/enricher/enricher.domains.prompt.md
index b0b18fb69..864330af8 100644
--- a/importer/enricher/enricher.domains.prompt.md
+++ b/importer/enricher/enricher.domains.prompt.md
@@ -1,57 +1,57 @@
-CRITICAL: You MUST call tools FIRST before responding!
-
-STEP 1 - CALL THIS TOOL NOW:
-Tool: dir-mcp-server__agntcy_oasf_get_schema_domains
-Args: {"version": "0.7.0"}
-
-Wait for response. The response will show top-level domains like:
-{"name": "artificial_intelligence", ...}, {"name": "data_science", ...}, {"name": "software_engineering", ...}
-
-STEP 2 - Pick ONE domain "name" from Step 1 (e.g. "artificial_intelligence")
-
-STEP 3 - CALL THIS TOOL NOW:
-Tool: dir-mcp-server__agntcy_oasf_get_schema_domains
-Args: {"version": "0.7.0", "parent_domain": "YOUR_CHOICE_FROM_STEP_2"}
-
-Wait for response. The response will show sub-domains with "name" and "id" fields like:
-{"name": "machine_learning", "caption": "ML", "id": 101}
-{"name": "computer_vision", "caption": "CV", "id": 102}
-
-STEP 4 - Pick 1-3 sub-domains and extract BOTH "name" and "id" from Step 3
-
-DO NOT INVENT NAMES! These DO NOT exist:
-❌ "ai_model_development"
-❌ "cloud_services"
-❌ "web_development"
-❌ "mobile_apps"
-
-Real examples (from actual schema):
-✓ "technology/internet_of_things" with id 101
-✓ "technology/software_engineering" with id 102
-✓ "trust_and_safety/online_safety" with its corresponding id 401
-✓ "finance_and_business/consumer_goods" with its corresponding id 204
-
-STEP 5 - OUTPUT FORMAT (CRITICAL):
-Return ONLY the raw JSON object below. DO NOT wrap in markdown code blocks.
-DO NOT use markdown formatting. DO NOT add language tags like "json".
-DO NOT add ANY text or explanation before or after the JSON.
-
-Your response must start with "{" and end with "}".
-
-Return exactly this structure:
-{
-  "domains": [
-    {
-      "name": "parent_domain/sub_domain",
-      "id": 101,
-      "confidence": 0.95,
-      "reasoning": "Brief explanation"
-    }
-  ]
-}
-
-IMPORTANT: The "id" field MUST be the exact ID returned by the get_schema_domains tool in Step 3.
-Do NOT invent or guess IDs. Use only the IDs from the tool response.
-
-Agent record to analyze:
-
+CRITICAL: You MUST call tools FIRST before responding!
+
+STEP 1 - CALL THIS TOOL NOW:
+Tool: dir-mcp-server__agntcy_oasf_get_schema_domains
+Args: {"version": "0.7.0"}
+
+Wait for response. The response will show top-level domains like:
+{"name": "artificial_intelligence", ...}, {"name": "data_science", ...}, {"name": "software_engineering", ...}
+
+STEP 2 - Pick ONE domain "name" from Step 1 (e.g. "artificial_intelligence")
+
+STEP 3 - CALL THIS TOOL NOW:
+Tool: dir-mcp-server__agntcy_oasf_get_schema_domains
+Args: {"version": "0.7.0", "parent_domain": "YOUR_CHOICE_FROM_STEP_2"}
+
+Wait for response.
The response will show sub-domains with "name" and "id" fields like: +{"name": "machine_learning", "caption": "ML", "id": 101} +{"name": "computer_vision", "caption": "CV", "id": 102} + +STEP 4 - Pick 1-3 sub-domains and extract BOTH "name" and "id" from Step 3 + +DO NOT INVENT NAMES! These DO NOT exist: +❌ "ai_model_development" +❌ "cloud_services" +❌ "web_development" +❌ "mobile_apps" + +Real examples (from actual schema): +✓ "technology/internet_of_things" with id 101 +✓ "technology/software_engineering" with id 102 +✓ "trust_and_safety/online_safety" with its corresponding id 401 +✓ "finance_and_business/consumer_goods" with its corresponding id 204 + +STEP 5 - OUTPUT FORMAT (CRITICAL): +Return ONLY the raw JSON object below. DO NOT wrap in markdown code blocks. +DO NOT use markdown formatting. DO NOT add language tags like "json". +DO NOT add ANY text or explanation before or after the JSON. + +Your response must start with "{" and end with "}". + +Return exactly this structure: +{ + "domains": [ + { + "name": "parent_domain/sub_domain", + "id": 101, + "confidence": 0.95, + "reasoning": "Brief explanation" + } + ] +} + +IMPORTANT: The "id" field MUST be the exact ID returned by the get_schema_domains tool in Step 3. +Do NOT invent or guess IDs. Use only the IDs from the tool response. + +Agent record to analyze: + diff --git a/importer/enricher/enricher.go b/importer/enricher/enricher.go index b179238f0..0343871e5 100644 --- a/importer/enricher/enricher.go +++ b/importer/enricher/enricher.go @@ -1,316 +1,316 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package enricher - -import ( - "context" - _ "embed" - "encoding/json" - "fmt" - "os" - "strings" - - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - "github.com/agntcy/dir/utils/logging" - "github.com/mark3labs/mcphost/sdk" -) - -var logger = logging.Logger("importer/enricher") - -//go:embed enricher.skills.prompt.md -var defaultSkillsPromptTemplate string - -//go:embed enricher.domains.prompt.md -var defaultDomainsPromptTemplate string - -const ( - DebugMode = false - DefaultConfigFile = "importer/enricher/mcphost.json" - DefaultConfidenceThreshold = 0.5 -) - -type Config struct { - ConfigFile string // Path to mcphost configuration file (e.g., mcphost.json) - SkillsPromptTemplate string // Optional: path to custom skills prompt template file or inline prompt (empty = use default) - DomainsPromptTemplate string // Optional: path to custom domains prompt template file or inline prompt (empty = use default) -} - -type MCPHostClient struct { - host *sdk.MCPHost - skillsPromptTemplate string - domainsPromptTemplate string -} - -// EnrichedField represents a single enriched field (skill or domain) with metadata. -type EnrichedField struct { - Name string `json:"name"` - ID uint32 `json:"id"` - Confidence float64 `json:"confidence"` - Reasoning string `json:"reasoning"` -} - -// EnrichmentResponse represents the structured JSON response from the LLM. -// It can contain either skills or domains depending on the enrichment type. 
-type EnrichmentResponse struct { - Skills []EnrichedField `json:"skills,omitempty"` - Domains []EnrichedField `json:"domains,omitempty"` -} - -func NewMCPHost(ctx context.Context, config Config) (*MCPHostClient, error) { - // Initialize MCP Host - host, err := sdk.New(ctx, &sdk.Options{ - ConfigFile: config.ConfigFile, - }) - if err != nil { - return nil, fmt.Errorf("failed to create MCPHost client: %w", err) - } - - // Load prompt templates - use custom if provided, otherwise use defaults - skillsPrompt, err := loadPromptTemplate(config.SkillsPromptTemplate, defaultSkillsPromptTemplate) - if err != nil { - return nil, fmt.Errorf("failed to load skills prompt template: %w", err) - } - - domainsPrompt, err := loadPromptTemplate(config.DomainsPromptTemplate, defaultDomainsPromptTemplate) - if err != nil { - return nil, fmt.Errorf("failed to load domains prompt template: %w", err) - } - - if DebugMode { - runGetSchemaToolsPrompt(ctx, host) - } - - return &MCPHostClient{ - host: host, - skillsPromptTemplate: skillsPrompt, - domainsPromptTemplate: domainsPrompt, - }, nil -} - -// loadPromptTemplate loads the prompt template from config or uses the provided default. -// If promptTemplateConfig is empty, uses the provided default template. -// If promptTemplateConfig looks like a file path (contains "/" or ends with ".md"), loads from file. -// Otherwise, treats it as an inline prompt template string. -func loadPromptTemplate(promptTemplateConfig, defaultTemplate string) (string, error) { - // Use default embedded template if no custom template specified - if promptTemplateConfig == "" { - logger.Debug("Using default embedded prompt template") - - return defaultTemplate, nil - } - - // Check if it looks like a file path - if strings.Contains(promptTemplateConfig, "/") || strings.HasSuffix(promptTemplateConfig, ".md") { - logger.Debug("Loading prompt template from file", "path", promptTemplateConfig) - - data, err := os.ReadFile(promptTemplateConfig) - if err != nil { - return "", fmt.Errorf("failed to read prompt template file %s: %w", promptTemplateConfig, err) - } - - return string(data), nil - } - - // Treat as inline prompt template - logger.Debug("Using inline prompt template from config") - - return promptTemplateConfig, nil -} - -// fieldType represents the type of field being enriched (skills or domains). -type fieldType string - -const ( - fieldTypeSkills fieldType = "skills" - fieldTypeDomains fieldType = "domains" -) - -// EnrichWithSkills enriches the record with OASF skills using the LLM and MCP tools. -func (c *MCPHostClient) EnrichWithSkills(ctx context.Context, record *typesv1alpha1.Record) (*typesv1alpha1.Record, error) { - return c.enrichField(ctx, record, fieldTypeSkills, c.skillsPromptTemplate) -} - -// EnrichWithDomains enriches the record with OASF domains using the LLM and MCP tools. -func (c *MCPHostClient) EnrichWithDomains(ctx context.Context, record *typesv1alpha1.Record) (*typesv1alpha1.Record, error) { - return c.enrichField(ctx, record, fieldTypeDomains, c.domainsPromptTemplate) -} - -// enrichField is the generic enrichment method that handles both skills and domains. 
-func (c *MCPHostClient) enrichField( - ctx context.Context, - record *typesv1alpha1.Record, - fType fieldType, - promptTemplate string, -) (*typesv1alpha1.Record, error) { - // Marshal the record to JSON - recordJSON, err := json.Marshal(record) - if err != nil { - return nil, fmt.Errorf("failed to marshal record: %w", err) - } - - // Run prompt with the specified template - response, err := c.runPrompt(ctx, promptTemplate, recordJSON) - if err != nil { - return nil, fmt.Errorf("failed to run prompt for %s: %w", fType, err) - } - - // Parse response to get enriched fields - enrichedFields, err := c.parseResponse(response, fType) - if err != nil { - return nil, fmt.Errorf("failed to parse %s: %w", fType, err) - } - - // Filter by confidence threshold and add to record - for _, field := range enrichedFields { - if field.Confidence >= DefaultConfidenceThreshold { - switch fType { - case fieldTypeSkills: - record.Skills = append(record.Skills, &typesv1alpha1.Skill{ - Name: field.Name, - Id: field.ID, - }) - case fieldTypeDomains: - record.Domains = append(record.Domains, &typesv1alpha1.Domain{ - Name: field.Name, - Id: field.ID, - }) - } - - logger.Debug(fmt.Sprintf("Added %s", fType), "name", field.Name, "id", field.ID, "confidence", field.Confidence, "reasoning", field.Reasoning) - } else { - logger.Debug(fmt.Sprintf("Skipped low-confidence %s", fType), "name", field.Name, "confidence", field.Confidence, "threshold", DefaultConfidenceThreshold) - } - } - - enrichedRecordJSON, err := json.Marshal(record) - if err != nil { - return nil, fmt.Errorf("failed to marshal enriched record: %w", err) - } - - logger.Debug(fmt.Sprintf("Enriched record with %s", fType), "record", string(enrichedRecordJSON)) - - return record, nil -} - -func runGetSchemaToolsPrompt(ctx context.Context, host *sdk.MCPHost) { - // Get 3 OASF skills - resp, err := host.Prompt(ctx, "Call the tool 'dir-mcp-server__agntcy_oasf_get_schema_skills' and return 3 skill names)") - if err != nil { - logger.Error("failed to get 3 OASF skills", "error", err) - } - - logger.Info("3 OASF skills", "skills", resp) - - // Get 3 sub-skills for the skill natural_language_processing - resp, err = host.Prompt(ctx, "Call the tool 'dir-mcp-server__agntcy_oasf_get_schema_skills' and return 3 sub-skills for the skill natural_language_processing") - if err != nil { - logger.Error("failed to get 3 sub-skills for natural_language_processing", "error", err) - } - - logger.Info("3 sub-skills for natural_language_processing", "sub-skills", resp) -} - -func (c *MCPHostClient) runPrompt(ctx context.Context, promptTemplate string, recordJSON []byte) (string, error) { - prompt := promptTemplate + string(recordJSON) - - var ( - response string - err error - ) - - if DebugMode { - logger.Info("Original record", "record", string(recordJSON)) - - // Send a prompt and get response with callbacks to see tool usage - response, err = c.host.PromptWithCallbacks( - ctx, - prompt, - func(name, args string) { - logger.Info("Calling tool", "tool", name) - }, - func(name, args, result string, isError bool) { - if isError { - logger.Error("Tool failed", "tool", name) - } else { - logger.Info("Tool completed", "tool", name) - } - }, - func(chunk string) { - }, - ) - if err != nil { - return "", fmt.Errorf("failed to send prompt: %w", err) - } - - logger.Info("Response", "response", response) - - return response, nil - } - - // No debug, just send the prompt and get the response - response, err = c.host.Prompt(ctx, prompt) - if err != nil { - return "", fmt.Errorf("failed to 
send prompt: %w", err) - } - - return response, nil -} - -func (c *MCPHostClient) parseResponse(response string, fType fieldType) ([]EnrichedField, error) { - // Trim the entire response first to remove leading/trailing whitespace - response = strings.TrimSpace(response) - - // Try to parse as structured JSON first - var enrichmentResp EnrichmentResponse - - err := json.Unmarshal([]byte(response), &enrichmentResp) - if err == nil { - // Get the appropriate field list based on type - var fields []EnrichedField - - switch fType { - case fieldTypeSkills: - fields = enrichmentResp.Skills - case fieldTypeDomains: - fields = enrichmentResp.Domains - default: - return nil, fmt.Errorf("unknown field type: %s", fType) - } - - // Validate and filter fields - validFields := make([]EnrichedField, 0, len(fields)) - for _, field := range fields { - // Basic validation: must contain exactly one forward slash - if strings.Count(field.Name, "/") != 1 { - logger.Warn(fmt.Sprintf("Skipping invalid %s format (must be parent/child)", fType), "name", field.Name) - - continue - } - - // Validate ID is provided - if field.ID == 0 { - logger.Warn(fmt.Sprintf("Skipping %s without valid ID", fType), "name", field.Name) - - continue - } - - // Validate confidence is in valid range - if field.Confidence < 0.0 || field.Confidence > 1.0 { - logger.Warn(fmt.Sprintf("Skipping %s with invalid confidence", fType), "name", field.Name, "confidence", field.Confidence) - - continue - } - - validFields = append(validFields, field) - } - - if len(validFields) == 0 { - return nil, fmt.Errorf("no valid %s found in JSON response", fType) - } - - return validFields, nil - } - - return nil, fmt.Errorf("failed to parse response: %w", err) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package enricher + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "os" + "strings" + + typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + "github.com/agntcy/dir/utils/logging" + "github.com/mark3labs/mcphost/sdk" +) + +var logger = logging.Logger("importer/enricher") + +//go:embed enricher.skills.prompt.md +var defaultSkillsPromptTemplate string + +//go:embed enricher.domains.prompt.md +var defaultDomainsPromptTemplate string + +const ( + DebugMode = false + DefaultConfigFile = "importer/enricher/mcphost.json" + DefaultConfidenceThreshold = 0.5 +) + +type Config struct { + ConfigFile string // Path to mcphost configuration file (e.g., mcphost.json) + SkillsPromptTemplate string // Optional: path to custom skills prompt template file or inline prompt (empty = use default) + DomainsPromptTemplate string // Optional: path to custom domains prompt template file or inline prompt (empty = use default) +} + +type MCPHostClient struct { + host *sdk.MCPHost + skillsPromptTemplate string + domainsPromptTemplate string +} + +// EnrichedField represents a single enriched field (skill or domain) with metadata. +type EnrichedField struct { + Name string `json:"name"` + ID uint32 `json:"id"` + Confidence float64 `json:"confidence"` + Reasoning string `json:"reasoning"` +} + +// EnrichmentResponse represents the structured JSON response from the LLM. +// It can contain either skills or domains depending on the enrichment type. 
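+// For example, a skills response is expected to look like (values here are
+// illustrative; real names and IDs come from the OASF schema tools):
+//
+//	{"skills": [{"name": "parent_skill/sub_skill", "id": 601, "confidence": 0.95, "reasoning": "..."}]}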
+type EnrichmentResponse struct { + Skills []EnrichedField `json:"skills,omitempty"` + Domains []EnrichedField `json:"domains,omitempty"` +} + +func NewMCPHost(ctx context.Context, config Config) (*MCPHostClient, error) { + // Initialize MCP Host + host, err := sdk.New(ctx, &sdk.Options{ + ConfigFile: config.ConfigFile, + }) + if err != nil { + return nil, fmt.Errorf("failed to create MCPHost client: %w", err) + } + + // Load prompt templates - use custom if provided, otherwise use defaults + skillsPrompt, err := loadPromptTemplate(config.SkillsPromptTemplate, defaultSkillsPromptTemplate) + if err != nil { + return nil, fmt.Errorf("failed to load skills prompt template: %w", err) + } + + domainsPrompt, err := loadPromptTemplate(config.DomainsPromptTemplate, defaultDomainsPromptTemplate) + if err != nil { + return nil, fmt.Errorf("failed to load domains prompt template: %w", err) + } + + if DebugMode { + runGetSchemaToolsPrompt(ctx, host) + } + + return &MCPHostClient{ + host: host, + skillsPromptTemplate: skillsPrompt, + domainsPromptTemplate: domainsPrompt, + }, nil +} + +// loadPromptTemplate loads the prompt template from config or uses the provided default. +// If promptTemplateConfig is empty, uses the provided default template. +// If promptTemplateConfig looks like a file path (contains "/" or ends with ".md"), loads from file. +// Otherwise, treats it as an inline prompt template string. +func loadPromptTemplate(promptTemplateConfig, defaultTemplate string) (string, error) { + // Use default embedded template if no custom template specified + if promptTemplateConfig == "" { + logger.Debug("Using default embedded prompt template") + + return defaultTemplate, nil + } + + // Check if it looks like a file path + if strings.Contains(promptTemplateConfig, "/") || strings.HasSuffix(promptTemplateConfig, ".md") { + logger.Debug("Loading prompt template from file", "path", promptTemplateConfig) + + data, err := os.ReadFile(promptTemplateConfig) + if err != nil { + return "", fmt.Errorf("failed to read prompt template file %s: %w", promptTemplateConfig, err) + } + + return string(data), nil + } + + // Treat as inline prompt template + logger.Debug("Using inline prompt template from config") + + return promptTemplateConfig, nil +} + +// fieldType represents the type of field being enriched (skills or domains). +type fieldType string + +const ( + fieldTypeSkills fieldType = "skills" + fieldTypeDomains fieldType = "domains" +) + +// EnrichWithSkills enriches the record with OASF skills using the LLM and MCP tools. +func (c *MCPHostClient) EnrichWithSkills(ctx context.Context, record *typesv1alpha1.Record) (*typesv1alpha1.Record, error) { + return c.enrichField(ctx, record, fieldTypeSkills, c.skillsPromptTemplate) +} + +// EnrichWithDomains enriches the record with OASF domains using the LLM and MCP tools. +func (c *MCPHostClient) EnrichWithDomains(ctx context.Context, record *typesv1alpha1.Record) (*typesv1alpha1.Record, error) { + return c.enrichField(ctx, record, fieldTypeDomains, c.domainsPromptTemplate) +} + +// enrichField is the generic enrichment method that handles both skills and domains. 
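+// The flow: marshal the record to JSON, append it to the prompt template, run
+// the prompt, parse the structured JSON response, then keep only the fields
+// whose confidence meets DefaultConfidenceThreshold and append them to the
+// record's Skills or Domains.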
+func (c *MCPHostClient) enrichField( + ctx context.Context, + record *typesv1alpha1.Record, + fType fieldType, + promptTemplate string, +) (*typesv1alpha1.Record, error) { + // Marshal the record to JSON + recordJSON, err := json.Marshal(record) + if err != nil { + return nil, fmt.Errorf("failed to marshal record: %w", err) + } + + // Run prompt with the specified template + response, err := c.runPrompt(ctx, promptTemplate, recordJSON) + if err != nil { + return nil, fmt.Errorf("failed to run prompt for %s: %w", fType, err) + } + + // Parse response to get enriched fields + enrichedFields, err := c.parseResponse(response, fType) + if err != nil { + return nil, fmt.Errorf("failed to parse %s: %w", fType, err) + } + + // Filter by confidence threshold and add to record + for _, field := range enrichedFields { + if field.Confidence >= DefaultConfidenceThreshold { + switch fType { + case fieldTypeSkills: + record.Skills = append(record.Skills, &typesv1alpha1.Skill{ + Name: field.Name, + Id: field.ID, + }) + case fieldTypeDomains: + record.Domains = append(record.Domains, &typesv1alpha1.Domain{ + Name: field.Name, + Id: field.ID, + }) + } + + logger.Debug(fmt.Sprintf("Added %s", fType), "name", field.Name, "id", field.ID, "confidence", field.Confidence, "reasoning", field.Reasoning) + } else { + logger.Debug(fmt.Sprintf("Skipped low-confidence %s", fType), "name", field.Name, "confidence", field.Confidence, "threshold", DefaultConfidenceThreshold) + } + } + + enrichedRecordJSON, err := json.Marshal(record) + if err != nil { + return nil, fmt.Errorf("failed to marshal enriched record: %w", err) + } + + logger.Debug(fmt.Sprintf("Enriched record with %s", fType), "record", string(enrichedRecordJSON)) + + return record, nil +} + +func runGetSchemaToolsPrompt(ctx context.Context, host *sdk.MCPHost) { + // Get 3 OASF skills + resp, err := host.Prompt(ctx, "Call the tool 'dir-mcp-server__agntcy_oasf_get_schema_skills' and return 3 skill names)") + if err != nil { + logger.Error("failed to get 3 OASF skills", "error", err) + } + + logger.Info("3 OASF skills", "skills", resp) + + // Get 3 sub-skills for the skill natural_language_processing + resp, err = host.Prompt(ctx, "Call the tool 'dir-mcp-server__agntcy_oasf_get_schema_skills' and return 3 sub-skills for the skill natural_language_processing") + if err != nil { + logger.Error("failed to get 3 sub-skills for natural_language_processing", "error", err) + } + + logger.Info("3 sub-skills for natural_language_processing", "sub-skills", resp) +} + +func (c *MCPHostClient) runPrompt(ctx context.Context, promptTemplate string, recordJSON []byte) (string, error) { + prompt := promptTemplate + string(recordJSON) + + var ( + response string + err error + ) + + if DebugMode { + logger.Info("Original record", "record", string(recordJSON)) + + // Send a prompt and get response with callbacks to see tool usage + response, err = c.host.PromptWithCallbacks( + ctx, + prompt, + func(name, args string) { + logger.Info("Calling tool", "tool", name) + }, + func(name, args, result string, isError bool) { + if isError { + logger.Error("Tool failed", "tool", name) + } else { + logger.Info("Tool completed", "tool", name) + } + }, + func(chunk string) { + }, + ) + if err != nil { + return "", fmt.Errorf("failed to send prompt: %w", err) + } + + logger.Info("Response", "response", response) + + return response, nil + } + + // No debug, just send the prompt and get the response + response, err = c.host.Prompt(ctx, prompt) + if err != nil { + return "", fmt.Errorf("failed to 
send prompt: %w", err) + } + + return response, nil +} + +func (c *MCPHostClient) parseResponse(response string, fType fieldType) ([]EnrichedField, error) { + // Trim the entire response first to remove leading/trailing whitespace + response = strings.TrimSpace(response) + + // Try to parse as structured JSON first + var enrichmentResp EnrichmentResponse + + err := json.Unmarshal([]byte(response), &enrichmentResp) + if err == nil { + // Get the appropriate field list based on type + var fields []EnrichedField + + switch fType { + case fieldTypeSkills: + fields = enrichmentResp.Skills + case fieldTypeDomains: + fields = enrichmentResp.Domains + default: + return nil, fmt.Errorf("unknown field type: %s", fType) + } + + // Validate and filter fields + validFields := make([]EnrichedField, 0, len(fields)) + for _, field := range fields { + // Basic validation: must contain exactly one forward slash + if strings.Count(field.Name, "/") != 1 { + logger.Warn(fmt.Sprintf("Skipping invalid %s format (must be parent/child)", fType), "name", field.Name) + + continue + } + + // Validate ID is provided + if field.ID == 0 { + logger.Warn(fmt.Sprintf("Skipping %s without valid ID", fType), "name", field.Name) + + continue + } + + // Validate confidence is in valid range + if field.Confidence < 0.0 || field.Confidence > 1.0 { + logger.Warn(fmt.Sprintf("Skipping %s with invalid confidence", fType), "name", field.Name, "confidence", field.Confidence) + + continue + } + + validFields = append(validFields, field) + } + + if len(validFields) == 0 { + return nil, fmt.Errorf("no valid %s found in JSON response", fType) + } + + return validFields, nil + } + + return nil, fmt.Errorf("failed to parse response: %w", err) +} diff --git a/importer/enricher/enricher.skills.prompt.md b/importer/enricher/enricher.skills.prompt.md index 50f59aab5..611f7aa4c 100644 --- a/importer/enricher/enricher.skills.prompt.md +++ b/importer/enricher/enricher.skills.prompt.md @@ -1,59 +1,59 @@ -CRITICAL: You MUST call tools FIRST before responding! - -STEP 1 - CALL THIS TOOL NOW: -Tool: dir-mcp-server__agntcy_oasf_get_schema_skills -Args: {"version": "0.7.0"} - -Wait for response. The response will show top-level skills like: -{"name": "analytical_skills", ...}, {"name": "retrieval_augmented_generation", ...}, {"name": "natural_language_processing", ...} - -STEP 2 - Pick ONE skill "name" from Step 1 (e.g. "retrieval_augmented_generation") - -STEP 3 - CALL THIS TOOL NOW: -Tool: dir-mcp-server__agntcy_oasf_get_schema_skills -Args: {"version": "0.7.0", "parent_skill": "YOUR_CHOICE_FROM_STEP_2"} - -Wait for response. The response will show sub-skills with "name" and "id" fields like: -{"name": "retrieval_of_information", "caption": "Indexing", "id": 601} -{"name": "document_or_database_question_answering", "caption": "Q&A", "id": 602} - -STEP 4 - Pick 1-5 sub-skills and extract BOTH "name" and "id" from Step 3 - -DO NOT INVENT NAMES! These DO NOT exist: -❌ "information_retrieval_synthesis" -❌ "api_server_operations" -❌ "statistical_analysis" -❌ "data_visualization" -❌ "code_generation" -❌ "data_retrieval" - -Real examples (from actual schema): -✓ "retrieval_augmented_generation/retrieval_of_information" with id 601 -✓ "retrieval_augmented_generation/document_or_database_question_answering" with id 602 -✓ "natural_language_processing/ethical_interaction" with its corresponding id 108 -✓ "analytical_skills/mathematical_reasoning" with its corresponding id 501 - -STEP 5 - OUTPUT FORMAT (CRITICAL): -Return ONLY the raw JSON object below. 
DO NOT wrap in markdown code blocks. -DO NOT use markdown formatting. DO NOT add language tags like "json". -DO NOT add ANY text or explanation before or after the JSON. - -Your response must start with "{" and end with "}". - -Return exactly this structure: -{ - "skills": [ - { - "name": "parent_skill/sub_skill", - "id": 601, - "confidence": 0.95, - "reasoning": "Brief explanation" - } - ] -} - -IMPORTANT: The "id" field MUST be the exact ID returned by the get_schema_skills tool in Step 3. -Do NOT invent or guess IDs. Use only the IDs from the tool response. - -Agent record to analyze: - +CRITICAL: You MUST call tools FIRST before responding! + +STEP 1 - CALL THIS TOOL NOW: +Tool: dir-mcp-server__agntcy_oasf_get_schema_skills +Args: {"version": "0.7.0"} + +Wait for response. The response will show top-level skills like: +{"name": "analytical_skills", ...}, {"name": "retrieval_augmented_generation", ...}, {"name": "natural_language_processing", ...} + +STEP 2 - Pick ONE skill "name" from Step 1 (e.g. "retrieval_augmented_generation") + +STEP 3 - CALL THIS TOOL NOW: +Tool: dir-mcp-server__agntcy_oasf_get_schema_skills +Args: {"version": "0.7.0", "parent_skill": "YOUR_CHOICE_FROM_STEP_2"} + +Wait for response. The response will show sub-skills with "name" and "id" fields like: +{"name": "retrieval_of_information", "caption": "Indexing", "id": 601} +{"name": "document_or_database_question_answering", "caption": "Q&A", "id": 602} + +STEP 4 - Pick 1-5 sub-skills and extract BOTH "name" and "id" from Step 3 + +DO NOT INVENT NAMES! These DO NOT exist: +❌ "information_retrieval_synthesis" +❌ "api_server_operations" +❌ "statistical_analysis" +❌ "data_visualization" +❌ "code_generation" +❌ "data_retrieval" + +Real examples (from actual schema): +✓ "retrieval_augmented_generation/retrieval_of_information" with id 601 +✓ "retrieval_augmented_generation/document_or_database_question_answering" with id 602 +✓ "natural_language_processing/ethical_interaction" with its corresponding id 108 +✓ "analytical_skills/mathematical_reasoning" with its corresponding id 501 + +STEP 5 - OUTPUT FORMAT (CRITICAL): +Return ONLY the raw JSON object below. DO NOT wrap in markdown code blocks. +DO NOT use markdown formatting. DO NOT add language tags like "json". +DO NOT add ANY text or explanation before or after the JSON. + +Your response must start with "{" and end with "}". + +Return exactly this structure: +{ + "skills": [ + { + "name": "parent_skill/sub_skill", + "id": 601, + "confidence": 0.95, + "reasoning": "Brief explanation" + } + ] +} + +IMPORTANT: The "id" field MUST be the exact ID returned by the get_schema_skills tool in Step 3. +Do NOT invent or guess IDs. Use only the IDs from the tool response. 
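+
+Example of a valid response shape (illustrative values only; take real names and IDs from the tool output):
+{"skills": [{"name": "retrieval_augmented_generation/retrieval_of_information", "id": 601, "confidence": 0.9, "reasoning": "The agent retrieves documents"}]}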
+ +Agent record to analyze: + diff --git a/importer/enricher/mcphost.json b/importer/enricher/mcphost.json index 069082978..e92204974 100644 --- a/importer/enricher/mcphost.json +++ b/importer/enricher/mcphost.json @@ -1,15 +1,15 @@ -{ - "mcpServers": { - "dir-mcp-server": { - "command": "dirctl", - "args": [ - "mcp", - "serve" - ], - "env": { - "SCHEMA_URL": "https://schema.oasf.outshift.com" - } - } - }, - "model": "ollama:qwen3:8b" -} +{ + "mcpServers": { + "dir-mcp-server": { + "command": "dirctl", + "args": [ + "mcp", + "serve" + ], + "env": { + "SCHEMA_URL": "https://schema.oasf.outshift.com" + } + } + }, + "model": "ollama:qwen3:8b" +} diff --git a/importer/go.mod b/importer/go.mod index c1310afb5..41a775416 100644 --- a/importer/go.mod +++ b/importer/go.mod @@ -1,279 +1,279 @@ -module github.com/agntcy/dir/importer - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api => ../api - github.com/agntcy/dir/client => ../client - github.com/agntcy/dir/utils => ../utils -) - -require ( - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/client v0.6.0 - github.com/agntcy/dir/utils v0.6.0 - github.com/agntcy/oasf-sdk/pkg v0.0.14 - github.com/mark3labs/mcphost v0.31.3 - github.com/modelcontextprotocol/registry v1.2.3 - google.golang.org/protobuf v1.36.10 -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - cloud.google.com/go v0.121.6 // indirect - cloud.google.com/go/auth v0.17.0 // indirect - cloud.google.com/go/compute/metadata v0.9.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/PuerkitoBio/goquery v1.10.3 // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect - github.com/alecthomas/chroma/v2 v2.20.0 // indirect - github.com/andybalholm/cascadia v1.3.3 // indirect - github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect - github.com/aws/smithy-go v1.24.0 // indirect - github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect - github.com/aymerick/douceur v0.2.0 // indirect - github.com/bahlo/generic-list-go v0.2.0 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/buger/jsonparser v1.1.1 // indirect - github.com/bytedance/gopkg v0.1.3 // indirect - 
github.com/bytedance/sonic v1.14.1 // indirect - github.com/bytedance/sonic/loader v0.3.0 // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/charmbracelet/bubbles v0.21.0 // indirect - github.com/charmbracelet/bubbletea v1.3.10 // indirect - github.com/charmbracelet/colorprofile v0.3.2 // indirect - github.com/charmbracelet/glamour v0.10.0 // indirect - github.com/charmbracelet/harmonica v0.2.0 // indirect - github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect - github.com/charmbracelet/x/ansi v0.10.2 // indirect - github.com/charmbracelet/x/cellbuf v0.0.13 // indirect - github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect - github.com/charmbracelet/x/term v0.2.1 // indirect - github.com/cloudwego/base64x v0.1.6 // indirect - github.com/cloudwego/eino v0.5.0-alpha.11 // indirect - github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect - github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect - github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect - github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/djherbis/times v1.6.0 // indirect - github.com/dlclark/regexp2 v1.11.5 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/eino-contrib/jsonschema v1.0.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect - github.com/evanphx/json-patch v0.5.2 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.10 // indirect - github.com/getkin/kin-openapi v0.120.0 // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling 
v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/go-github/v73 v73.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/s2a-go v0.1.9 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect - github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/goph/emperror v0.17.2 // indirect - github.com/gorilla/css v1.0.1 // indirect - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/invopop/jsonschema v0.13.0 // indirect - github.com/invopop/yaml v0.2.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.3.0 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/lucasb-eyer/go-colorful v1.3.0 // indirect - github.com/mailru/easyjson v0.9.0 // indirect - github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect - github.com/mark3labs/mcp-go v0.41.1 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.17 // indirect - github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect - github.com/microcosm-cc/bluemonday v1.0.27 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/moby/term v0.5.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect - github.com/muesli/cancelreader v0.2.2 // indirect - github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.16.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multihash 
v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nikolalohinski/gonja v1.5.3 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/ollama/ollama v0.12.9 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/perimeterx/marshmallow v1.1.5 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/cosign/v3 v3.0.3 // indirect - github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/rekor v1.4.3 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/sigstore v1.10.0 // indirect - github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/cobra v1.10.2 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/spf13/viper v1.21.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/thales-e-security/pool v0.0.2 // indirect - github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect - github.com/tidwall/gjson v1.18.0 // indirect - github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.1 // indirect - github.com/tidwall/sjson v1.2.5 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect - github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect - github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect - github.com/yargevad/filepathx v1.0.0 // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - github.com/yuin/goldmark v1.7.13 // indirect - github.com/yuin/goldmark-emoji v1.0.6 // indirect - gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/arch v0.20.0 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.14.0 // indirect - google.golang.org/genai v1.22.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect - google.golang.org/grpc v1.77.0 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.34.2 // indirect - k8s.io/apimachinery v0.34.2 // indirect - k8s.io/client-go v0.34.2 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect - lukechampine.com/blake3 v1.4.0 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect -) +module github.com/agntcy/dir/importer + +go 1.25.2 + +replace ( + github.com/agntcy/dir/api => ../api + github.com/agntcy/dir/client => ../client + github.com/agntcy/dir/utils => ../utils +) + +require ( + buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 + github.com/agntcy/dir/api v0.6.0 + github.com/agntcy/dir/client v0.6.0 + github.com/agntcy/dir/utils v0.6.0 + github.com/agntcy/oasf-sdk/pkg v0.0.14 + github.com/mark3labs/mcphost v0.31.3 + github.com/modelcontextprotocol/registry v1.2.3 + google.golang.org/protobuf v1.36.10 +) + +require ( + buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect + cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.17.0 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/JohannesKaufmann/html-to-markdown v1.6.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/PuerkitoBio/goquery v1.10.3 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/alecthomas/chroma/v2 v2.20.0 // indirect + github.com/andybalholm/cascadia v1.3.3 // indirect + github.com/anthropics/anthropic-sdk-go v1.10.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/atotto/clipboard v0.1.4 // indirect + github.com/aws/aws-sdk-go-v2 v1.40.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 // 
indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 // indirect + github.com/aws/smithy-go v1.24.0 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.1 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/charmbracelet/bubbles v0.21.0 // indirect + github.com/charmbracelet/bubbletea v1.3.10 // indirect + github.com/charmbracelet/colorprofile v0.3.2 // indirect + github.com/charmbracelet/glamour v0.10.0 // indirect + github.com/charmbracelet/harmonica v0.2.0 // indirect + github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect + github.com/charmbracelet/x/ansi v0.10.2 // indirect + github.com/charmbracelet/x/cellbuf v0.0.13 // indirect + github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/cloudwego/eino v0.5.0-alpha.11 // indirect + github.com/cloudwego/eino-ext/components/model/claude v0.1.0 // indirect + github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 // indirect + github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 // indirect + github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/coreos/go-oidc/v3 v3.17.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/djherbis/times v1.6.0 // indirect + github.com/dlclark/regexp2 v1.11.5 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/eino-contrib/jsonschema v1.0.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/evanphx/json-patch v0.5.2 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.10 // indirect + github.com/getkin/kin-openapi v0.120.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 
// indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.24.1 // indirect + github.com/go-openapi/errors v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/loads v0.23.2 // indirect + github.com/go-openapi/runtime v0.29.2 // indirect + github.com/go-openapi/spec v0.22.1 // indirect + github.com/go-openapi/strfmt v0.25.0 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-openapi/validate v0.25.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-containerregistry v0.20.7 // indirect + github.com/google/go-github/v73 v73.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/goph/emperror v0.17.2 // indirect + github.com/gorilla/css v1.0.1 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/in-toto/attestation v1.1.2 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect + github.com/invopop/yaml v0.2.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/lucasb-eyer/go-colorful v1.3.0 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mark3labs/mcp-filesystem-server v0.11.1 // indirect + github.com/mark3labs/mcp-go v0.41.1 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.17 // indirect + github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 // indirect + github.com/microcosm-cc/bluemonday 
v1.0.27 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.16.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nikolalohinski/gonja v1.5.3 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/ollama/ollama v0.12.9 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/sigstore/cosign/v3 v3.0.3 // indirect + github.com/sigstore/protobuf-specs v0.5.0 // indirect + github.com/sigstore/rekor v1.4.3 // indirect + github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/sigstore v1.10.0 // indirect + github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect + github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + 
github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + github.com/yargevad/filepathx v1.0.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/yuin/goldmark v1.7.13 // indirect + github.com/yuin/goldmark-emoji v1.0.6 // indirect + gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/arch v0.20.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/genai v1.22.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect + google.golang.org/grpc v1.77.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.34.2 // indirect + k8s.io/apimachinery v0.34.2 // indirect + k8s.io/client-go v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + lukechampine.com/blake3 v1.4.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/importer/go.sum b/importer/go.sum index 2df1fb76a..bfebb4258 100644 --- a/importer/go.sum +++ b/importer/go.sum @@ -1,991 +1,991 @@ -al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= -al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= 
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= -cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= -cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= -cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= -cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= 
-github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k= -github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ= -github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= -github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= -github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= -github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= -github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= -github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= -github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= -github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= -github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o= -github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= -github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= -github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= -github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= -github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= -github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= -github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= -github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI= -github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= -github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= -github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko= -github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= -github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= 
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= -github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= -github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= -github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= -github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= -github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= -github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= -github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= -github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= -github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= -github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8= -github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY= -github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w= -github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc= -github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= -github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= -github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg= -github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw= -github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4= -github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= -github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= -github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY= -github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk= -github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= -github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= -github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE= -github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA= -github.com/charmbracelet/x/ansi v0.10.2 
h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw= -github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8= -github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k= -github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= -github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ= -github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= -github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4= -github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms= -github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= -github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= -github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= -github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc= -github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc= -github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0= -github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ= -github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw= -github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo= -github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM= -github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8= -github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c= -github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= -github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= -github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= -github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ= -github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= -github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww= -github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= -github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= -github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= -github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= -github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg= -github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= -github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= -github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= -github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= -github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= -github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= -github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= -github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= -github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= -github.com/go-openapi/runtime v0.29.2/go.mod 
h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= -github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= -github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-rod/rod v0.116.2 
h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= -github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= -github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= -github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= -github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= -github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= -github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= -github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= -github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= -github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18= -github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= -github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= -github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 
h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= -github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= -github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= -github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= -github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= -github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= 
-github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= -github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= -github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= -github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= -github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= -github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= -github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.3.0 
h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= -github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= -github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= -github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88= -github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ= -github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= -github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= -github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM= -github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= -github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= -github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8= -github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk= -github.com/microcosm-cc/bluemonday v1.0.27/go.mod 
h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= -github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= -github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88= -github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= -github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= -github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= -github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= -github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase 
v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c= -github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54= -github.com/ollama/ollama v0.12.9/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= -github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 
v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y= -github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod 
h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI= -github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg= -github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= -github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= -github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= -github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= -github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= -github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= -github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= 
-github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= -github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= -github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= -github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= -github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.7.1/go.mod 
h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= -github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA= -github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg= -github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs= -github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA= -github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= -github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= -gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= -go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 
h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= -golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= -google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 
h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod 
h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= 
+cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k= +github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo= +github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod 
h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
+github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I=
+github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM=
+github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
+github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw=
+github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA=
+github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg=
+github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
+github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
+github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
+github.com/anthropics/anthropic-sdk-go v1.10.0 h1:jDKQTfC0miIEj21eMmPrNSLKTNdNa3nHZOhd4wZz1cI=
+github.com/anthropics/anthropic-sdk-go v1.10.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
+github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
+github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
+github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
+github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
+github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2 h1:t9yYsydLYNBk9cJ73rgPhPWqOh/52fcWDQB5b1JsKSY=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.2/go.mod h1:IusfVNTmiSN3t4rhxWFaBAqn+mcNdwKtPcV16eYdgko=
+github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
+github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
+github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
+github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
+github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
+github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
+github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
+github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
+github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
+github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
+github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
+github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
+github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
+github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8=
+github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY=
+github.com/bytedance/sonic v1.14.1 h1:FBMC0zVz5XUmE4z9wF4Jey0An5FueFvOsTKKKtwIl7w=
+github.com/bytedance/sonic v1.14.1/go.mod h1:gi6uhQLMbTdeP0muCnrjHLeCUPyb70ujhnNlhOylAFc=
+github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
+github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
+github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
+github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
+github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
+github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI=
+github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI=
+github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
+github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
+github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
+github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
+github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
+github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
+github.com/charmbracelet/x/ansi v0.10.2 h1:ith2ArZS0CJG30cIUfID1LXN7ZFXRCww6RUvAPA+Pzw=
+github.com/charmbracelet/x/ansi v0.10.2/go.mod h1:HbLdJjQH4UH4AqA2HpRWuWNluRE6zxJH/yteYEYCFa8=
+github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
+github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
+github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
+github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b h1:DZ2Li1O0j+wWw6AgEUDrODB7PAIKpmOy65yu1UBPYc4=
+github.com/charmbracelet/x/exp/slice v0.0.0-20250902204034-1cdc10c66d5b/go.mod h1:vI5nDVMWi6veaYH+0Fmvpbe/+cv/iJfMntdh+N0+Tms=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
+github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
+github.com/cloudwego/eino v0.5.0-alpha.11 h1:KhjJ8JTAI/Ed5iCHWKUn1v4j1sDCxqV26HRoUQpSRFc=
+github.com/cloudwego/eino v0.5.0-alpha.11/go.mod h1:S38tlNO4cNqFfGJKQSJZimxjzc9JDJKdf2eW3FEEfdc=
+github.com/cloudwego/eino-ext/components/model/claude v0.1.0 h1:UZVwYzV7gOBCBKHGdAT2fZzm/+2TBEfDDYn713EvLF0=
+github.com/cloudwego/eino-ext/components/model/claude v0.1.0/go.mod h1:lacy0WE3yKuOSxrhJQKqWAxn3LiUy/CJ91jU7nLDNNQ=
+github.com/cloudwego/eino-ext/components/model/ollama v0.1.2 h1:WxJ+7oXnr3AhM6u4VbFF3L2ionxCrPfmLetx7V+zthw=
+github.com/cloudwego/eino-ext/components/model/ollama v0.1.2/go.mod h1:OgGMCiR/G/RnOWaJvdK8pVSxAzoz2SlCqim43oFTuwo=
+github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845 h1:nxflfiBwWNPoKS9X4SMhmT+si7rtYv+lQzIyPJik4DM=
+github.com/cloudwego/eino-ext/components/model/openai v0.0.0-20250903035842-96774a3ec845/go.mod h1:QQhCuQxuBAVWvu/YAZBhs/RsR76mUigw59Tl0kh04C8=
+github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb h1:RMslzyijc3bi9EkqCulpS0hZupTl1y/wayR3+fVRN/c=
+github.com/cloudwego/eino-ext/libs/acl/openai v0.0.0-20250826113018-8c6f6358d4bb/go.mod h1:fHn/6OqPPY1iLLx9wzz+MEVT5Dl9gwuZte1oLEnCoYw=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
+github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
+github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
+github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
+github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/eino-contrib/jsonschema v1.0.0 h1:dXxbhGNZuI3+xNi8x3JT8AGyoXz6Pff6mRvmpjVl5Ww=
+github.com/eino-contrib/jsonschema v1.0.0/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
+github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
+github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg=
+github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
+github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
+github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
+github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
+github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
+github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
+github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
+github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
+github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
+github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
+github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
+github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
+github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
+github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
+github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
+github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
+github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
+github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
+github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
+github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
+github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
+github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
+github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
+github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
+github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
+github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
+github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
+github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
+github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
+github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
+github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
+github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
+github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
+github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
+github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
+github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
+github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
+github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
+github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
+github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
+github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
+github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
+github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
+github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24=
+github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE=
+github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
+github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
+github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
+github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
+github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
+github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
+github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
+github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
+github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
+github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
+github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
+github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
+github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
+github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
+github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
+github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
+github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
+github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
+github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
+github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
+github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
+github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
+github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mark3labs/mcp-filesystem-server v0.11.1 h1:7uKIZRMaKWfgvtDj/uLAvo0+7Mwb8gxo5DJywhqFW88=
+github.com/mark3labs/mcp-filesystem-server v0.11.1/go.mod h1:xDqJizVYWZ5a31Mt4xuYbVku2AR/kT56H3O0SbpANoQ=
+github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA=
+github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
+github.com/mark3labs/mcphost v0.31.3 h1:v8kWozQXPXHTBKT2GMo1CCtjz5yZWKMJdXSl9awH3pM=
+github.com/mark3labs/mcphost v0.31.3/go.mod h1:rJ5SEO4eo+Vs3XfUAJdxgioB+CVXt02sl+37r0Erato=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
+github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
+github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0 h1:nIohpHs1ViKR0SVgW/cbBstHjmnqFZDM9RqgX9m9Xu8=
+github.com/meguminnnnnnnnn/go-openai v0.0.0-20250821095446-07791bea23a0/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
+github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
+github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
+github.com/modelcontextprotocol/registry v1.2.3 h1:PaQTn7VxJ0xlgiI+OJUHrG7H12x8uP27wepYKJRaD88=
+github.com/modelcontextprotocol/registry v1.2.3/go.mod h1:WcvDr/Cn7JS7MHdSsNPVlLZYwfmzG1/3zTtuW23IRCc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
+github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
+github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
+github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
+github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
+github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
+github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
+github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
+github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
+github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
+github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c=
+github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
+github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/ollama/ollama v0.12.9 h1:qvhEcBZtaTTiXoe/elPnKsbf3z0s0bmU9urCIYUkV54=
+github.com/ollama/ollama v0.12.9/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
+github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
+github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
+github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
+github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
+github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
+github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A=
+github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk=
+github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4=
+github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k=
+github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
+github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g=
+github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
+github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
+github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA=
+github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4=
+github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY=
+github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
+github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0=
+github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o=
+github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo=
+github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU=
+github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI=
+github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y=
+github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg=
+github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY=
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M=
+github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc=
+github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU=
+github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4=
+github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y=
+github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0=
+github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f h1:Z2cODYsUxQPofhpYRMQVwWz4yUVpHF+vPi+eUdruUYI=
+github.com/slongfield/pyfmt v0.0.0-20220222012616-ea85ff4c361f/go.mod h1:JqzWyvTuI2X4+9wOHmKSQCYxybB/8j6Ko43qVmXDuZg=
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
+github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
+github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
+github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
+github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
+github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
+github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
+github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
+github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
+github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
+github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
+github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
+github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg=
+github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU=
+github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
+github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
+github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA=
+github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw=
+github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI=
+github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0=
+github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0=
+github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw=
+github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw=
+github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
+github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw=
+github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI=
+github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
+github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
+github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
+github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
+github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=
+github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
+github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
+github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
+github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
+github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
+github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark v1.7.13 h1:GPddIs617DnBLFFVJFgpo1aBfe/4xcvMc3SB5t/D0pA=
+github.com/yuin/goldmark v1.7.13/go.mod h1:ip/1k0VRfGynBgxOz0yCqHrbZXhcjxyuS66Brc7iBKg=
+github.com/yuin/goldmark-emoji v1.0.6 h1:QWfF2FYaXwL74tfGOW5izeiZepUDroDJfWubQI9HTHs=
+github.com/yuin/goldmark-emoji v1.0.6/go.mod h1:ukxJDKFpdFb5x0a5HqbdlcKtebh086iJpI31LTKmWuA=
+github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s=
+github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI=
+gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw=
+gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk=
+go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
+go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA=
+go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
+go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
+golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
+golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
+golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
+golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/sync
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genai v1.22.0 h1:5hrEhXXWJQZa3tdPocl4vQ/0w6myEAxdNns2Kmx0f4Y= +google.golang.org/genai v1.22.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod 
h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/importer/mcp/fetcher.go b/importer/mcp/fetcher.go index d0a63b0d2..d0ed176f3 100644 --- a/importer/mcp/fetcher.go +++ b/importer/mcp/fetcher.go @@ -1,191 +1,191 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "slices" - "strconv" - "time" - - mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" -) - -const ( - // defaultPageLimit is the default number of servers to fetch per page. - defaultPageLimit = 30 -) - -// Supported filters https://registry.modelcontextprotocol.io/docs#/operations/list-servers#Query-Parameters -// - search: Filter by server name (substring match) -// - version: Filter by version ('latest' for latest version, or an exact version like '1.2.3') -// - updated_since: Filter by updated time (RFC3339 datetime) -// - limit: Number of servers per page (default 30) -// - cursor: Pagination cursor -var supportedFilters = []string{ - "search", - "version", - "updated_since", - "limit", - "cursor", -} - -// Fetcher implements the pipeline.Fetcher interface for MCP registry. -type Fetcher struct { - url *url.URL - httpClient *http.Client - filters map[string]string - limit int -} - -// NewFetcher creates a new MCP fetcher. -func NewFetcher(baseURL string, filters map[string]string, limit int) (*Fetcher, error) { - // Parse and validate base URL - u, err := url.Parse(baseURL + "/servers") - if err != nil { - return nil, fmt.Errorf("failed to parse base URL: %w", err) - } - - // Validate filters - for key := range filters { - if !slices.Contains(supportedFilters, key) { - return nil, fmt.Errorf("unsupported filter: %s", key) - } - } - - return &Fetcher{ - url: u, - httpClient: &http.Client{ - Timeout: 30 * time.Second, //nolint:mnd - }, - filters: filters, - limit: limit, - }, nil -} - -// Fetch retrieves servers from the MCP registry and sends them to the output channel. 
-func (f *Fetcher) Fetch(ctx context.Context) (<-chan interface{}, <-chan error) { - // Use buffered channel to allow fetcher to work ahead of transformers - outputCh := make(chan interface{}, 50) //nolint:mnd - errCh := make(chan error, 1) - - go func() { - defer close(outputCh) - defer close(errCh) - - cursor := "" - count := 0 - - for { - // Check context cancellation - select { - case <-ctx.Done(): - errCh <- ctx.Err() - - return - default: - } - - // Fetch one page - page, nextCursor, err := f.listServersPage(ctx, cursor) - if err != nil { - errCh <- err - - return - } - - // Stream each server as soon as it's available - for _, server := range page { - // Check if limit is reached (limit <= 0 means no limit) - if f.limit > 0 && count >= f.limit { - return - } - - select { - case <-ctx.Done(): - errCh <- ctx.Err() - - return - case outputCh <- server: - count++ - } - } - - // Check if there are more pages - if nextCursor == "" { - break - } - - cursor = nextCursor - } - }() - - return outputCh, errCh -} - -// listServersPage fetches a single page of servers from the MCP registry. -func (f *Fetcher) listServersPage(ctx context.Context, cursor string) ([]mcpapiv0.ServerResponse, string, error) { - // Add filters as query parameters - query := f.url.Query() - - for key, value := range f.filters { - if value != "" { - query.Set(key, value) - } - } - - // Add cursor if provided - if cursor != "" { - query.Set("cursor", cursor) - } - - // Add limit parameter to control page size - query.Set("limit", strconv.Itoa(defaultPageLimit)) - - f.url.RawQuery = query.Encode() - - // Create request - req, err := http.NewRequestWithContext(ctx, http.MethodGet, f.url.String(), nil) - if err != nil { - return nil, "", fmt.Errorf("failed to create request: %w", err) - } - - req.Header.Set("Accept", "application/json") - - // TODO: Implement retry logic for transient failures - // Execute request - resp, err := f.httpClient.Do(req) - if err != nil { - return nil, "", fmt.Errorf("failed to fetch servers: %w", err) - } - defer resp.Body.Close() - - // Check status code - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - - return nil, "", fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body)) - } - - // Parse response - var registryResp mcpapiv0.ServerListResponse - if err := json.NewDecoder(resp.Body).Decode(®istryResp); err != nil { - return nil, "", fmt.Errorf("failed to decode response: %w", err) - } - - return registryResp.Servers, registryResp.Metadata.NextCursor, nil -} - -// ServerResponseFromInterface converts an interface{} back to ServerResponse. -// This is a helper for the transformer stage. -func ServerResponseFromInterface(i interface{}) (mcpapiv0.ServerResponse, bool) { - resp, ok := i.(mcpapiv0.ServerResponse) - - return resp, ok -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "slices" + "strconv" + "time" + + mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" +) + +const ( + // defaultPageLimit is the default number of servers to fetch per page. 
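Aside: two different limits are in play in the hunk above. `f.limit` caps the total number of servers streamed by `Fetch`, while the `limit` query parameter controls page size. Note that `listServersPage` applies the caller's filters first and then unconditionally overwrites `limit` with `defaultPageLimit`, so a user-supplied `limit` filter is silently ignored even though it is listed in `supportedFilters`. If honoring it is intended, setting the default before the filters would fix that; a minimal sketch under the same names (`buildQuery` is a hypothetical helper, not part of this change):

```go
// Sketch: apply the default page size first so an explicit "limit" filter
// can still override it. Assumes net/url and strconv are imported and
// defaultPageLimit is the constant defined in this file.
func buildQuery(filters map[string]string, cursor string) url.Values {
	q := url.Values{}
	q.Set("limit", strconv.Itoa(defaultPageLimit)) // default first...

	for key, value := range filters {
		if value != "" {
			q.Set(key, value) // ...so a caller-supplied "limit" wins
		}
	}

	if cursor != "" {
		q.Set("cursor", cursor)
	}

	return q
}
```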
+ defaultPageLimit = 30 +) + +// Supported filters https://registry.modelcontextprotocol.io/docs#/operations/list-servers#Query-Parameters +// - search: Filter by server name (substring match) +// - version: Filter by version ('latest' for latest version, or an exact version like '1.2.3') +// - updated_since: Filter by updated time (RFC3339 datetime) +// - limit: Number of servers per page (default 30) +// - cursor: Pagination cursor +var supportedFilters = []string{ + "search", + "version", + "updated_since", + "limit", + "cursor", +} + +// Fetcher implements the pipeline.Fetcher interface for MCP registry. +type Fetcher struct { + url *url.URL + httpClient *http.Client + filters map[string]string + limit int +} + +// NewFetcher creates a new MCP fetcher. +func NewFetcher(baseURL string, filters map[string]string, limit int) (*Fetcher, error) { + // Parse and validate base URL + u, err := url.Parse(baseURL + "/servers") + if err != nil { + return nil, fmt.Errorf("failed to parse base URL: %w", err) + } + + // Validate filters + for key := range filters { + if !slices.Contains(supportedFilters, key) { + return nil, fmt.Errorf("unsupported filter: %s", key) + } + } + + return &Fetcher{ + url: u, + httpClient: &http.Client{ + Timeout: 30 * time.Second, //nolint:mnd + }, + filters: filters, + limit: limit, + }, nil +} + +// Fetch retrieves servers from the MCP registry and sends them to the output channel. +func (f *Fetcher) Fetch(ctx context.Context) (<-chan interface{}, <-chan error) { + // Use buffered channel to allow fetcher to work ahead of transformers + outputCh := make(chan interface{}, 50) //nolint:mnd + errCh := make(chan error, 1) + + go func() { + defer close(outputCh) + defer close(errCh) + + cursor := "" + count := 0 + + for { + // Check context cancellation + select { + case <-ctx.Done(): + errCh <- ctx.Err() + + return + default: + } + + // Fetch one page + page, nextCursor, err := f.listServersPage(ctx, cursor) + if err != nil { + errCh <- err + + return + } + + // Stream each server as soon as it's available + for _, server := range page { + // Check if limit is reached (limit <= 0 means no limit) + if f.limit > 0 && count >= f.limit { + return + } + + select { + case <-ctx.Done(): + errCh <- ctx.Err() + + return + case outputCh <- server: + count++ + } + } + + // Check if there are more pages + if nextCursor == "" { + break + } + + cursor = nextCursor + } + }() + + return outputCh, errCh +} + +// listServersPage fetches a single page of servers from the MCP registry. 
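A second subtlety: `listServersPage` (defined below, identically to the removed hunk above) writes the encoded query back onto the shared `f.url`, so query state persists across calls and concurrent use of one `Fetcher` would race. A non-mutating sketch under the same field names (`pageURL` is a hypothetical helper):

```go
// Sketch: build the request URL per call instead of mutating f.url.
// Copying the url.URL value is safe here because only RawQuery is replaced.
func (f *Fetcher) pageURL(cursor string) string {
	u := *f.url
	q := u.Query()

	for key, value := range f.filters {
		if value != "" {
			q.Set(key, value)
		}
	}

	if cursor != "" {
		q.Set("cursor", cursor)
	}

	q.Set("limit", strconv.Itoa(defaultPageLimit))
	u.RawQuery = q.Encode()

	return u.String()
}
```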
+func (f *Fetcher) listServersPage(ctx context.Context, cursor string) ([]mcpapiv0.ServerResponse, string, error) { + // Add filters as query parameters + query := f.url.Query() + + for key, value := range f.filters { + if value != "" { + query.Set(key, value) + } + } + + // Add cursor if provided + if cursor != "" { + query.Set("cursor", cursor) + } + + // Add limit parameter to control page size + query.Set("limit", strconv.Itoa(defaultPageLimit)) + + f.url.RawQuery = query.Encode() + + // Create request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, f.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Accept", "application/json") + + // TODO: Implement retry logic for transient failures + // Execute request + resp, err := f.httpClient.Do(req) + if err != nil { + return nil, "", fmt.Errorf("failed to fetch servers: %w", err) + } + defer resp.Body.Close() + + // Check status code + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + + return nil, "", fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, string(body)) + } + + // Parse response + var registryResp mcpapiv0.ServerListResponse + if err := json.NewDecoder(resp.Body).Decode(®istryResp); err != nil { + return nil, "", fmt.Errorf("failed to decode response: %w", err) + } + + return registryResp.Servers, registryResp.Metadata.NextCursor, nil +} + +// ServerResponseFromInterface converts an interface{} back to ServerResponse. +// This is a helper for the transformer stage. +func ServerResponseFromInterface(i interface{}) (mcpapiv0.ServerResponse, bool) { + resp, ok := i.(mcpapiv0.ServerResponse) + + return resp, ok +} diff --git a/importer/mcp/fetcher_test.go b/importer/mcp/fetcher_test.go index 6070eebc2..e5c8753e7 100644 --- a/importer/mcp/fetcher_test.go +++ b/importer/mcp/fetcher_test.go @@ -1,76 +1,76 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "context" - "testing" -) - -func TestFetcher_Fetch(t *testing.T) { - // Note: This is an integration-style test that would require a real MCP registry - // or a mock HTTP server. For now, we'll just test the basic structure. 
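For comparison with the structural smoke test here, a complete consumer drains both channels: the goroutine inside `Fetch` closes them once pagination finishes, so ranging over the data channel and then receiving from the error channel is enough. A sketch against the exported API above (the base URL and filter values are placeholders):

```go
// Sketch: consuming a Fetcher end to end. Assumes the NewFetcher, Fetch,
// and ServerResponseFromInterface signatures shown earlier in this diff.
func consumeAll(ctx context.Context) error {
	fetcher, err := NewFetcher("https://registry.example.com/v0", map[string]string{"version": "latest"}, 100)
	if err != nil {
		return err
	}

	dataCh, errCh := fetcher.Fetch(ctx)

	for item := range dataCh {
		if resp, ok := ServerResponseFromInterface(item); ok {
			fmt.Printf("fetched %s@%s\n", resp.Server.Name, resp.Server.Version)
		}
	}

	// After dataCh closes, errCh either yields the terminal error or is
	// already closed, in which case this receive returns nil.
	return <-errCh
}
```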
- ctx := context.Background() - - // Create a fetcher pointing to a non-existent URL (will fail but tests structure) - fetcher, err := NewFetcher("http://localhost:9999", nil, 1) - if err != nil { - t.Fatalf("failed to create fetcher: %v", err) - } - - dataCh, errCh := fetcher.Fetch(ctx) - - // Verify channels are created - if dataCh == nil { - t.Error("expected data channel, got nil") - } - - if errCh == nil { - t.Error("expected error channel, got nil") - } - - // Drain channels (will likely get connection error) - go func() { - for range dataCh { - // Consume data - } - }() - - for range errCh { - // Consume errors - expected in this test - } -} - -func TestServerResponseFromInterface(t *testing.T) { - tests := []struct { - name string - input interface{} - expectOk bool - }{ - { - name: "nil input", - input: nil, - expectOk: false, - }, - { - name: "wrong type", - input: "not a server response", - expectOk: false, - }, - { - name: "wrong type - int", - input: 42, - expectOk: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, ok := ServerResponseFromInterface(tt.input) - if ok != tt.expectOk { - t.Errorf("expected ok=%v, got ok=%v", tt.expectOk, ok) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "context" + "testing" +) + +func TestFetcher_Fetch(t *testing.T) { + // Note: This is an integration-style test that would require a real MCP registry + // or a mock HTTP server. For now, we'll just test the basic structure. + ctx := context.Background() + + // Create a fetcher pointing to a non-existent URL (will fail but tests structure) + fetcher, err := NewFetcher("http://localhost:9999", nil, 1) + if err != nil { + t.Fatalf("failed to create fetcher: %v", err) + } + + dataCh, errCh := fetcher.Fetch(ctx) + + // Verify channels are created + if dataCh == nil { + t.Error("expected data channel, got nil") + } + + if errCh == nil { + t.Error("expected error channel, got nil") + } + + // Drain channels (will likely get connection error) + go func() { + for range dataCh { + // Consume data + } + }() + + for range errCh { + // Consume errors - expected in this test + } +} + +func TestServerResponseFromInterface(t *testing.T) { + tests := []struct { + name string + input interface{} + expectOk bool + }{ + { + name: "nil input", + input: nil, + expectOk: false, + }, + { + name: "wrong type", + input: "not a server response", + expectOk: false, + }, + { + name: "wrong type - int", + input: 42, + expectOk: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, ok := ServerResponseFromInterface(tt.input) + if ok != tt.expectOk { + t.Errorf("expected ok=%v, got ok=%v", tt.expectOk, ok) + } + }) + } +} diff --git a/importer/mcp/importer.go b/importer/mcp/importer.go index 74374c27b..5ea29c365 100644 --- a/importer/mcp/importer.go +++ b/importer/mcp/importer.go @@ -1,99 +1,99 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "context" - "fmt" - - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/pipeline" - "github.com/agntcy/dir/importer/types" -) - -// Importer implements the Importer interface for MCP registry using a pipeline architecture. -type Importer struct { - client config.ClientInterface - registryURL string -} - -// NewImporter creates a new MCP importer instance. 
-// The client parameter is used for pushing records to DIR. -func NewImporter(client config.ClientInterface, cfg config.Config) (types.Importer, error) { - return &Importer{ - client: client, - registryURL: cfg.RegistryURL, - }, nil -} - -// Run executes the import operation for the MCP registry using a pipeline: -// - Normal mode: Three-stage pipeline (Fetcher -> Transformer -> Pusher) -// - Dry-run mode: Two-stage pipeline (Fetcher -> Transformer). -func (i *Importer) Run(ctx context.Context, cfg config.Config) (*types.ImportResult, error) { - // Create pipeline stages - fetcher, err := NewFetcher(i.registryURL, cfg.Filters, cfg.Limit) - if err != nil { - return nil, fmt.Errorf("failed to create fetcher: %w", err) - } - - transformer, err := NewTransformer(ctx, cfg) - if err != nil { - return nil, fmt.Errorf("failed to create transformer: %w", err) - } - - // Configure pipeline with concurrency settings - pipelineConfig := pipeline.Config{ - TransformerWorkers: cfg.Concurrency, - } - - // Create and run the appropriate pipeline based on dry-run mode - var pipelineResult *pipeline.Result - - //nolint:nestif // Complexity is acceptable for pipeline setup with different modes - if cfg.DryRun { - // Create duplicate checker for accurate dry-run preview (unless --force is set) - var duplicateChecker pipeline.DuplicateChecker - if !cfg.Force { - duplicateChecker, err = pipeline.NewMCPDuplicateChecker(ctx, i.client, cfg.Debug) - if err != nil { - return nil, fmt.Errorf("failed to create duplicate checker: %w", err) - } - } - - // Use dry-run pipeline (fetch, filter duplicates, and transform only - no push) - p := pipeline.NewDryRun(fetcher, duplicateChecker, transformer, pipelineConfig) - pipelineResult, err = p.Run(ctx) - } else { - pusher := pipeline.NewClientPusher(i.client, cfg.Debug) - - // If --force is set, duplicateChecker will be nil (no deduplication) - // Otherwise, build cache of existing records for deduplication - var duplicateChecker pipeline.DuplicateChecker - if !cfg.Force { - duplicateChecker, err = pipeline.NewMCPDuplicateChecker(ctx, i.client, cfg.Debug) - if err != nil { - return nil, fmt.Errorf("failed to create duplicate checker: %w", err) - } - } - - // Create full pipeline with optional duplicate checker - p := pipeline.New(fetcher, duplicateChecker, transformer, pusher, pipelineConfig) - pipelineResult, err = p.Run(ctx) - } - - if err != nil { - return nil, fmt.Errorf("failed to run pipeline: %w", err) - } - - // Convert pipeline result to import result - result := &types.ImportResult{ - TotalRecords: pipelineResult.TotalRecords, - ImportedCount: pipelineResult.ImportedCount, - SkippedCount: pipelineResult.SkippedCount, - FailedCount: pipelineResult.FailedCount, - Errors: pipelineResult.Errors, - } - - return result, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "context" + "fmt" + + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/importer/pipeline" + "github.com/agntcy/dir/importer/types" +) + +// Importer implements the Importer interface for MCP registry using a pipeline architecture. +type Importer struct { + client config.ClientInterface + registryURL string +} + +// NewImporter creates a new MCP importer instance. +// The client parameter is used for pushing records to DIR. 
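`Run` (shown in full in the removed hunk above) constructs the same duplicate checker in both the dry-run and the push branches; folding that into a helper would keep the `--force` semantics in one place. A sketch using only types visible in this diff (`newDuplicateChecker` is our name, not the codebase's):

```go
// Sketch: shared duplicate-checker construction for both pipeline modes.
// A nil checker means "no deduplication", which is what --force requests.
func newDuplicateChecker(ctx context.Context, client config.ClientInterface, cfg config.Config) (pipeline.DuplicateChecker, error) {
	if cfg.Force {
		return nil, nil
	}

	checker, err := pipeline.NewMCPDuplicateChecker(ctx, client, cfg.Debug)
	if err != nil {
		return nil, fmt.Errorf("failed to create duplicate checker: %w", err)
	}

	return checker, nil
}
```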
+func NewImporter(client config.ClientInterface, cfg config.Config) (types.Importer, error) { + return &Importer{ + client: client, + registryURL: cfg.RegistryURL, + }, nil +} + +// Run executes the import operation for the MCP registry using a pipeline: +// - Normal mode: Three-stage pipeline (Fetcher -> Transformer -> Pusher) +// - Dry-run mode: Two-stage pipeline (Fetcher -> Transformer). +func (i *Importer) Run(ctx context.Context, cfg config.Config) (*types.ImportResult, error) { + // Create pipeline stages + fetcher, err := NewFetcher(i.registryURL, cfg.Filters, cfg.Limit) + if err != nil { + return nil, fmt.Errorf("failed to create fetcher: %w", err) + } + + transformer, err := NewTransformer(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("failed to create transformer: %w", err) + } + + // Configure pipeline with concurrency settings + pipelineConfig := pipeline.Config{ + TransformerWorkers: cfg.Concurrency, + } + + // Create and run the appropriate pipeline based on dry-run mode + var pipelineResult *pipeline.Result + + //nolint:nestif // Complexity is acceptable for pipeline setup with different modes + if cfg.DryRun { + // Create duplicate checker for accurate dry-run preview (unless --force is set) + var duplicateChecker pipeline.DuplicateChecker + if !cfg.Force { + duplicateChecker, err = pipeline.NewMCPDuplicateChecker(ctx, i.client, cfg.Debug) + if err != nil { + return nil, fmt.Errorf("failed to create duplicate checker: %w", err) + } + } + + // Use dry-run pipeline (fetch, filter duplicates, and transform only - no push) + p := pipeline.NewDryRun(fetcher, duplicateChecker, transformer, pipelineConfig) + pipelineResult, err = p.Run(ctx) + } else { + pusher := pipeline.NewClientPusher(i.client, cfg.Debug) + + // If --force is set, duplicateChecker will be nil (no deduplication) + // Otherwise, build cache of existing records for deduplication + var duplicateChecker pipeline.DuplicateChecker + if !cfg.Force { + duplicateChecker, err = pipeline.NewMCPDuplicateChecker(ctx, i.client, cfg.Debug) + if err != nil { + return nil, fmt.Errorf("failed to create duplicate checker: %w", err) + } + } + + // Create full pipeline with optional duplicate checker + p := pipeline.New(fetcher, duplicateChecker, transformer, pusher, pipelineConfig) + pipelineResult, err = p.Run(ctx) + } + + if err != nil { + return nil, fmt.Errorf("failed to run pipeline: %w", err) + } + + // Convert pipeline result to import result + result := &types.ImportResult{ + TotalRecords: pipelineResult.TotalRecords, + ImportedCount: pipelineResult.ImportedCount, + SkippedCount: pipelineResult.SkippedCount, + FailedCount: pipelineResult.FailedCount, + Errors: pipelineResult.Errors, + } + + return result, nil +} diff --git a/importer/mcp/importer_test.go b/importer/mcp/importer_test.go index 1ae4ff985..08a2cc657 100644 --- a/importer/mcp/importer_test.go +++ b/importer/mcp/importer_test.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "testing" - - "github.com/agntcy/dir/client" - "github.com/agntcy/dir/importer/config" -) - -func TestNewImporter(t *testing.T) { - tests := []struct { - name string - config config.Config - wantErr bool - }{ - { - name: "valid config", - config: config.Config{ - RegistryType: config.RegistryTypeMCP, - RegistryURL: "https://registry.example.com", - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock client - mockClient 
:= &client.Client{} - - importer, err := NewImporter(mockClient, tt.config) - if (err != nil) != tt.wantErr { - t.Errorf("NewImporter() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if importer == nil { - t.Error("NewImporter() returned nil importer") - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "testing" + + "github.com/agntcy/dir/client" + "github.com/agntcy/dir/importer/config" +) + +func TestNewImporter(t *testing.T) { + tests := []struct { + name string + config config.Config + wantErr bool + }{ + { + name: "valid config", + config: config.Config{ + RegistryType: config.RegistryTypeMCP, + RegistryURL: "https://registry.example.com", + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a mock client + mockClient := &client.Client{} + + importer, err := NewImporter(mockClient, tt.config) + if (err != nil) != tt.wantErr { + t.Errorf("NewImporter() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if importer == nil { + t.Error("NewImporter() returned nil importer") + } + }) + } +} diff --git a/importer/mcp/register.go b/importer/mcp/register.go index 85b69c3d0..54defc78c 100644 --- a/importer/mcp/register.go +++ b/importer/mcp/register.go @@ -1,14 +1,14 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/types/factory" -) - -// Register the MCP importer with the factory on package init. -func init() { - factory.Register(config.RegistryTypeMCP, NewImporter) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/importer/types/factory" +) + +// Register the MCP importer with the factory on package init. +func init() { + factory.Register(config.RegistryTypeMCP, NewImporter) +} diff --git a/importer/mcp/transformer.go b/importer/mcp/transformer.go index 56dd27d20..6213b1a2e 100644 --- a/importer/mcp/transformer.go +++ b/importer/mcp/transformer.go @@ -1,252 +1,252 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "time" - - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/enricher" - "github.com/agntcy/oasf-sdk/pkg/translator" - mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/structpb" -) - -const ( - // DefaultSchemaVersion is the default version of the OASF schema. - DefaultOASFVersion = "0.8.0" -) - -// Transformer implements the pipeline.Transformer interface for MCP records. -type Transformer struct { - host *enricher.MCPHostClient -} - -// NewTransformer creates a new MCP transformer. -// If cfg.Enrich is true, it initializes an enricher client using cfg.EnricherConfig. 
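`register.go` above is the usual blank-import registration pattern: the package's `init` hands `NewImporter` to a central factory, so merely linking the package makes the MCP registry type constructible by name. The factory itself is outside this diff; a plausible minimal shape, in which the string key and all package internals are assumptions and only the `Register` call site is confirmed, might be:

```go
// Sketch of a factory compatible with the Register call in register.go.
// Constructor mirrors the NewImporter signature; everything else is assumed.
package factory

import (
	"fmt"
	"sync"

	"github.com/agntcy/dir/importer/config"
	"github.com/agntcy/dir/importer/types"
)

type Constructor func(config.ClientInterface, config.Config) (types.Importer, error)

var (
	mu           sync.RWMutex
	constructors = map[string]Constructor{}
)

func Register(registryType string, c Constructor) {
	mu.Lock()
	defer mu.Unlock()
	constructors[registryType] = c
}

func New(registryType string, client config.ClientInterface, cfg config.Config) (types.Importer, error) {
	mu.RLock()
	c, ok := constructors[registryType]
	mu.RUnlock()

	if !ok {
		return nil, fmt.Errorf("no importer registered for %q", registryType)
	}

	return c(client, cfg)
}
```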
-func NewTransformer(ctx context.Context, cfg config.Config) (*Transformer, error) { - var host *enricher.MCPHostClient - - if cfg.Enrich { - // Create enricher configuration - enricherCfg := enricher.Config{ - ConfigFile: cfg.EnricherConfigFile, - SkillsPromptTemplate: cfg.EnricherSkillsPromptTemplate, - DomainsPromptTemplate: cfg.EnricherDomainsPromptTemplate, - } - - var err error - - host, err = enricher.NewMCPHost(ctx, enricherCfg) - if err != nil { - return nil, fmt.Errorf("failed to create MCPHost client: %w", err) - } - } - - return &Transformer{ - host: host, - }, nil -} - -// Transform converts an MCP server response to OASF format. -func (t *Transformer) Transform(ctx context.Context, source interface{}) (*corev1.Record, error) { - // Convert interface{} to ServerResponse - response, ok := ServerResponseFromInterface(source) - if !ok { - return nil, fmt.Errorf("invalid source type: expected mcpapiv0.ServerResponse, got %T", source) - } - - // Convert to OASF format - record, err := t.convertToOASF(ctx, response) - if err != nil { - return nil, fmt.Errorf("failed to convert server %s:%s to OASF: %w", - response.Server.Name, response.Server.Version, err) - } - - // Attach MCP source for debugging push failures - // Store in a way that won't interfere with the record - if record.GetData() != nil && record.Data.Fields != nil { - if mcpBytes, err := json.Marshal(response.Server); err == nil { - // Store as a JSON string for later retrieval - record.Data.Fields["__mcp_debug_source"] = structpb.NewStringValue(string(mcpBytes)) - } - } - - return record, nil -} - -// convertToOASF converts an MCP server response to OASF format. -// -//nolint:unparam -func (t *Transformer) convertToOASF(ctx context.Context, response mcpapiv0.ServerResponse) (*corev1.Record, error) { - server := response.Server - - // Convert the MCP ServerJSON to a structpb.Struct - serverBytes, err := json.Marshal(server) - if err != nil { - return nil, fmt.Errorf("failed to marshal server to JSON: %w", err) - } - - var serverMap map[string]interface{} - if err := json.Unmarshal(serverBytes, &serverMap); err != nil { - return nil, fmt.Errorf("failed to unmarshal server JSON to map: %w", err) - } - - serverStruct, err := structpb.NewStruct(serverMap) - if err != nil { - return nil, fmt.Errorf("failed to convert server map to structpb.Struct: %w", err) - } - - mcpData := &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "server": structpb.NewStructValue(serverStruct), - }, - } - - // Translate MCP struct to OASF record struct - recordStruct, err := translator.MCPToRecord(mcpData) - if err != nil { - // Print MCP source on translation failure - if mcpBytes, jsonErr := json.MarshalIndent(server, "", " "); jsonErr == nil { - fmt.Fprintf(os.Stderr, "\n========================================\n") - fmt.Fprintf(os.Stderr, "TRANSLATION FAILED for: %s@%s\n", server.Name, server.Version) - fmt.Fprintf(os.Stderr, "========================================\n") - fmt.Fprintf(os.Stderr, "MCP Source:\n%s\n", string(mcpBytes)) - fmt.Fprintf(os.Stderr, "========================================\n\n") - os.Stderr.Sync() - } - - return nil, fmt.Errorf("failed to convert MCP data to OASF record: %w", err) - } - - // Enrich the record with proper OASF skills and domains if enrichment is enabled - if t.host != nil { - if err := t.enrichRecord(ctx, recordStruct); err != nil { - return nil, err - } - } - - return &corev1.Record{ - Data: recordStruct, - }, nil -} - -// enrichRecord handles the enrichment of a record with skills and 
domains. -func (t *Transformer) enrichRecord(ctx context.Context, recordStruct *structpb.Struct) error { - // Convert structpb.Struct to typesv1alpha1.Record for enrichment - oasfRecord, err := structToOASFRecord(recordStruct) - if err != nil { - return fmt.Errorf("failed to convert struct to OASF record for enrichment: %w", err) - } - - // Clear default skills and domains before enrichment - let the LLM select appropriate ones - oasfRecord.Skills = nil - oasfRecord.Domains = nil - - // Context with timeout for enrichment operations - ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Minute) //nolint:mnd - defer cancel() - - // Enrich with skills - enrichedRecord, err := t.host.EnrichWithSkills(ctxWithTimeout, oasfRecord) - if err != nil { - return fmt.Errorf("failed to enrich record with skills: %w", err) - } - - // Enrich with domains (using the already skill-enriched record) - enrichedRecord, err = t.host.EnrichWithDomains(ctxWithTimeout, enrichedRecord) - if err != nil { - return fmt.Errorf("failed to enrich record with domains: %w", err) - } - - // Update both skills and domains fields, preserve everything else from the original record - if err := updateSkillsInStruct(recordStruct, enrichedRecord.GetSkills()); err != nil { - return fmt.Errorf("failed to update skills in record: %w", err) - } - - if err := updateDomainsInStruct(recordStruct, enrichedRecord.GetDomains()); err != nil { - return fmt.Errorf("failed to update domains in record: %w", err) - } - - return nil -} - -// structToOASFRecord converts a structpb.Struct to typesv1alpha1.Record for enrichment. -func structToOASFRecord(s *structpb.Struct) (*typesv1alpha1.Record, error) { - // Marshal struct to JSON - jsonBytes, err := protojson.Marshal(s) - if err != nil { - return nil, fmt.Errorf("failed to marshal struct to JSON: %w", err) - } - - // Unmarshal JSON into typesv1alpha1.Record - var record typesv1alpha1.Record - if err := protojson.Unmarshal(jsonBytes, &record); err != nil { - return nil, fmt.Errorf("failed to unmarshal JSON to OASF record: %w", err) - } - - return &record, nil -} - -// enrichedItem represents any enriched field (skill or domain) with name and id. -type enrichedItem interface { - GetName() string - GetId() uint32 -} - -// updateFieldsInStruct is a generic helper that updates a field in a structpb.Struct with enriched items. -// This preserves all other fields including schema_version, name, version, etc. -func updateFieldsInStruct[T enrichedItem](recordStruct *structpb.Struct, fieldName string, enrichedItems []T) error { - if recordStruct.Fields == nil { - return errors.New("record struct has no fields") - } - - // Convert enriched items to structpb.ListValue - itemsList := &structpb.ListValue{ - Values: make([]*structpb.Value, 0, len(enrichedItems)), - } - - for _, item := range enrichedItems { - itemStruct := &structpb.Struct{ - Fields: make(map[string]*structpb.Value), - } - - // Add name field (required) - if item.GetName() != "" { - itemStruct.Fields["name"] = structpb.NewStringValue(item.GetName()) - } - - // Add id field if present - if item.GetId() != 0 { - itemStruct.Fields["id"] = structpb.NewNumberValue(float64(item.GetId())) - } - - itemsList.Values = append(itemsList.Values, structpb.NewStructValue(itemStruct)) - } - - // Update the field in the record - recordStruct.Fields[fieldName] = structpb.NewListValue(itemsList) - - return nil -} - -// updateSkillsInStruct updates the skills field in a structpb.Struct with enriched skills. 
-// This preserves all other fields including schema_version, name, version, etc. -func updateSkillsInStruct(recordStruct *structpb.Struct, enrichedSkills []*typesv1alpha1.Skill) error { - return updateFieldsInStruct(recordStruct, "skills", enrichedSkills) -} - -// updateDomainsInStruct updates the domains field in a structpb.Struct with enriched domains. -// This preserves all other fields including schema_version, name, version, etc. -func updateDomainsInStruct(recordStruct *structpb.Struct, enrichedDomains []*typesv1alpha1.Domain) error { - return updateFieldsInStruct(recordStruct, "domains", enrichedDomains) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/importer/enricher" + "github.com/agntcy/oasf-sdk/pkg/translator" + mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // DefaultSchemaVersion is the default version of the OASF schema. + DefaultOASFVersion = "0.8.0" +) + +// Transformer implements the pipeline.Transformer interface for MCP records. +type Transformer struct { + host *enricher.MCPHostClient +} + +// NewTransformer creates a new MCP transformer. +// If cfg.Enrich is true, it initializes an enricher client using cfg.EnricherConfig. +func NewTransformer(ctx context.Context, cfg config.Config) (*Transformer, error) { + var host *enricher.MCPHostClient + + if cfg.Enrich { + // Create enricher configuration + enricherCfg := enricher.Config{ + ConfigFile: cfg.EnricherConfigFile, + SkillsPromptTemplate: cfg.EnricherSkillsPromptTemplate, + DomainsPromptTemplate: cfg.EnricherDomainsPromptTemplate, + } + + var err error + + host, err = enricher.NewMCPHost(ctx, enricherCfg) + if err != nil { + return nil, fmt.Errorf("failed to create MCPHost client: %w", err) + } + } + + return &Transformer{ + host: host, + }, nil +} + +// Transform converts an MCP server response to OASF format. +func (t *Transformer) Transform(ctx context.Context, source interface{}) (*corev1.Record, error) { + // Convert interface{} to ServerResponse + response, ok := ServerResponseFromInterface(source) + if !ok { + return nil, fmt.Errorf("invalid source type: expected mcpapiv0.ServerResponse, got %T", source) + } + + // Convert to OASF format + record, err := t.convertToOASF(ctx, response) + if err != nil { + return nil, fmt.Errorf("failed to convert server %s:%s to OASF: %w", + response.Server.Name, response.Server.Version, err) + } + + // Attach MCP source for debugging push failures + // Store in a way that won't interfere with the record + if record.GetData() != nil && record.Data.Fields != nil { + if mcpBytes, err := json.Marshal(response.Server); err == nil { + // Store as a JSON string for later retrieval + record.Data.Fields["__mcp_debug_source"] = structpb.NewStringValue(string(mcpBytes)) + } + } + + return record, nil +} + +// convertToOASF converts an MCP server response to OASF format. 
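`convertToOASF` (its doc comment ends just above; the body continues below and appears in full in the removed hunk) reaches `structpb` through a JSON round-trip, because `structpb.NewStruct` accepts only `map[string]interface{}` values, not arbitrary typed structs. The idiom, isolated into a reusable sketch (`toStruct` is a hypothetical name):

```go
// Sketch: turn any JSON-marshalable value into a *structpb.Struct via the
// same round-trip convertToOASF uses. Assumes encoding/json and structpb.
func toStruct(v interface{}) (*structpb.Struct, error) {
	raw, err := json.Marshal(v)
	if err != nil {
		return nil, fmt.Errorf("marshal: %w", err)
	}

	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil, fmt.Errorf("unmarshal: %w", err)
	}

	return structpb.NewStruct(m)
}
```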
+// +//nolint:unparam +func (t *Transformer) convertToOASF(ctx context.Context, response mcpapiv0.ServerResponse) (*corev1.Record, error) { + server := response.Server + + // Convert the MCP ServerJSON to a structpb.Struct + serverBytes, err := json.Marshal(server) + if err != nil { + return nil, fmt.Errorf("failed to marshal server to JSON: %w", err) + } + + var serverMap map[string]interface{} + if err := json.Unmarshal(serverBytes, &serverMap); err != nil { + return nil, fmt.Errorf("failed to unmarshal server JSON to map: %w", err) + } + + serverStruct, err := structpb.NewStruct(serverMap) + if err != nil { + return nil, fmt.Errorf("failed to convert server map to structpb.Struct: %w", err) + } + + mcpData := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "server": structpb.NewStructValue(serverStruct), + }, + } + + // Translate MCP struct to OASF record struct + recordStruct, err := translator.MCPToRecord(mcpData) + if err != nil { + // Print MCP source on translation failure + if mcpBytes, jsonErr := json.MarshalIndent(server, "", " "); jsonErr == nil { + fmt.Fprintf(os.Stderr, "\n========================================\n") + fmt.Fprintf(os.Stderr, "TRANSLATION FAILED for: %s@%s\n", server.Name, server.Version) + fmt.Fprintf(os.Stderr, "========================================\n") + fmt.Fprintf(os.Stderr, "MCP Source:\n%s\n", string(mcpBytes)) + fmt.Fprintf(os.Stderr, "========================================\n\n") + os.Stderr.Sync() + } + + return nil, fmt.Errorf("failed to convert MCP data to OASF record: %w", err) + } + + // Enrich the record with proper OASF skills and domains if enrichment is enabled + if t.host != nil { + if err := t.enrichRecord(ctx, recordStruct); err != nil { + return nil, err + } + } + + return &corev1.Record{ + Data: recordStruct, + }, nil +} + +// enrichRecord handles the enrichment of a record with skills and domains. +func (t *Transformer) enrichRecord(ctx context.Context, recordStruct *structpb.Struct) error { + // Convert structpb.Struct to typesv1alpha1.Record for enrichment + oasfRecord, err := structToOASFRecord(recordStruct) + if err != nil { + return fmt.Errorf("failed to convert struct to OASF record for enrichment: %w", err) + } + + // Clear default skills and domains before enrichment - let the LLM select appropriate ones + oasfRecord.Skills = nil + oasfRecord.Domains = nil + + // Context with timeout for enrichment operations + ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Minute) //nolint:mnd + defer cancel() + + // Enrich with skills + enrichedRecord, err := t.host.EnrichWithSkills(ctxWithTimeout, oasfRecord) + if err != nil { + return fmt.Errorf("failed to enrich record with skills: %w", err) + } + + // Enrich with domains (using the already skill-enriched record) + enrichedRecord, err = t.host.EnrichWithDomains(ctxWithTimeout, enrichedRecord) + if err != nil { + return fmt.Errorf("failed to enrich record with domains: %w", err) + } + + // Update both skills and domains fields, preserve everything else from the original record + if err := updateSkillsInStruct(recordStruct, enrichedRecord.GetSkills()); err != nil { + return fmt.Errorf("failed to update skills in record: %w", err) + } + + if err := updateDomainsInStruct(recordStruct, enrichedRecord.GetDomains()); err != nil { + return fmt.Errorf("failed to update domains in record: %w", err) + } + + return nil +} + +// structToOASFRecord converts a structpb.Struct to typesv1alpha1.Record for enrichment. 
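`structToOASFRecord`, whose definition follows, deliberately uses `protojson` rather than `encoding/json`: `structpb.Struct` is a protobuf message, and `encoding/json` would serialize its internal Go representation rather than the logical JSON document that `typesv1alpha1.Record` expects. A small sketch of the difference (the printed shapes are indicative):

```go
// Sketch: protojson yields the logical JSON of a structpb value, while
// encoding/json exposes the generated message internals.
func demoProtojsonVsStdlib() {
	s, _ := structpb.NewStruct(map[string]interface{}{"name": "demo"})

	logical, _ := protojson.Marshal(s) // roughly {"name":"demo"}
	internal, _ := json.Marshal(s)     // wrapper fields like "Kind", not logical JSON

	fmt.Println(string(logical))
	fmt.Println(string(internal))
}
```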
+func structToOASFRecord(s *structpb.Struct) (*typesv1alpha1.Record, error) { + // Marshal struct to JSON + jsonBytes, err := protojson.Marshal(s) + if err != nil { + return nil, fmt.Errorf("failed to marshal struct to JSON: %w", err) + } + + // Unmarshal JSON into typesv1alpha1.Record + var record typesv1alpha1.Record + if err := protojson.Unmarshal(jsonBytes, &record); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON to OASF record: %w", err) + } + + return &record, nil +} + +// enrichedItem represents any enriched field (skill or domain) with name and id. +type enrichedItem interface { + GetName() string + GetId() uint32 +} + +// updateFieldsInStruct is a generic helper that updates a field in a structpb.Struct with enriched items. +// This preserves all other fields including schema_version, name, version, etc. +func updateFieldsInStruct[T enrichedItem](recordStruct *structpb.Struct, fieldName string, enrichedItems []T) error { + if recordStruct.Fields == nil { + return errors.New("record struct has no fields") + } + + // Convert enriched items to structpb.ListValue + itemsList := &structpb.ListValue{ + Values: make([]*structpb.Value, 0, len(enrichedItems)), + } + + for _, item := range enrichedItems { + itemStruct := &structpb.Struct{ + Fields: make(map[string]*structpb.Value), + } + + // Add name field (required) + if item.GetName() != "" { + itemStruct.Fields["name"] = structpb.NewStringValue(item.GetName()) + } + + // Add id field if present + if item.GetId() != 0 { + itemStruct.Fields["id"] = structpb.NewNumberValue(float64(item.GetId())) + } + + itemsList.Values = append(itemsList.Values, structpb.NewStructValue(itemStruct)) + } + + // Update the field in the record + recordStruct.Fields[fieldName] = structpb.NewListValue(itemsList) + + return nil +} + +// updateSkillsInStruct updates the skills field in a structpb.Struct with enriched skills. +// This preserves all other fields including schema_version, name, version, etc. +func updateSkillsInStruct(recordStruct *structpb.Struct, enrichedSkills []*typesv1alpha1.Skill) error { + return updateFieldsInStruct(recordStruct, "skills", enrichedSkills) +} + +// updateDomainsInStruct updates the domains field in a structpb.Struct with enriched domains. +// This preserves all other fields including schema_version, name, version, etc. 
+func updateDomainsInStruct(recordStruct *structpb.Struct, enrichedDomains []*typesv1alpha1.Domain) error { + return updateFieldsInStruct(recordStruct, "domains", enrichedDomains) +} diff --git a/importer/mcp/transformer_test.go b/importer/mcp/transformer_test.go index e907084e9..9c18bb037 100644 --- a/importer/mcp/transformer_test.go +++ b/importer/mcp/transformer_test.go @@ -1,168 +1,168 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mcp - -import ( - "testing" - - "github.com/agntcy/dir/importer/config" - mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" - "github.com/modelcontextprotocol/registry/pkg/model" -) - -//nolint:nestif -func TestTransformer_Transform(t *testing.T) { - // Create transformer with enrichment disabled for testing - cfg := config.Config{ - Enrich: false, - } - - transformer, err := NewTransformer(t.Context(), cfg) - if err != nil { - t.Fatalf("failed to create transformer: %v", err) - } - - tests := []struct { - name string - source interface{} - wantErr bool - errString string - }{ - { - name: "valid server response", - source: mcpapiv0.ServerResponse{ - Server: mcpapiv0.ServerJSON{ - Name: "test-server", - Version: "1.0.0", - Description: "Test server", - }, - }, - wantErr: false, - }, - { - name: "invalid source type - string", - source: "not a server response", - wantErr: true, - errString: "invalid source type", - }, - { - name: "invalid source type - nil", - source: nil, - wantErr: true, - errString: "invalid source type", - }, - { - name: "invalid source type - int", - source: 42, - wantErr: true, - errString: "invalid source type", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - record, err := transformer.Transform(t.Context(), tt.source) - - if tt.wantErr { - if err == nil { - t.Errorf("expected error containing %q, got nil", tt.errString) - } - - if record != nil { - t.Error("expected nil record on error") - } - } else { - if err != nil { - t.Errorf("expected no error, got %v", err) - } - - if record == nil { - t.Error("expected record, got nil") - } - } - }) - } -} - -//nolint:nestif -func TestTransformer_ConvertToOASF(t *testing.T) { - // Create transformer with enrichment disabled for testing - cfg := config.Config{ - Enrich: false, - } - - transformer, err := NewTransformer(t.Context(), cfg) - if err != nil { - t.Fatalf("failed to create transformer: %v", err) - } - - tests := []struct { - name string - response mcpapiv0.ServerResponse - wantErr bool - }{ - { - name: "basic server conversion", - response: mcpapiv0.ServerResponse{ - Server: mcpapiv0.ServerJSON{ - Name: "test-server", - Version: "1.0.0", - Description: "Test server description", - }, - }, - wantErr: false, - }, - { - name: "minimal server", - response: mcpapiv0.ServerResponse{ - Server: mcpapiv0.ServerJSON{ - Name: "minimal", - Version: "0.1.0", - }, - Meta: mcpapiv0.ResponseMeta{ - Official: &mcpapiv0.RegistryExtensions{ - Status: model.StatusActive, - IsLatest: true, - }, - }, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - record, err := transformer.convertToOASF(t.Context(), tt.response) - if (err != nil) != tt.wantErr { - t.Errorf("convertToOASF() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if !tt.wantErr { - if record == nil { - t.Error("convertToOASF() returned nil record") - - return - } - - if record.GetData() == nil { - t.Error("convertToOASF() returned record with nil Data") - - return - } - 
- // Verify basic fields - fields := record.GetData().GetFields() - if fields["name"].GetStringValue() != tt.response.Server.Name { - t.Errorf("name = %v, want %v", fields["name"].GetStringValue(), tt.response.Server.Name) - } - - if fields["version"].GetStringValue() != tt.response.Server.Version { - t.Errorf("version = %v, want %v", fields["version"].GetStringValue(), tt.response.Server.Version) - } - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mcp + +import ( + "testing" + + "github.com/agntcy/dir/importer/config" + mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" + "github.com/modelcontextprotocol/registry/pkg/model" +) + +//nolint:nestif +func TestTransformer_Transform(t *testing.T) { + // Create transformer with enrichment disabled for testing + cfg := config.Config{ + Enrich: false, + } + + transformer, err := NewTransformer(t.Context(), cfg) + if err != nil { + t.Fatalf("failed to create transformer: %v", err) + } + + tests := []struct { + name string + source interface{} + wantErr bool + errString string + }{ + { + name: "valid server response", + source: mcpapiv0.ServerResponse{ + Server: mcpapiv0.ServerJSON{ + Name: "test-server", + Version: "1.0.0", + Description: "Test server", + }, + }, + wantErr: false, + }, + { + name: "invalid source type - string", + source: "not a server response", + wantErr: true, + errString: "invalid source type", + }, + { + name: "invalid source type - nil", + source: nil, + wantErr: true, + errString: "invalid source type", + }, + { + name: "invalid source type - int", + source: 42, + wantErr: true, + errString: "invalid source type", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + record, err := transformer.Transform(t.Context(), tt.source) + + if tt.wantErr { + if err == nil { + t.Errorf("expected error containing %q, got nil", tt.errString) + } + + if record != nil { + t.Error("expected nil record on error") + } + } else { + if err != nil { + t.Errorf("expected no error, got %v", err) + } + + if record == nil { + t.Error("expected record, got nil") + } + } + }) + } +} + +//nolint:nestif +func TestTransformer_ConvertToOASF(t *testing.T) { + // Create transformer with enrichment disabled for testing + cfg := config.Config{ + Enrich: false, + } + + transformer, err := NewTransformer(t.Context(), cfg) + if err != nil { + t.Fatalf("failed to create transformer: %v", err) + } + + tests := []struct { + name string + response mcpapiv0.ServerResponse + wantErr bool + }{ + { + name: "basic server conversion", + response: mcpapiv0.ServerResponse{ + Server: mcpapiv0.ServerJSON{ + Name: "test-server", + Version: "1.0.0", + Description: "Test server description", + }, + }, + wantErr: false, + }, + { + name: "minimal server", + response: mcpapiv0.ServerResponse{ + Server: mcpapiv0.ServerJSON{ + Name: "minimal", + Version: "0.1.0", + }, + Meta: mcpapiv0.ResponseMeta{ + Official: &mcpapiv0.RegistryExtensions{ + Status: model.StatusActive, + IsLatest: true, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + record, err := transformer.convertToOASF(t.Context(), tt.response) + if (err != nil) != tt.wantErr { + t.Errorf("convertToOASF() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if !tt.wantErr { + if record == nil { + t.Error("convertToOASF() returned nil record") + + return + } + + if record.GetData() == nil { + t.Error("convertToOASF() returned record with nil 
Data") + + return + } + + // Verify basic fields + fields := record.GetData().GetFields() + if fields["name"].GetStringValue() != tt.response.Server.Name { + t.Errorf("name = %v, want %v", fields["name"].GetStringValue(), tt.response.Server.Name) + } + + if fields["version"].GetStringValue() != tt.response.Server.Version { + t.Errorf("version = %v, want %v", fields["version"].GetStringValue(), tt.response.Server.Version) + } + } + }) + } +} diff --git a/importer/pipeline/dedup.go b/importer/pipeline/dedup.go index 99b5ebaa1..b6adcebf3 100644 --- a/importer/pipeline/dedup.go +++ b/importer/pipeline/dedup.go @@ -1,250 +1,250 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pipeline - -import ( - "context" - "fmt" - "os" - "sync" - - corev1 "github.com/agntcy/dir/api/core/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/utils/logging" - mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" -) - -var dedupLogger = logging.Logger("importer/pipeline/dedup") - -// MCPDuplicateChecker checks for duplicate MCP records by comparing name@version -// against existing records in the directory. -type MCPDuplicateChecker struct { - client config.ClientInterface - debug bool - existingRecords map[string]string // map[name@version]cid - mu sync.RWMutex -} - -// NewMCPDuplicateChecker creates a new duplicate checker for MCP records. -// It queries the directory for all existing MCP records and builds an in-memory cache. -func NewMCPDuplicateChecker(ctx context.Context, client config.ClientInterface, debug bool) (*MCPDuplicateChecker, error) { - checker := &MCPDuplicateChecker{ - client: client, - debug: debug, - existingRecords: make(map[string]string), - } - - if err := checker.buildCache(ctx); err != nil { - return nil, fmt.Errorf("failed to build duplicate cache: %w", err) - } - - if debug { - fmt.Fprintf(os.Stderr, "[DEDUP] Cache built with %d existing MCP records\n", len(checker.existingRecords)) - os.Stderr.Sync() - } - - return checker, nil -} - -// buildCache queries the directory for all records with integration/mcp or runtime/mcp modules -// and builds an in-memory cache of name@version combinations using pagination. -// This ensures we don't reimport records that exist under the old runtime/mcp module name. 
-// -//nolint:gocognit,cyclop // Complexity is acceptable for building cache from multiple modules -func (c *MCPDuplicateChecker) buildCache(ctx context.Context) error { - const ( - batchSize = 1000 // Process 1000 records at a time - maxRecords = 50000 // Safety limit to prevent unbounded memory growth - ) - - // Search for both integration/mcp (new) and runtime/mcp (old) modules - modules := []string{"integration/mcp", "runtime/mcp"} - - totalProcessed := 0 - - for _, module := range modules { - offset := uint32(0) - - for { - // Search for records with this module with pagination - limit := uint32(batchSize) - searchReq := &searchv1.SearchCIDsRequest{ - Queries: []*searchv1.RecordQuery{ - { - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, - Value: module, - }, - }, - Limit: &limit, - Offset: &offset, - } - - result, err := c.client.SearchCIDs(ctx, searchReq) - if err != nil { - return fmt.Errorf("search for existing %s records failed: %w", module, err) - } - - // Collect CIDs from this batch - cids := make([]string, 0, batchSize) - - L: - for { - select { - case resp := <-result.ResCh(): - cid := resp.GetRecordCid() - if cid != "" { - cids = append(cids, cid) - } - case err := <-result.ErrCh(): - return fmt.Errorf("search stream error for %s: %w", module, err) - case <-result.DoneCh(): - break L - case <-ctx.Done(): - return fmt.Errorf("context cancelled: %w", ctx.Err()) - } - } - - // No more results for this module - if len(cids) == 0 { - break - } - - // Convert CIDs to RecordRefs - refs := make([]*corev1.RecordRef, 0, len(cids)) - for _, cid := range cids { - refs = append(refs, &corev1.RecordRef{Cid: cid}) - } - - // Batch pull records from this batch - records, err := c.client.PullBatch(ctx, refs) - if err != nil { - return fmt.Errorf("failed to pull existing %s records: %w", module, err) - } - - // Build the cache: name@version -> cid - c.mu.Lock() - - for _, record := range records { - nameVersion, err := extractNameVersion(record) - if err != nil { - continue - } - - c.existingRecords[nameVersion] = record.GetCid() - } - - c.mu.Unlock() - - totalProcessed += len(cids) - - // Debug logging for batch progress - if c.debug { - fmt.Fprintf(os.Stderr, "[DEDUP] Processed %s batch: %d records (total: %d)\n", module, len(cids), totalProcessed) - os.Stderr.Sync() - } - - // Safety check: prevent unbounded memory growth - if totalProcessed >= maxRecords { - dedupLogger.Warn("Deduplication cache limit reached", - "max_records", maxRecords, - "message", "Some existing records may not be cached. Consider using --force to reimport.") - - return nil - } - - // If we got fewer results than requested, we've reached the end - if len(cids) < batchSize { - break - } - - // Move to next batch - offset += uint32(batchSize) - } - } - - return nil -} - -// FilterDuplicates implements the DuplicateChecker interface. -// It filters out duplicate records from the input channel and returns a channel -// with only non-duplicate records. It tracks only the skipped (duplicate) count. -// The transform stage will track the total records that are actually processed. 
-func (c *MCPDuplicateChecker) FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{} { - outputCh := make(chan interface{}) - - go func() { - defer close(outputCh) - - for { - select { - case <-ctx.Done(): - return - case source, ok := <-inputCh: - if !ok { - return - } - - // Check if duplicate - if c.isDuplicate(source) { - result.mu.Lock() - result.TotalRecords++ - result.SkippedCount++ - result.mu.Unlock() - - continue - } - - // Not a duplicate - pass it through (transform stage will count it) - select { - case outputCh <- source: - case <-ctx.Done(): - return - } - } - } - }() - - return outputCh -} - -// isDuplicate checks if a source record (MCP ServerResponse) is a duplicate. -func (c *MCPDuplicateChecker) isDuplicate(source interface{}) bool { - // Try to extract name@version from the MCP source - nameVersion := extractNameVersionFromSource(source) - if nameVersion == "" { - // Can't determine - not a duplicate (will be processed) - return false - } - - // Check if record already exists - c.mu.RLock() - _, exists := c.existingRecords[nameVersion] - c.mu.RUnlock() - - if exists && c.debug { - fmt.Fprintf(os.Stderr, "[DEDUP] %s is a duplicate\n", nameVersion) - os.Stderr.Sync() - } - - return exists -} - -// extractNameVersionFromSource extracts "name@version" from a raw MCP source. -// This avoids the need to transform the record just to check for duplicates. -func extractNameVersionFromSource(source interface{}) string { - // Try to convert to MCP ServerResponse - switch s := source.(type) { - case mcpapiv0.ServerResponse: - if s.Server.Name != "" && s.Server.Version != "" { - return fmt.Sprintf("%s@%s", s.Server.Name, s.Server.Version) - } - case *mcpapiv0.ServerResponse: - if s != nil && s.Server.Name != "" && s.Server.Version != "" { - return fmt.Sprintf("%s@%s", s.Server.Name, s.Server.Version) - } - } - - return "" -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pipeline + +import ( + "context" + "fmt" + "os" + "sync" + + corev1 "github.com/agntcy/dir/api/core/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/utils/logging" + mcpapiv0 "github.com/modelcontextprotocol/registry/pkg/api/v0" +) + +var dedupLogger = logging.Logger("importer/pipeline/dedup") + +// MCPDuplicateChecker checks for duplicate MCP records by comparing name@version +// against existing records in the directory. +type MCPDuplicateChecker struct { + client config.ClientInterface + debug bool + existingRecords map[string]string // map[name@version]cid + mu sync.RWMutex +} + +// NewMCPDuplicateChecker creates a new duplicate checker for MCP records. +// It queries the directory for all existing MCP records and builds an in-memory cache. 
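+//
+// A minimal wiring sketch (illustrative; the fetcher, transformer, pusher, and
+// client values are assumed to be constructed elsewhere):
+//
+//	checker, err := NewMCPDuplicateChecker(ctx, client, false)
+//	if err != nil {
+//		return err
+//	}
+//
+//	p := New(fetcher, checker, transformer, pusher, Config{})
+//	result, _ := p.Run(ctx)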
+func NewMCPDuplicateChecker(ctx context.Context, client config.ClientInterface, debug bool) (*MCPDuplicateChecker, error) { + checker := &MCPDuplicateChecker{ + client: client, + debug: debug, + existingRecords: make(map[string]string), + } + + if err := checker.buildCache(ctx); err != nil { + return nil, fmt.Errorf("failed to build duplicate cache: %w", err) + } + + if debug { + fmt.Fprintf(os.Stderr, "[DEDUP] Cache built with %d existing MCP records\n", len(checker.existingRecords)) + os.Stderr.Sync() + } + + return checker, nil +} + +// buildCache queries the directory for all records with integration/mcp or runtime/mcp modules +// and builds an in-memory cache of name@version combinations using pagination. +// This ensures we don't reimport records that exist under the old runtime/mcp module name. +// +//nolint:gocognit,cyclop // Complexity is acceptable for building cache from multiple modules +func (c *MCPDuplicateChecker) buildCache(ctx context.Context) error { + const ( + batchSize = 1000 // Process 1000 records at a time + maxRecords = 50000 // Safety limit to prevent unbounded memory growth + ) + + // Search for both integration/mcp (new) and runtime/mcp (old) modules + modules := []string{"integration/mcp", "runtime/mcp"} + + totalProcessed := 0 + + for _, module := range modules { + offset := uint32(0) + + for { + // Search for records with this module with pagination + limit := uint32(batchSize) + searchReq := &searchv1.SearchCIDsRequest{ + Queries: []*searchv1.RecordQuery{ + { + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, + Value: module, + }, + }, + Limit: &limit, + Offset: &offset, + } + + result, err := c.client.SearchCIDs(ctx, searchReq) + if err != nil { + return fmt.Errorf("search for existing %s records failed: %w", module, err) + } + + // Collect CIDs from this batch + cids := make([]string, 0, batchSize) + + L: + for { + select { + case resp := <-result.ResCh(): + cid := resp.GetRecordCid() + if cid != "" { + cids = append(cids, cid) + } + case err := <-result.ErrCh(): + return fmt.Errorf("search stream error for %s: %w", module, err) + case <-result.DoneCh(): + break L + case <-ctx.Done(): + return fmt.Errorf("context cancelled: %w", ctx.Err()) + } + } + + // No more results for this module + if len(cids) == 0 { + break + } + + // Convert CIDs to RecordRefs + refs := make([]*corev1.RecordRef, 0, len(cids)) + for _, cid := range cids { + refs = append(refs, &corev1.RecordRef{Cid: cid}) + } + + // Batch pull records from this batch + records, err := c.client.PullBatch(ctx, refs) + if err != nil { + return fmt.Errorf("failed to pull existing %s records: %w", module, err) + } + + // Build the cache: name@version -> cid + c.mu.Lock() + + for _, record := range records { + nameVersion, err := extractNameVersion(record) + if err != nil { + continue + } + + c.existingRecords[nameVersion] = record.GetCid() + } + + c.mu.Unlock() + + totalProcessed += len(cids) + + // Debug logging for batch progress + if c.debug { + fmt.Fprintf(os.Stderr, "[DEDUP] Processed %s batch: %d records (total: %d)\n", module, len(cids), totalProcessed) + os.Stderr.Sync() + } + + // Safety check: prevent unbounded memory growth + if totalProcessed >= maxRecords { + dedupLogger.Warn("Deduplication cache limit reached", + "max_records", maxRecords, + "message", "Some existing records may not be cached. 
Consider using --force to reimport.") + + return nil + } + + // If we got fewer results than requested, we've reached the end + if len(cids) < batchSize { + break + } + + // Move to next batch + offset += uint32(batchSize) + } + } + + return nil +} + +// FilterDuplicates implements the DuplicateChecker interface. +// It filters out duplicate records from the input channel and returns a channel +// with only non-duplicate records. It tracks only the skipped (duplicate) count. +// The transform stage will track the total records that are actually processed. +func (c *MCPDuplicateChecker) FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{} { + outputCh := make(chan interface{}) + + go func() { + defer close(outputCh) + + for { + select { + case <-ctx.Done(): + return + case source, ok := <-inputCh: + if !ok { + return + } + + // Check if duplicate + if c.isDuplicate(source) { + result.mu.Lock() + result.TotalRecords++ + result.SkippedCount++ + result.mu.Unlock() + + continue + } + + // Not a duplicate - pass it through (transform stage will count it) + select { + case outputCh <- source: + case <-ctx.Done(): + return + } + } + } + }() + + return outputCh +} + +// isDuplicate checks if a source record (MCP ServerResponse) is a duplicate. +func (c *MCPDuplicateChecker) isDuplicate(source interface{}) bool { + // Try to extract name@version from the MCP source + nameVersion := extractNameVersionFromSource(source) + if nameVersion == "" { + // Can't determine - not a duplicate (will be processed) + return false + } + + // Check if record already exists + c.mu.RLock() + _, exists := c.existingRecords[nameVersion] + c.mu.RUnlock() + + if exists && c.debug { + fmt.Fprintf(os.Stderr, "[DEDUP] %s is a duplicate\n", nameVersion) + os.Stderr.Sync() + } + + return exists +} + +// extractNameVersionFromSource extracts "name@version" from a raw MCP source. +// This avoids the need to transform the record just to check for duplicates. +func extractNameVersionFromSource(source interface{}) string { + // Try to convert to MCP ServerResponse + switch s := source.(type) { + case mcpapiv0.ServerResponse: + if s.Server.Name != "" && s.Server.Version != "" { + return fmt.Sprintf("%s@%s", s.Server.Name, s.Server.Version) + } + case *mcpapiv0.ServerResponse: + if s != nil && s.Server.Name != "" && s.Server.Version != "" { + return fmt.Sprintf("%s@%s", s.Server.Name, s.Server.Version) + } + } + + return "" +} diff --git a/importer/pipeline/pipeline.go b/importer/pipeline/pipeline.go index d98341cde..ae110a4ef 100644 --- a/importer/pipeline/pipeline.go +++ b/importer/pipeline/pipeline.go @@ -1,330 +1,330 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pipeline - -import ( - "context" - "fmt" - "sync" - - corev1 "github.com/agntcy/dir/api/core/v1" -) - -// Fetcher is an interface for fetching records from an external source. -// Each importer implements this interface to fetch data from their specific registry. -type Fetcher interface { - // Fetch retrieves records from the external source and sends them to the output channel. - // It should close the output channel when done and send any errors to the error channel. - Fetch(ctx context.Context) (<-chan interface{}, <-chan error) -} - -// Transformer is an interface for transforming records from one format to another. -// For example, converting MCP servers to OASF format. 
-type Transformer interface { - // Transform converts a source record to a target format. - Transform(ctx context.Context, source interface{}) (*corev1.Record, error) -} - -// Pusher is an interface for pushing records to the destination (DIR). -type Pusher interface { - // Push pushes records to the destination and returns the result channel and error channel. - Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error) -} - -// DuplicateChecker is an interface for checking and filtering duplicate records. -// This allows filtering duplicates before transformation/enrichment. -type DuplicateChecker interface { - // FilterDuplicates filters out duplicate records from the input channel. - // It tracks total and skipped counts in the provided result. - // Returns a channel with only non-duplicate records. - FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{} -} - -// Config contains configuration for the pipeline. -type Config struct { - // TransformerWorkers is the number of concurrent workers for the transformer stage. - TransformerWorkers int -} - -// Result contains the results of the pipeline execution. -type Result struct { - TotalRecords int - ImportedCount int - SkippedCount int - FailedCount int - Errors []error - mu sync.Mutex -} - -// Pipeline represents a three-stage data processing pipeline. -type Pipeline struct { - fetcher Fetcher - duplicateChecker DuplicateChecker - transformer Transformer - pusher Pusher - config Config -} - -// New creates a new pipeline instance. -// If duplicateChecker is nil, no duplicate filtering will be performed before transformation. -func New(fetcher Fetcher, duplicateChecker DuplicateChecker, transformer Transformer, pusher Pusher, config Config) *Pipeline { - // Set defaults - if config.TransformerWorkers <= 0 { - config.TransformerWorkers = 5 - } - - return &Pipeline{ - fetcher: fetcher, - duplicateChecker: duplicateChecker, - transformer: transformer, - pusher: pusher, - config: config, - } -} - -// Run executes the full pipeline with four stages. 
-func (p *Pipeline) Run(ctx context.Context) (*Result, error) { - result := &Result{} - - // Stage 1: Fetch records - fetchedCh, fetchErrCh := p.fetcher.Fetch(ctx) - - // Stage 2: Filter duplicates (optional - only if duplicate checker is available) - var filteredCh <-chan interface{} - - if p.duplicateChecker != nil { - filteredCh = p.duplicateChecker.FilterDuplicates(ctx, fetchedCh, result) - } else { - filteredCh = fetchedCh - } - - // Stage 3: Transform records (non-duplicates) - transformedCh, transformErrCh := runTransformStage(ctx, p.transformer, p.config.TransformerWorkers, filteredCh, result) - - // Stage 4: Push records - refCh, pushErrCh := p.pusher.Push(ctx, transformedCh) - - // Collect errors from all stages - var wg sync.WaitGroup - - // Fetch errors, transform errors, push errors, and ref counting - wg.Add(4) //nolint:mnd - - // Collect fetch errors - go func() { - defer wg.Done() - - for err := range fetchErrCh { - if err != nil { - result.mu.Lock() - result.Errors = append(result.Errors, fmt.Errorf("fetch error: %w", err)) - result.mu.Unlock() - } - } - }() - - // Collect transform errors - go func() { - defer wg.Done() - - for err := range transformErrCh { - if err != nil { - result.mu.Lock() - result.Errors = append(result.Errors, err) - result.mu.Unlock() - } - } - }() - - // Track successful pushes - go func() { - defer wg.Done() - - for ref := range refCh { - if ref != nil && ref.GetCid() != "" { - // Valid CID - record successfully imported - result.mu.Lock() - result.ImportedCount++ - result.mu.Unlock() - } - } - }() - - // Track push errors - go func() { - defer wg.Done() - - for err := range pushErrCh { - if err != nil { - result.mu.Lock() - result.FailedCount++ - result.Errors = append(result.Errors, err) - result.mu.Unlock() - } - } - }() - - wg.Wait() - - return result, nil -} - -// DryRunPipeline represents a two-stage pipeline for dry-run mode (fetch and transform only). -type DryRunPipeline struct { - fetcher Fetcher - duplicateChecker DuplicateChecker // Optional: provides accurate preview of what would be skipped - transformer Transformer - config Config -} - -// NewDryRun creates a new dry-run pipeline instance that only fetches and transforms. -// If duplicateChecker is provided, it will filter duplicates for an accurate preview. -func NewDryRun(fetcher Fetcher, duplicateChecker DuplicateChecker, transformer Transformer, config Config) *DryRunPipeline { - // Set defaults - if config.TransformerWorkers <= 0 { - config.TransformerWorkers = 5 - } - - return &DryRunPipeline{ - fetcher: fetcher, - duplicateChecker: duplicateChecker, - transformer: transformer, - config: config, - } -} - -// Run executes the dry-run pipeline with only fetch and transform stages. 
-func (p *DryRunPipeline) Run(ctx context.Context) (*Result, error) { - result := &Result{} - - // Stage 1: Fetch records - fetchedCh, fetchErrCh := p.fetcher.Fetch(ctx) - - // Stage 2: Filter duplicates (optional - provides accurate preview) - var filteredCh <-chan interface{} - - if p.duplicateChecker != nil { - // Duplicate checker will filter and track skipped records for accurate preview - filteredCh = p.duplicateChecker.FilterDuplicates(ctx, fetchedCh, result) - } else { - // No duplicate checker - pass through directly - filteredCh = fetchedCh - } - - // Stage 3: Transform records - // Transform stage always tracks all records it processes - transformedCh, transformErrCh := runTransformStage(ctx, p.transformer, p.config.TransformerWorkers, filteredCh, result) - - // Drain the transformed channel to prevent blocking - go func() { - for range transformedCh { - // Just drain, records are counted but not pushed - } - }() - - // Collect errors from fetch and transform stages - var wg sync.WaitGroup - - // Fetch errors and transform errors - wg.Add(2) //nolint:mnd - - // Collect fetch errors - go func() { - defer wg.Done() - - for err := range fetchErrCh { - if err != nil { - result.mu.Lock() - result.Errors = append(result.Errors, fmt.Errorf("fetch error: %w", err)) - result.mu.Unlock() - } - } - }() - - // Collect transform errors - go func() { - defer wg.Done() - - for err := range transformErrCh { - if err != nil { - result.mu.Lock() - result.Errors = append(result.Errors, err) - result.mu.Unlock() - } - } - }() - - wg.Wait() - - return result, nil -} - -// runTransformStage runs the transformation stage with concurrent workers. -// This is a shared function used by both Pipeline and DryRunPipeline. -// It always tracks the total records it processes (non-duplicates after filtering). -// -//nolint:gocognit // Complexity is acceptable for concurrent pipeline stage -func runTransformStage(ctx context.Context, transformer Transformer, numWorkers int, inputCh <-chan interface{}, result *Result) (<-chan *corev1.Record, <-chan error) { - outputCh := make(chan *corev1.Record) - errCh := make(chan error) - - var wg sync.WaitGroup - - // Start transformer workers - for range numWorkers { - wg.Add(1) - - go func() { - defer wg.Done() - - for { - select { - case <-ctx.Done(): - return - case source, ok := <-inputCh: - if !ok { - return - } - - // Track total records processed by this stage - result.mu.Lock() - result.TotalRecords++ - result.mu.Unlock() - - // Transform the record - record, err := transformer.Transform(ctx, source) - if err != nil { - result.mu.Lock() - result.FailedCount++ - result.mu.Unlock() - - select { - case errCh <- fmt.Errorf("transform error: %w", err): - case <-ctx.Done(): - return - } - - continue - } - - // Send transformed record to output channel - select { - case outputCh <- record: - case <-ctx.Done(): - return - } - } - } - }() - } - - // Close output channel when all workers are done - go func() { - wg.Wait() - close(outputCh) - close(errCh) - }() - - return outputCh, errCh -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pipeline + +import ( + "context" + "fmt" + "sync" + + corev1 "github.com/agntcy/dir/api/core/v1" +) + +// Fetcher is an interface for fetching records from an external source. +// Each importer implements this interface to fetch data from their specific registry. +type Fetcher interface { + // Fetch retrieves records from the external source and sends them to the output channel. 
+	// It should close the output channel when done and send any errors to the error channel.
+	Fetch(ctx context.Context) (<-chan interface{}, <-chan error)
+}
+
+// Transformer is an interface for transforming records from one format to another.
+// For example, converting MCP servers to OASF format.
+type Transformer interface {
+	// Transform converts a source record to a target format.
+	Transform(ctx context.Context, source interface{}) (*corev1.Record, error)
+}
+
+// Pusher is an interface for pushing records to the destination (DIR).
+type Pusher interface {
+	// Push pushes records to the destination and returns the result channel and error channel.
+	Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error)
+}
+
+// DuplicateChecker is an interface for checking and filtering duplicate records.
+// This allows filtering duplicates before transformation/enrichment.
+type DuplicateChecker interface {
+	// FilterDuplicates filters out duplicate records from the input channel.
+	// It tracks total and skipped counts in the provided result.
+	// Returns a channel with only non-duplicate records.
+	FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{}
+}
+
+// Config contains configuration for the pipeline.
+type Config struct {
+	// TransformerWorkers is the number of concurrent workers for the transformer stage.
+	TransformerWorkers int
+}
+
+// Result contains the results of the pipeline execution.
+type Result struct {
+	TotalRecords  int
+	ImportedCount int
+	SkippedCount  int
+	FailedCount   int
+	Errors        []error
+	mu            sync.Mutex
+}
+
+// Pipeline represents a four-stage data processing pipeline: fetch, optional
+// duplicate filtering, transform, and push.
+type Pipeline struct {
+	fetcher          Fetcher
+	duplicateChecker DuplicateChecker
+	transformer      Transformer
+	pusher           Pusher
+	config           Config
+}
+
+// New creates a new pipeline instance.
+// If duplicateChecker is nil, no duplicate filtering will be performed before transformation.
+func New(fetcher Fetcher, duplicateChecker DuplicateChecker, transformer Transformer, pusher Pusher, config Config) *Pipeline {
+	// Set defaults
+	if config.TransformerWorkers <= 0 {
+		config.TransformerWorkers = 5
+	}
+
+	return &Pipeline{
+		fetcher:          fetcher,
+		duplicateChecker: duplicateChecker,
+		transformer:      transformer,
+		pusher:           pusher,
+		config:           config,
+	}
+}
+
+// Run executes the full pipeline with four stages.
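+//
+// Typical usage (a sketch; the stage implementations are assumed to be
+// supplied by the caller, and errors are collected on the returned Result):
+//
+//	p := New(fetcher, checker, transformer, pusher, Config{TransformerWorkers: 5})
+//	result, _ := p.Run(ctx)
+//	fmt.Printf("imported %d, skipped %d, failed %d (of %d)\n",
+//		result.ImportedCount, result.SkippedCount, result.FailedCount, result.TotalRecords)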
+func (p *Pipeline) Run(ctx context.Context) (*Result, error) {
+	result := &Result{}
+
+	// Stage 1: Fetch records
+	fetchedCh, fetchErrCh := p.fetcher.Fetch(ctx)
+
+	// Stage 2: Filter duplicates (optional - only if duplicate checker is available)
+	var filteredCh <-chan interface{}
+
+	if p.duplicateChecker != nil {
+		filteredCh = p.duplicateChecker.FilterDuplicates(ctx, fetchedCh, result)
+	} else {
+		filteredCh = fetchedCh
+	}
+
+	// Stage 3: Transform records (non-duplicates)
+	transformedCh, transformErrCh := runTransformStage(ctx, p.transformer, p.config.TransformerWorkers, filteredCh, result)
+
+	// Stage 4: Push records
+	refCh, pushErrCh := p.pusher.Push(ctx, transformedCh)
+
+	// Collect errors from all stages
+	var wg sync.WaitGroup
+
+	// Fetch errors, transform errors, push errors, and ref counting
+	wg.Add(4) //nolint:mnd
+
+	// Collect fetch errors
+	go func() {
+		defer wg.Done()
+
+		for err := range fetchErrCh {
+			if err != nil {
+				result.mu.Lock()
+				result.Errors = append(result.Errors, fmt.Errorf("fetch error: %w", err))
+				result.mu.Unlock()
+			}
+		}
+	}()
+
+	// Collect transform errors
+	go func() {
+		defer wg.Done()
+
+		for err := range transformErrCh {
+			if err != nil {
+				result.mu.Lock()
+				result.Errors = append(result.Errors, err)
+				result.mu.Unlock()
+			}
+		}
+	}()
+
+	// Track successful pushes
+	go func() {
+		defer wg.Done()
+
+		for ref := range refCh {
+			if ref != nil && ref.GetCid() != "" {
+				// Valid CID - record successfully imported
+				result.mu.Lock()
+				result.ImportedCount++
+				result.mu.Unlock()
+			}
+		}
+	}()
+
+	// Track push errors
+	go func() {
+		defer wg.Done()
+
+		for err := range pushErrCh {
+			if err != nil {
+				result.mu.Lock()
+				result.FailedCount++
+				result.Errors = append(result.Errors, err)
+				result.mu.Unlock()
+			}
+		}
+	}()
+
+	wg.Wait()
+
+	return result, nil
+}
+
+// DryRunPipeline represents a dry-run pipeline that fetches and transforms
+// records (with optional duplicate filtering) but never pushes them.
+type DryRunPipeline struct {
+	fetcher          Fetcher
+	duplicateChecker DuplicateChecker // Optional: provides accurate preview of what would be skipped
+	transformer      Transformer
+	config           Config
+}
+
+// NewDryRun creates a new dry-run pipeline instance that only fetches and transforms.
+// If duplicateChecker is provided, it will filter duplicates for an accurate preview.
+func NewDryRun(fetcher Fetcher, duplicateChecker DuplicateChecker, transformer Transformer, config Config) *DryRunPipeline {
+	// Set defaults
+	if config.TransformerWorkers <= 0 {
+		config.TransformerWorkers = 5
+	}
+
+	return &DryRunPipeline{
+		fetcher:          fetcher,
+		duplicateChecker: duplicateChecker,
+		transformer:      transformer,
+		config:           config,
+	}
+}
+
+// Run executes the dry-run pipeline: fetch, optional duplicate filtering, and
+// transform; no records are pushed.
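+//
+// A dry-run sketch (nothing is pushed; pass a duplicate checker for an
+// accurate preview, or nil to count every fetched record):
+//
+//	dry := NewDryRun(fetcher, nil, transformer, Config{})
+//	result, _ := dry.Run(ctx)
+//	fmt.Printf("would process %d of %d records\n",
+//		result.TotalRecords-result.SkippedCount, result.TotalRecords)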
+func (p *DryRunPipeline) Run(ctx context.Context) (*Result, error) { + result := &Result{} + + // Stage 1: Fetch records + fetchedCh, fetchErrCh := p.fetcher.Fetch(ctx) + + // Stage 2: Filter duplicates (optional - provides accurate preview) + var filteredCh <-chan interface{} + + if p.duplicateChecker != nil { + // Duplicate checker will filter and track skipped records for accurate preview + filteredCh = p.duplicateChecker.FilterDuplicates(ctx, fetchedCh, result) + } else { + // No duplicate checker - pass through directly + filteredCh = fetchedCh + } + + // Stage 3: Transform records + // Transform stage always tracks all records it processes + transformedCh, transformErrCh := runTransformStage(ctx, p.transformer, p.config.TransformerWorkers, filteredCh, result) + + // Drain the transformed channel to prevent blocking + go func() { + for range transformedCh { + // Just drain, records are counted but not pushed + } + }() + + // Collect errors from fetch and transform stages + var wg sync.WaitGroup + + // Fetch errors and transform errors + wg.Add(2) //nolint:mnd + + // Collect fetch errors + go func() { + defer wg.Done() + + for err := range fetchErrCh { + if err != nil { + result.mu.Lock() + result.Errors = append(result.Errors, fmt.Errorf("fetch error: %w", err)) + result.mu.Unlock() + } + } + }() + + // Collect transform errors + go func() { + defer wg.Done() + + for err := range transformErrCh { + if err != nil { + result.mu.Lock() + result.Errors = append(result.Errors, err) + result.mu.Unlock() + } + } + }() + + wg.Wait() + + return result, nil +} + +// runTransformStage runs the transformation stage with concurrent workers. +// This is a shared function used by both Pipeline and DryRunPipeline. +// It always tracks the total records it processes (non-duplicates after filtering). 
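+//
+// The workers fan out over inputCh and fan in to a single outputCh/errCh pair;
+// both channels are closed only after every worker has returned, which is what
+// allows downstream stages to simply range over them.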
+// +//nolint:gocognit // Complexity is acceptable for concurrent pipeline stage +func runTransformStage(ctx context.Context, transformer Transformer, numWorkers int, inputCh <-chan interface{}, result *Result) (<-chan *corev1.Record, <-chan error) { + outputCh := make(chan *corev1.Record) + errCh := make(chan error) + + var wg sync.WaitGroup + + // Start transformer workers + for range numWorkers { + wg.Add(1) + + go func() { + defer wg.Done() + + for { + select { + case <-ctx.Done(): + return + case source, ok := <-inputCh: + if !ok { + return + } + + // Track total records processed by this stage + result.mu.Lock() + result.TotalRecords++ + result.mu.Unlock() + + // Transform the record + record, err := transformer.Transform(ctx, source) + if err != nil { + result.mu.Lock() + result.FailedCount++ + result.mu.Unlock() + + select { + case errCh <- fmt.Errorf("transform error: %w", err): + case <-ctx.Done(): + return + } + + continue + } + + // Send transformed record to output channel + select { + case outputCh <- record: + case <-ctx.Done(): + return + } + } + } + }() + } + + // Close output channel when all workers are done + go func() { + wg.Wait() + close(outputCh) + close(errCh) + }() + + return outputCh, errCh +} diff --git a/importer/pipeline/pipeline_test.go b/importer/pipeline/pipeline_test.go index a6cfffae2..2a25aaac7 100644 --- a/importer/pipeline/pipeline_test.go +++ b/importer/pipeline/pipeline_test.go @@ -1,458 +1,458 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pipeline - -import ( - "context" - "errors" - "testing" - - corev1 "github.com/agntcy/dir/api/core/v1" -) - -// mockFetcher is a mock implementation of Fetcher for testing. -type mockFetcher struct { - items []interface{} - err error -} - -func (m *mockFetcher) Fetch(ctx context.Context) (<-chan interface{}, <-chan error) { - dataCh := make(chan interface{}) - errCh := make(chan error, 1) - - go func() { - defer close(dataCh) - defer close(errCh) - - if m.err != nil { - errCh <- m.err - - return - } - - for _, item := range m.items { - select { - case dataCh <- item: - case <-ctx.Done(): - return - } - } - }() - - return dataCh, errCh -} - -// mockTransformer is a mock implementation of Transformer for testing. -type mockTransformer struct { - shouldFail bool -} - -func (m *mockTransformer) Transform(ctx context.Context, source interface{}) (*corev1.Record, error) { - if m.shouldFail { - return nil, errors.New("transform failed") - } - - // Create a simple record - return &corev1.Record{}, nil -} - -// mockPusher is a mock implementation of Pusher for testing. -type mockPusher struct { - shouldFail bool - pushed []*corev1.Record -} - -func (m *mockPusher) Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error) { - refCh := make(chan *corev1.RecordRef) - errCh := make(chan error) - - go func() { - defer close(refCh) - defer close(errCh) - - // Consume all records from the input channel - for record := range inputCh { - m.pushed = append(m.pushed, record) - - if m.shouldFail { - select { - case errCh <- errors.New("push failed"): - case <-ctx.Done(): - return - } - } else { - // Send success response with a valid CID - select { - case refCh <- &corev1.RecordRef{Cid: "bafytest123"}: - case <-ctx.Done(): - return - } - } - } - }() - - return refCh, errCh -} - -// mockDuplicateChecker is a mock implementation of DuplicateChecker for testing. 
-type mockDuplicateChecker struct { - duplicates map[string]bool // items to mark as duplicates -} - -func (m *mockDuplicateChecker) FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{} { - outputCh := make(chan interface{}) - - go func() { - defer close(outputCh) - - for { - select { - case <-ctx.Done(): - return - case source, ok := <-inputCh: - if !ok { - return - } - - // Check if this item is marked as duplicate - itemStr, ok := source.(string) - if ok && m.duplicates[itemStr] { - // Mark as duplicate - increment both total and skipped - result.mu.Lock() - result.TotalRecords++ - result.SkippedCount++ - result.mu.Unlock() - - continue - } - - // Not a duplicate - pass through (transform stage will count it) - select { - case outputCh <- source: - case <-ctx.Done(): - return - } - } - } - }() - - return outputCh -} - -func TestPipeline_Run_Success(t *testing.T) { - ctx := context.Background() - - // Create mock stages - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2", "item3"}, - } - transformer := &mockTransformer{} - pusher := &mockPusher{} - - // Create pipeline - config := Config{ - TransformerWorkers: 2, - } - p := New(fetcher, nil, transformer, pusher, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - if result.TotalRecords != 3 { - t.Errorf("expected 3 total records, got %d", result.TotalRecords) - } - - if result.ImportedCount != 3 { - t.Errorf("expected 3 imported records, got %d", result.ImportedCount) - } - - if result.FailedCount != 0 { - t.Errorf("expected 0 failed records, got %d", result.FailedCount) - } - - if len(pusher.pushed) != 3 { - t.Errorf("expected 3 pushed records, got %d", len(pusher.pushed)) - } -} - -func TestDryRunPipeline_Run(t *testing.T) { - ctx := context.Background() - - // Create mock stages - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2"}, - } - transformer := &mockTransformer{} - - // Create dry-run pipeline (no pusher, no duplicate checker) - config := Config{ - TransformerWorkers: 2, - } - p := NewDryRun(fetcher, nil, transformer, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - records should be counted but not pushed - if result.TotalRecords != 2 { - t.Errorf("expected 2 total records, got %d", result.TotalRecords) - } - - if result.ImportedCount != 0 { - t.Errorf("expected 0 imported records (dry-run), got %d", result.ImportedCount) - } -} - -func TestDryRunPipeline_Run_WithDuplicateChecker(t *testing.T) { - ctx := context.Background() - - // Create mock stages - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2", "item3", "item4"}, - } - transformer := &mockTransformer{} - - // Create duplicate checker that marks item2 and item4 as duplicates - duplicateChecker := &mockDuplicateChecker{ - duplicates: map[string]bool{ - "item2": true, - "item4": true, - }, - } - - // Create dry-run pipeline with duplicate checker for accurate preview - config := Config{ - TransformerWorkers: 2, - } - p := NewDryRun(fetcher, duplicateChecker, transformer, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - should show accurate preview with duplicates filtered - // Total: 4 items - // Skipped: 2 duplicates (item2, item4) - // Processed: 2 items (item1, item3) - // Imported: 0 
(dry-run doesn't actually import) - if result.TotalRecords != 4 { - t.Errorf("expected 4 total records, got %d", result.TotalRecords) - } - - if result.SkippedCount != 2 { - t.Errorf("expected 2 skipped records (duplicates), got %d", result.SkippedCount) - } - - if result.ImportedCount != 0 { - t.Errorf("expected 0 imported records (dry-run), got %d", result.ImportedCount) - } - - if result.FailedCount != 0 { - t.Errorf("expected 0 failed records, got %d", result.FailedCount) - } - - // Verify the math: TotalRecords = SkippedCount + (records that would be processed) - // In dry-run: processed records aren't imported, they're just validated - expectedTotal := result.SkippedCount + result.ImportedCount + result.FailedCount + 2 // 2 would be processed (item1, item3) - if result.TotalRecords != expectedTotal { - t.Logf("In dry-run: total=%d, skipped=%d, would process=%d", - result.TotalRecords, result.SkippedCount, result.TotalRecords-result.SkippedCount) - } -} - -func TestPipeline_Run_TransformError(t *testing.T) { - ctx := context.Background() - - // Create mock stages with transformer that fails - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2"}, - } - transformer := &mockTransformer{shouldFail: true} - pusher := &mockPusher{} - - // Create pipeline - config := Config{ - TransformerWorkers: 2, - } - p := New(fetcher, nil, transformer, pusher, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - all should fail transformation - if result.TotalRecords != 2 { - t.Errorf("expected 2 total records, got %d", result.TotalRecords) - } - - if result.FailedCount != 2 { - t.Errorf("expected 2 failed records, got %d", result.FailedCount) - } - - if result.ImportedCount != 0 { - t.Errorf("expected 0 imported records, got %d", result.ImportedCount) - } - - if len(result.Errors) != 2 { - t.Errorf("expected 2 errors, got %d", len(result.Errors)) - } -} - -func TestPipeline_Run_PushError(t *testing.T) { - ctx := context.Background() - - // Create mock stages with pusher that fails - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2"}, - } - transformer := &mockTransformer{} - pusher := &mockPusher{shouldFail: true} - - // Create pipeline - config := Config{ - TransformerWorkers: 2, - } - p := New(fetcher, nil, transformer, pusher, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - all should fail push - if result.TotalRecords != 2 { - t.Errorf("expected 2 total records, got %d", result.TotalRecords) - } - - if result.FailedCount != 2 { - t.Errorf("expected 2 failed records, got %d", result.FailedCount) - } - - if result.ImportedCount != 0 { - t.Errorf("expected 0 imported records, got %d", result.ImportedCount) - } - - if len(result.Errors) != 2 { - t.Errorf("expected 2 errors, got %d", len(result.Errors)) - } -} - -func TestPipeline_Run_FetchError(t *testing.T) { - ctx := context.Background() - - // Create mock stages with fetcher that fails - fetcher := &mockFetcher{ - err: errors.New("fetch failed"), - } - transformer := &mockTransformer{} - pusher := &mockPusher{} - - // Create pipeline - config := Config{ - TransformerWorkers: 2, - } - p := New(fetcher, nil, transformer, pusher, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - should have fetch error - if len(result.Errors) == 0 { - 
t.Error("expected at least one error") - } -} - -func TestPipeline_ConfigDefaults(t *testing.T) { - fetcher := &mockFetcher{} - transformer := &mockTransformer{} - pusher := &mockPusher{} - - // Create pipeline with zero config values - config := Config{} - p := New(fetcher, nil, transformer, pusher, config) - - // Verify defaults are set - if p.config.TransformerWorkers != 5 { - t.Errorf("expected default TransformerWorkers=5, got %d", p.config.TransformerWorkers) - } -} - -func TestPipeline_Run_WithDuplicateChecker(t *testing.T) { - ctx := context.Background() - - // Create mock stages - fetcher := &mockFetcher{ - items: []interface{}{"item1", "item2", "item3", "item4", "item5"}, - } - transformer := &mockTransformer{} - pusher := &mockPusher{} - - // Create duplicate checker that marks item2 and item4 as duplicates - duplicateChecker := &mockDuplicateChecker{ - duplicates: map[string]bool{ - "item2": true, - "item4": true, - }, - } - - // Create pipeline with duplicate checker - config := Config{ - TransformerWorkers: 2, - } - p := New(fetcher, duplicateChecker, transformer, pusher, config) - - // Run pipeline - result, err := p.Run(ctx) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - - // Verify results - // Total: 5 items - // Skipped: 2 duplicates (item2, item4) - // Processed: 3 items (item1, item3, item5) - // Imported: 3 (all processed items succeeded) - if result.TotalRecords != 5 { - t.Errorf("expected 5 total records, got %d", result.TotalRecords) - } - - if result.SkippedCount != 2 { - t.Errorf("expected 2 skipped records, got %d", result.SkippedCount) - } - - if result.ImportedCount != 3 { - t.Errorf("expected 3 imported records, got %d", result.ImportedCount) - } - - if result.FailedCount != 0 { - t.Errorf("expected 0 failed records, got %d", result.FailedCount) - } - - // Verify only 3 records were pushed (duplicates filtered before transformation) - if len(pusher.pushed) != 3 { - t.Errorf("expected 3 pushed records, got %d", len(pusher.pushed)) - } - - // Verify the math: TotalRecords = SkippedCount + ImportedCount + FailedCount - expectedTotal := result.SkippedCount + result.ImportedCount + result.FailedCount - if result.TotalRecords != expectedTotal { - t.Errorf("total records mismatch: %d != %d (skipped) + %d (imported) + %d (failed)", - result.TotalRecords, result.SkippedCount, result.ImportedCount, result.FailedCount) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pipeline + +import ( + "context" + "errors" + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" +) + +// mockFetcher is a mock implementation of Fetcher for testing. +type mockFetcher struct { + items []interface{} + err error +} + +func (m *mockFetcher) Fetch(ctx context.Context) (<-chan interface{}, <-chan error) { + dataCh := make(chan interface{}) + errCh := make(chan error, 1) + + go func() { + defer close(dataCh) + defer close(errCh) + + if m.err != nil { + errCh <- m.err + + return + } + + for _, item := range m.items { + select { + case dataCh <- item: + case <-ctx.Done(): + return + } + } + }() + + return dataCh, errCh +} + +// mockTransformer is a mock implementation of Transformer for testing. 
+type mockTransformer struct { + shouldFail bool +} + +func (m *mockTransformer) Transform(ctx context.Context, source interface{}) (*corev1.Record, error) { + if m.shouldFail { + return nil, errors.New("transform failed") + } + + // Create a simple record + return &corev1.Record{}, nil +} + +// mockPusher is a mock implementation of Pusher for testing. +type mockPusher struct { + shouldFail bool + pushed []*corev1.Record +} + +func (m *mockPusher) Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error) { + refCh := make(chan *corev1.RecordRef) + errCh := make(chan error) + + go func() { + defer close(refCh) + defer close(errCh) + + // Consume all records from the input channel + for record := range inputCh { + m.pushed = append(m.pushed, record) + + if m.shouldFail { + select { + case errCh <- errors.New("push failed"): + case <-ctx.Done(): + return + } + } else { + // Send success response with a valid CID + select { + case refCh <- &corev1.RecordRef{Cid: "bafytest123"}: + case <-ctx.Done(): + return + } + } + } + }() + + return refCh, errCh +} + +// mockDuplicateChecker is a mock implementation of DuplicateChecker for testing. +type mockDuplicateChecker struct { + duplicates map[string]bool // items to mark as duplicates +} + +func (m *mockDuplicateChecker) FilterDuplicates(ctx context.Context, inputCh <-chan interface{}, result *Result) <-chan interface{} { + outputCh := make(chan interface{}) + + go func() { + defer close(outputCh) + + for { + select { + case <-ctx.Done(): + return + case source, ok := <-inputCh: + if !ok { + return + } + + // Check if this item is marked as duplicate + itemStr, ok := source.(string) + if ok && m.duplicates[itemStr] { + // Mark as duplicate - increment both total and skipped + result.mu.Lock() + result.TotalRecords++ + result.SkippedCount++ + result.mu.Unlock() + + continue + } + + // Not a duplicate - pass through (transform stage will count it) + select { + case outputCh <- source: + case <-ctx.Done(): + return + } + } + } + }() + + return outputCh +} + +func TestPipeline_Run_Success(t *testing.T) { + ctx := context.Background() + + // Create mock stages + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2", "item3"}, + } + transformer := &mockTransformer{} + pusher := &mockPusher{} + + // Create pipeline + config := Config{ + TransformerWorkers: 2, + } + p := New(fetcher, nil, transformer, pusher, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results + if result.TotalRecords != 3 { + t.Errorf("expected 3 total records, got %d", result.TotalRecords) + } + + if result.ImportedCount != 3 { + t.Errorf("expected 3 imported records, got %d", result.ImportedCount) + } + + if result.FailedCount != 0 { + t.Errorf("expected 0 failed records, got %d", result.FailedCount) + } + + if len(pusher.pushed) != 3 { + t.Errorf("expected 3 pushed records, got %d", len(pusher.pushed)) + } +} + +func TestDryRunPipeline_Run(t *testing.T) { + ctx := context.Background() + + // Create mock stages + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2"}, + } + transformer := &mockTransformer{} + + // Create dry-run pipeline (no pusher, no duplicate checker) + config := Config{ + TransformerWorkers: 2, + } + p := NewDryRun(fetcher, nil, transformer, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results - records should be counted 
but not pushed + if result.TotalRecords != 2 { + t.Errorf("expected 2 total records, got %d", result.TotalRecords) + } + + if result.ImportedCount != 0 { + t.Errorf("expected 0 imported records (dry-run), got %d", result.ImportedCount) + } +} + +func TestDryRunPipeline_Run_WithDuplicateChecker(t *testing.T) { + ctx := context.Background() + + // Create mock stages + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2", "item3", "item4"}, + } + transformer := &mockTransformer{} + + // Create duplicate checker that marks item2 and item4 as duplicates + duplicateChecker := &mockDuplicateChecker{ + duplicates: map[string]bool{ + "item2": true, + "item4": true, + }, + } + + // Create dry-run pipeline with duplicate checker for accurate preview + config := Config{ + TransformerWorkers: 2, + } + p := NewDryRun(fetcher, duplicateChecker, transformer, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results - should show accurate preview with duplicates filtered + // Total: 4 items + // Skipped: 2 duplicates (item2, item4) + // Processed: 2 items (item1, item3) + // Imported: 0 (dry-run doesn't actually import) + if result.TotalRecords != 4 { + t.Errorf("expected 4 total records, got %d", result.TotalRecords) + } + + if result.SkippedCount != 2 { + t.Errorf("expected 2 skipped records (duplicates), got %d", result.SkippedCount) + } + + if result.ImportedCount != 0 { + t.Errorf("expected 0 imported records (dry-run), got %d", result.ImportedCount) + } + + if result.FailedCount != 0 { + t.Errorf("expected 0 failed records, got %d", result.FailedCount) + } + + // Verify the math: TotalRecords = SkippedCount + (records that would be processed) + // In dry-run: processed records aren't imported, they're just validated + expectedTotal := result.SkippedCount + result.ImportedCount + result.FailedCount + 2 // 2 would be processed (item1, item3) + if result.TotalRecords != expectedTotal { + t.Errorf("dry-run accounting mismatch: total=%d, want=%d (skipped=%d, would process=%d)", + result.TotalRecords, expectedTotal, result.SkippedCount, result.TotalRecords-result.SkippedCount) + } +} + +func TestPipeline_Run_TransformError(t *testing.T) { + ctx := context.Background() + + // Create mock stages with transformer that fails + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2"}, + } + transformer := &mockTransformer{shouldFail: true} + pusher := &mockPusher{} + + // Create pipeline + config := Config{ + TransformerWorkers: 2, + } + p := New(fetcher, nil, transformer, pusher, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results - all should fail transformation + if result.TotalRecords != 2 { + t.Errorf("expected 2 total records, got %d", result.TotalRecords) + } + + if result.FailedCount != 2 { + t.Errorf("expected 2 failed records, got %d", result.FailedCount) + } + + if result.ImportedCount != 0 { + t.Errorf("expected 0 imported records, got %d", result.ImportedCount) + } + + if len(result.Errors) != 2 { + t.Errorf("expected 2 errors, got %d", len(result.Errors)) + } +} + +func TestPipeline_Run_PushError(t *testing.T) { + ctx := context.Background() + + // Create mock stages with pusher that fails + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2"}, + } + transformer := &mockTransformer{} + pusher := &mockPusher{shouldFail: true} + + // Create pipeline + config := Config{ + TransformerWorkers: 2, + } + p := New(fetcher, nil,
transformer, pusher, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results - all should fail push + if result.TotalRecords != 2 { + t.Errorf("expected 2 total records, got %d", result.TotalRecords) + } + + if result.FailedCount != 2 { + t.Errorf("expected 2 failed records, got %d", result.FailedCount) + } + + if result.ImportedCount != 0 { + t.Errorf("expected 0 imported records, got %d", result.ImportedCount) + } + + if len(result.Errors) != 2 { + t.Errorf("expected 2 errors, got %d", len(result.Errors)) + } +} + +func TestPipeline_Run_FetchError(t *testing.T) { + ctx := context.Background() + + // Create mock stages with fetcher that fails + fetcher := &mockFetcher{ + err: errors.New("fetch failed"), + } + transformer := &mockTransformer{} + pusher := &mockPusher{} + + // Create pipeline + config := Config{ + TransformerWorkers: 2, + } + p := New(fetcher, nil, transformer, pusher, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results - should have fetch error + if len(result.Errors) == 0 { + t.Error("expected at least one error") + } +} + +func TestPipeline_ConfigDefaults(t *testing.T) { + fetcher := &mockFetcher{} + transformer := &mockTransformer{} + pusher := &mockPusher{} + + // Create pipeline with zero config values + config := Config{} + p := New(fetcher, nil, transformer, pusher, config) + + // Verify defaults are set + if p.config.TransformerWorkers != 5 { + t.Errorf("expected default TransformerWorkers=5, got %d", p.config.TransformerWorkers) + } +} + +func TestPipeline_Run_WithDuplicateChecker(t *testing.T) { + ctx := context.Background() + + // Create mock stages + fetcher := &mockFetcher{ + items: []interface{}{"item1", "item2", "item3", "item4", "item5"}, + } + transformer := &mockTransformer{} + pusher := &mockPusher{} + + // Create duplicate checker that marks item2 and item4 as duplicates + duplicateChecker := &mockDuplicateChecker{ + duplicates: map[string]bool{ + "item2": true, + "item4": true, + }, + } + + // Create pipeline with duplicate checker + config := Config{ + TransformerWorkers: 2, + } + p := New(fetcher, duplicateChecker, transformer, pusher, config) + + // Run pipeline + result, err := p.Run(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + // Verify results + // Total: 5 items + // Skipped: 2 duplicates (item2, item4) + // Processed: 3 items (item1, item3, item5) + // Imported: 3 (all processed items succeeded) + if result.TotalRecords != 5 { + t.Errorf("expected 5 total records, got %d", result.TotalRecords) + } + + if result.SkippedCount != 2 { + t.Errorf("expected 2 skipped records, got %d", result.SkippedCount) + } + + if result.ImportedCount != 3 { + t.Errorf("expected 3 imported records, got %d", result.ImportedCount) + } + + if result.FailedCount != 0 { + t.Errorf("expected 0 failed records, got %d", result.FailedCount) + } + + // Verify only 3 records were pushed (duplicates filtered before transformation) + if len(pusher.pushed) != 3 { + t.Errorf("expected 3 pushed records, got %d", len(pusher.pushed)) + } + + // Verify the math: TotalRecords = SkippedCount + ImportedCount + FailedCount + expectedTotal := result.SkippedCount + result.ImportedCount + result.FailedCount + if result.TotalRecords != expectedTotal { + t.Errorf("total records mismatch: %d != %d (skipped) + %d (imported) + %d (failed)", + result.TotalRecords, result.SkippedCount, 
result.ImportedCount, result.FailedCount) + } +} diff --git a/importer/pipeline/pusher.go b/importer/pipeline/pusher.go index af97f39ca..984594513 100644 --- a/importer/pipeline/pusher.go +++ b/importer/pipeline/pusher.go @@ -1,181 +1,181 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pipeline - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/protobuf/encoding/protojson" -) - -var logger = logging.Logger("importer/pipeline") - -// ClientPusher is a Pusher implementation that uses the DIR client. -type ClientPusher struct { - client config.ClientInterface - debug bool -} - -// NewClientPusher creates a new ClientPusher. -func NewClientPusher(client config.ClientInterface, debug bool) *ClientPusher { - return &ClientPusher{ - client: client, - debug: debug, - } -} - -// Push sends records to DIR using the client. -// -// IMPLEMENTATION NOTE: -// This implementation pushes records sequentially (one-by-one) instead of using -// batch/streaming push. This is a temporary workaround because the current gRPC -// streaming implementation terminates the entire stream when a single record fails -// validation, preventing subsequent records from being processed. -// -// TODO: Switch back to streaming/batch push (PushStream) once the server-side -// implementation is updated to: -// 1. Return per-record error responses instead of terminating the stream -// 2. Allow the stream to continue processing remaining records after individual failures -// 3. This will require updating the proto to support a response type that can carry -// either a RecordRef (success) or an error message (failure) -// -// The sequential approach ensures all records are attempted, even if some fail, -// at the cost of reduced throughput and increased latency. -func (p *ClientPusher) Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error) { - refCh := make(chan *corev1.RecordRef) - errCh := make(chan error) - - go func() { - defer close(refCh) - defer close(errCh) - - // Push records one-by-one to ensure all records are processed - // even if some fail validation - for record := range inputCh { - // Extract and remove debug source before pushing - var mcpSourceJSON string - - if record.GetData() != nil && record.Data.Fields != nil { - if debugField, ok := record.GetData().GetFields()["__mcp_debug_source"]; ok { - mcpSourceJSON = debugField.GetStringValue() - // Remove debug field before validation - delete(record.GetData().GetFields(), "__mcp_debug_source") - } - } - - ref, err := p.client.Push(ctx, record) - if err != nil { - p.handlePushError(err, record, mcpSourceJSON, errCh, ctx) - - continue - } - - // Send reference (success) - select { - case refCh <- ref: - case <-ctx.Done(): - return - } - } - }() - - return refCh, errCh -} - -// handlePushError handles push errors and sends them to the error channel. 
-func (p *ClientPusher) handlePushError(err error, record *corev1.Record, mcpSourceJSON string, errCh chan<- error, ctx context.Context) { - logger.Debug("Failed to push record", "error", err, "record", record) - - // Print detailed debug output if debug flag is set - if p.debug && mcpSourceJSON != "" { - p.printPushFailure(record, mcpSourceJSON, err.Error()) - } - - // Send error but continue processing remaining records - select { - case errCh <- err: - case <-ctx.Done(): - } -} - -// printPushFailure prints detailed debug information about a push failure. -func (p *ClientPusher) printPushFailure(record *corev1.Record, mcpSourceJSON, errorMsg string) { - // Extract name@version for header - nameVersion, _ := extractNameVersion(record) - if nameVersion == "" { - nameVersion = "unknown" - } - - fmt.Fprintf(os.Stderr, "\n========================================\n") - fmt.Fprintf(os.Stderr, "PUSH FAILED for: %s\n", nameVersion) - fmt.Fprintf(os.Stderr, "Error: %s\n", errorMsg) - fmt.Fprintf(os.Stderr, "========================================\n") - fmt.Fprintf(os.Stderr, "Original MCP Source:\n%s\n", formatJSON(mcpSourceJSON)) - fmt.Fprintf(os.Stderr, "----------------------------------------\n") - - // Print the generated OASF record - if recordBytes, err := protojson.Marshal(record.GetData()); err == nil { - fmt.Fprintf(os.Stderr, "Generated OASF Record:\n%s\n", formatJSON(string(recordBytes))) - } - - fmt.Fprintf(os.Stderr, "========================================\n\n") - os.Stderr.Sync() -} - -// formatJSON attempts to pretty-print JSON, fallback to raw string. -func formatJSON(jsonStr string) string { - var obj interface{} - if err := json.Unmarshal([]byte(jsonStr), &obj); err != nil { - return jsonStr - } - - if pretty, err := json.MarshalIndent(obj, "", " "); err == nil { - return string(pretty) - } - - return jsonStr -} - -// extractNameVersion extracts "name@version" from a record. -func extractNameVersion(record *corev1.Record) (string, error) { - if record == nil || record.GetData() == nil { - return "", errors.New("record or record data is nil") - } - - fields := record.GetData().GetFields() - if fields == nil { - return "", errors.New("record data fields are nil") - } - - // Extract name - nameVal, ok := fields["name"] - if !ok { - return "", errors.New("record missing 'name' field") - } - - name := nameVal.GetStringValue() - if name == "" { - return "", errors.New("record 'name' field is empty") - } - - // Extract version - versionVal, ok := fields["version"] - if !ok { - return "", errors.New("record missing 'version' field") - } - - version := versionVal.GetStringValue() - if version == "" { - return "", errors.New("record 'version' field is empty") - } - - return fmt.Sprintf("%s@%s", name, version), nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pipeline + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/protobuf/encoding/protojson" +) + +var logger = logging.Logger("importer/pipeline") + +// ClientPusher is a Pusher implementation that uses the DIR client. +type ClientPusher struct { + client config.ClientInterface + debug bool +} + +// NewClientPusher creates a new ClientPusher. 
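+//
+// A minimal usage sketch, assuming cli implements config.ClientInterface and
+// recordCh is an existing <-chan *corev1.Record (both hypothetical here):
+//
+//	pusher := NewClientPusher(cli, false)
+//	refCh, errCh := pusher.Push(ctx, recordCh)
+//	// Drain refCh and errCh concurrently; the internal goroutine blocks on
+//	// its unbuffered sends until both channels are consumed.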
+func NewClientPusher(client config.ClientInterface, debug bool) *ClientPusher { + return &ClientPusher{ + client: client, + debug: debug, + } +} + +// Push sends records to DIR using the client. +// +// IMPLEMENTATION NOTE: +// This implementation pushes records sequentially (one-by-one) instead of using +// batch/streaming push. This is a temporary workaround because the current gRPC +// streaming implementation terminates the entire stream when a single record fails +// validation, preventing subsequent records from being processed. +// +// TODO: Switch back to streaming/batch push (PushStream) once the server-side +// implementation is updated to: +// 1. Return per-record error responses instead of terminating the stream +// 2. Allow the stream to continue processing remaining records after individual failures +// 3. Extend the proto with a response type that can carry +// either a RecordRef (success) or an error message (failure) +// +// The sequential approach ensures all records are attempted, even if some fail, +// at the cost of reduced throughput and increased latency. +func (p *ClientPusher) Push(ctx context.Context, inputCh <-chan *corev1.Record) (<-chan *corev1.RecordRef, <-chan error) { + refCh := make(chan *corev1.RecordRef) + errCh := make(chan error) + + go func() { + defer close(refCh) + defer close(errCh) + + // Push records one-by-one to ensure all records are processed + // even if some fail validation + for record := range inputCh { + // Extract and remove debug source before pushing + var mcpSourceJSON string + + if record.GetData() != nil && record.Data.Fields != nil { + if debugField, ok := record.GetData().GetFields()["__mcp_debug_source"]; ok { + mcpSourceJSON = debugField.GetStringValue() + // Remove debug field before validation + delete(record.GetData().GetFields(), "__mcp_debug_source") + } + } + + ref, err := p.client.Push(ctx, record) + if err != nil { + p.handlePushError(err, record, mcpSourceJSON, errCh, ctx) + + continue + } + + // Send reference (success) + select { + case refCh <- ref: + case <-ctx.Done(): + return + } + } + }() + + return refCh, errCh +} + +// handlePushError handles push errors and sends them to the error channel. +func (p *ClientPusher) handlePushError(err error, record *corev1.Record, mcpSourceJSON string, errCh chan<- error, ctx context.Context) { + logger.Debug("Failed to push record", "error", err, "record", record) + + // Print detailed debug output if debug flag is set + if p.debug && mcpSourceJSON != "" { + p.printPushFailure(record, mcpSourceJSON, err.Error()) + } + + // Send error but continue processing remaining records + select { + case errCh <- err: + case <-ctx.Done(): + } +} + +// printPushFailure prints detailed debug information about a push failure.
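+// The stderr block it emits looks roughly like this (values illustrative):
+//
+//	========================================
+//	PUSH FAILED for: my-agent@1.0.0
+//	Error: <push error message>
+//	========================================
+//	Original MCP Source:
+//	{ ...pretty-printed JSON... }
+//	----------------------------------------
+//	Generated OASF Record:
+//	{ ...pretty-printed JSON... }
+//	========================================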
+func (p *ClientPusher) printPushFailure(record *corev1.Record, mcpSourceJSON, errorMsg string) { + // Extract name@version for header + nameVersion, _ := extractNameVersion(record) + if nameVersion == "" { + nameVersion = "unknown" + } + + fmt.Fprintf(os.Stderr, "\n========================================\n") + fmt.Fprintf(os.Stderr, "PUSH FAILED for: %s\n", nameVersion) + fmt.Fprintf(os.Stderr, "Error: %s\n", errorMsg) + fmt.Fprintf(os.Stderr, "========================================\n") + fmt.Fprintf(os.Stderr, "Original MCP Source:\n%s\n", formatJSON(mcpSourceJSON)) + fmt.Fprintf(os.Stderr, "----------------------------------------\n") + + // Print the generated OASF record + if recordBytes, err := protojson.Marshal(record.GetData()); err == nil { + fmt.Fprintf(os.Stderr, "Generated OASF Record:\n%s\n", formatJSON(string(recordBytes))) + } + + fmt.Fprintf(os.Stderr, "========================================\n\n") + os.Stderr.Sync() +} + +// formatJSON attempts to pretty-print JSON, falling back to the raw string. +func formatJSON(jsonStr string) string { + var obj interface{} + if err := json.Unmarshal([]byte(jsonStr), &obj); err != nil { + return jsonStr + } + + if pretty, err := json.MarshalIndent(obj, "", " "); err == nil { + return string(pretty) + } + + return jsonStr +} + +// extractNameVersion extracts "name@version" from a record. +func extractNameVersion(record *corev1.Record) (string, error) { + if record == nil || record.GetData() == nil { + return "", errors.New("record or record data is nil") + } + + fields := record.GetData().GetFields() + if fields == nil { + return "", errors.New("record data fields are nil") + } + + // Extract name + nameVal, ok := fields["name"] + if !ok { + return "", errors.New("record missing 'name' field") + } + + name := nameVal.GetStringValue() + if name == "" { + return "", errors.New("record 'name' field is empty") + } + + // Extract version + versionVal, ok := fields["version"] + if !ok { + return "", errors.New("record missing 'version' field") + } + + version := versionVal.GetStringValue() + if version == "" { + return "", errors.New("record 'version' field is empty") + } + + return fmt.Sprintf("%s@%s", name, version), nil +} diff --git a/importer/types/factory/factory.go b/importer/types/factory/factory.go index 5a1ad400f..195077ddc 100644 --- a/importer/types/factory/factory.go +++ b/importer/types/factory/factory.go @@ -1,79 +1,79 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package factory - -import ( - "fmt" - "sync" - - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/types" -) - -// ImporterFunc is a function that creates an Importer instance. -type ImporterFunc func(client config.ClientInterface, cfg config.Config) (types.Importer, error) - -var ( - importers = make(map[config.RegistryType]ImporterFunc) - mu sync.RWMutex -) - -// Register registers a function that creates an Importer instance for a given registry type. -// It panics if the same registry type is registered twice to prevent duplications at compile-time. -func Register(registryType config.RegistryType, fn ImporterFunc) { - mu.Lock() - defer mu.Unlock() - - if _, exists := importers[registryType]; exists { - panic(fmt.Sprintf("importer already registered for registry type: %s", registryType)) - } - - importers[registryType] = fn -} - -// Create creates a new Importer instance for the given client and configuration.
-func Create(client config.ClientInterface, cfg config.Config) (types.Importer, error) { - mu.RLock() - - constructor, exists := importers[cfg.RegistryType] - - mu.RUnlock() - - if !exists { - return nil, fmt.Errorf("unsupported registry type: %s", cfg.RegistryType) - } - - return constructor(client, cfg) -} - -// RegisteredTypes returns a list of all registered registry types. -func RegisteredTypes() []config.RegistryType { - mu.RLock() - defer mu.RUnlock() - - types := make([]config.RegistryType, 0, len(importers)) - for t := range importers { - types = append(types, t) - } - - return types -} - -// IsRegistered checks if a registry type is registered. -func IsRegistered(registryType config.RegistryType) bool { - mu.RLock() - defer mu.RUnlock() - - _, exists := importers[registryType] - - return exists -} - -// Reset clears all registered importers. This is primarily useful for testing. -func Reset() { - mu.Lock() - defer mu.Unlock() - - importers = make(map[config.RegistryType]ImporterFunc) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package factory + +import ( + "fmt" + "sync" + + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/importer/types" +) + +// ImporterFunc is a function that creates an Importer instance. +type ImporterFunc func(client config.ClientInterface, cfg config.Config) (types.Importer, error) + +var ( + importers = make(map[config.RegistryType]ImporterFunc) + mu sync.RWMutex +) + +// Register registers a function that creates an Importer instance for a given registry type. +// It panics if the same registry type is registered twice, so duplicate registrations fail loudly at init time. +func Register(registryType config.RegistryType, fn ImporterFunc) { + mu.Lock() + defer mu.Unlock() + + if _, exists := importers[registryType]; exists { + panic(fmt.Sprintf("importer already registered for registry type: %s", registryType)) + } + + importers[registryType] = fn +} + +// Create creates a new Importer instance for the given client and configuration. +func Create(client config.ClientInterface, cfg config.Config) (types.Importer, error) { + mu.RLock() + + constructor, exists := importers[cfg.RegistryType] + + mu.RUnlock() + + if !exists { + return nil, fmt.Errorf("unsupported registry type: %s", cfg.RegistryType) + } + + return constructor(client, cfg) +} + +// RegisteredTypes returns a list of all registered registry types. +func RegisteredTypes() []config.RegistryType { + mu.RLock() + defer mu.RUnlock() + + types := make([]config.RegistryType, 0, len(importers)) + for t := range importers { + types = append(types, t) + } + + return types +} + +// IsRegistered checks if a registry type is registered. +func IsRegistered(registryType config.RegistryType) bool { + mu.RLock() + defer mu.RUnlock() + + _, exists := importers[registryType] + + return exists +} + +// Reset clears all registered importers. This is primarily useful for testing.
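+//
+// In tests, pair Reset with defer so registrations never leak across cases
+// (mockConstructor is a hypothetical ImporterFunc):
+//
+//	Reset()
+//	defer Reset()
+//	Register(config.RegistryTypeMCP, mockConstructor)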
+func Reset() { + mu.Lock() + defer mu.Unlock() + + importers = make(map[config.RegistryType]ImporterFunc) +} diff --git a/importer/types/factory/factory_test.go b/importer/types/factory/factory_test.go index 8933e1f01..341dcfaa1 100644 --- a/importer/types/factory/factory_test.go +++ b/importer/types/factory/factory_test.go @@ -1,311 +1,311 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package factory - -import ( - "context" - "errors" - "strings" - "testing" - - corev1 "github.com/agntcy/dir/api/core/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/client/streaming" - "github.com/agntcy/dir/importer/config" - "github.com/agntcy/dir/importer/types" -) - -// mockImporter is a mock implementation for testing. -type mockImporter struct { - runCalled bool -} - -func (m *mockImporter) Run(ctx context.Context, cfg config.Config) (*types.ImportResult, error) { - m.runCalled = true - - return &types.ImportResult{TotalRecords: 10}, nil -} - -// mockClient is a mock implementation for testing. -type mockClient struct{} - -func (m *mockClient) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - return &corev1.RecordRef{}, nil -} - -func (m *mockClient) PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) { - return []*corev1.Record{}, nil -} - -func (m *mockClient) SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) { - return &mockStreamResult{}, nil -} - -// mockStreamResult implements streaming.StreamResult for testing. -type mockStreamResult struct{} - -func (m *mockStreamResult) ResCh() <-chan *searchv1.SearchCIDsResponse { - ch := make(chan *searchv1.SearchCIDsResponse) - close(ch) - - return ch -} - -func (m *mockStreamResult) ErrCh() <-chan error { - ch := make(chan error) - close(ch) - - return ch -} - -func (m *mockStreamResult) DoneCh() <-chan struct{} { - ch := make(chan struct{}) - close(ch) - - return ch -} - -// Mock constructor functions. 
-func mockMCPConstructor(client config.ClientInterface, cfg config.Config) (types.Importer, error) { - return &mockImporter{}, nil -} - -func mockFailingConstructor(client config.ClientInterface, cfg config.Config) (types.Importer, error) { - return nil, errors.New("construction failed") -} - -func TestRegister(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - // Register a constructor - Register(config.RegistryTypeMCP, mockMCPConstructor) - - // Verify it was registered - if !IsRegistered(config.RegistryTypeMCP) { - t.Error("Register() did not register constructor") - } -} - -func TestRegisterPanic(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - // Register once should succeed - Register(config.RegistryTypeMCP, mockMCPConstructor) - - // Register again should panic - defer func() { - if r := recover(); r == nil { - t.Error("Register() should panic on duplicate registration") - } - }() - - Register(config.RegistryTypeMCP, mockMCPConstructor) -} - -func TestCreate(t *testing.T) { - tests := []struct { - name string - registryType config.RegistryType - registerFirst bool - wantErr bool - errContains string - }{ - { - name: "successful creation", - registryType: config.RegistryTypeMCP, - registerFirst: true, - wantErr: false, - }, - { - name: "unregistered registry type", - registryType: "unknown", - registerFirst: false, - wantErr: true, - errContains: "unsupported registry type", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Reset registry before each test - Reset() - defer Reset() - - if tt.registerFirst { - Register(tt.registryType, mockMCPConstructor) - } - - cfg := config.Config{ - RegistryType: tt.registryType, - RegistryURL: "https://example.com", - } - - mockCli := &mockClient{} - importer, err := Create(mockCli, cfg) - - if (err != nil) != tt.wantErr { - t.Errorf("Create() error = %v, wantErr %v", err, tt.wantErr) - - return - } - - if tt.wantErr { - if err == nil || !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("Create() error = %v, want error containing %q", err, tt.errContains) - } - - return - } - - if importer == nil { - t.Error("Create() returned nil importer") - } - }) - } -} - -func TestCreateWithFailingConstructor(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - Register(config.RegistryTypeMCP, mockFailingConstructor) - - cfg := config.Config{ - RegistryType: config.RegistryTypeMCP, - RegistryURL: "https://example.com", - } - - mockCli := &mockClient{} - - importer, err := Create(mockCli, cfg) - if err == nil { - t.Error("Create() with failing constructor should return error") - } - - if importer != nil { - t.Error("Create() with failing constructor should return nil importer") - } - - if err.Error() != "construction failed" { - t.Errorf("Create() error = %v, want 'construction failed'", err) - } -} - -func TestMultipleRegistrations(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - // Register multiple types - Register(config.RegistryTypeMCP, mockMCPConstructor) - Register("custom", mockMCPConstructor) - - // Verify both are accessible - mockCli := &mockClient{} - cfg1 := config.Config{RegistryType: config.RegistryTypeMCP, RegistryURL: "https://mcp.example.com"} - - importer1, err := Create(mockCli, cfg1) - if err != nil { - t.Errorf("Create() for MCP failed: %v", err) - } - - if importer1 == nil { - t.Error("Create() for MCP returned nil") - } - - cfg2 := config.Config{RegistryType: "custom", RegistryURL: 
"https://custom.example.com"} - - importer2, err := Create(mockCli, cfg2) - if err != nil { - t.Errorf("Create() for custom failed: %v", err) - } - - if importer2 == nil { - t.Error("Create() for custom returned nil") - } -} - -func TestCreateMultipleInstancesWithDifferentURLs(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - Register(config.RegistryTypeMCP, mockMCPConstructor) - - mockCli := &mockClient{} - - // Create two importers with different URLs - cfg1 := config.Config{ - RegistryType: config.RegistryTypeMCP, - RegistryURL: "https://registry1.example.com", - } - importer1, err1 := Create(mockCli, cfg1) - - cfg2 := config.Config{ - RegistryType: config.RegistryTypeMCP, - RegistryURL: "https://registry2.example.com", - } - importer2, err2 := Create(mockCli, cfg2) - - if err1 != nil || err2 != nil { - t.Errorf("Create() failed: err1=%v, err2=%v", err1, err2) - } - - if importer1 == nil || importer2 == nil { - t.Error("Create() returned nil importers") - } - - // Verify they are different instances - if importer1 == importer2 { - t.Error("Create() returned same instance for different configs") - } -} - -func TestRegisteredTypes(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - // Initially should be empty - types := RegisteredTypes() - if len(types) != 0 { - t.Errorf("RegisteredTypes() = %v, want empty slice", types) - } - - // Register some types - Register(config.RegistryTypeMCP, mockMCPConstructor) - Register("custom", mockMCPConstructor) - - types = RegisteredTypes() - if len(types) != 2 { - t.Errorf("RegisteredTypes() returned %d types, want 2", len(types)) - } -} - -func TestIsRegistered(t *testing.T) { - // Reset registry before test - Reset() - defer Reset() - - // Initially nothing registered - if IsRegistered(config.RegistryTypeMCP) { - t.Error("IsRegistered() returned true for unregistered type") - } - - // Register and check - Register(config.RegistryTypeMCP, mockMCPConstructor) - - if !IsRegistered(config.RegistryTypeMCP) { - t.Error("IsRegistered() returned false for registered type") - } - - if IsRegistered("unknown") { - t.Error("IsRegistered() returned true for unregistered type") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package factory + +import ( + "context" + "errors" + "strings" + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/client/streaming" + "github.com/agntcy/dir/importer/config" + "github.com/agntcy/dir/importer/types" +) + +// mockImporter is a mock implementation for testing. +type mockImporter struct { + runCalled bool +} + +func (m *mockImporter) Run(ctx context.Context, cfg config.Config) (*types.ImportResult, error) { + m.runCalled = true + + return &types.ImportResult{TotalRecords: 10}, nil +} + +// mockClient is a mock implementation for testing. +type mockClient struct{} + +func (m *mockClient) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + return &corev1.RecordRef{}, nil +} + +func (m *mockClient) PullBatch(ctx context.Context, recordRefs []*corev1.RecordRef) ([]*corev1.Record, error) { + return []*corev1.Record{}, nil +} + +func (m *mockClient) SearchCIDs(ctx context.Context, req *searchv1.SearchCIDsRequest) (streaming.StreamResult[searchv1.SearchCIDsResponse], error) { + return &mockStreamResult{}, nil +} + +// mockStreamResult implements streaming.StreamResult for testing. 
+type mockStreamResult struct{} + +func (m *mockStreamResult) ResCh() <-chan *searchv1.SearchCIDsResponse { + ch := make(chan *searchv1.SearchCIDsResponse) + close(ch) + + return ch +} + +func (m *mockStreamResult) ErrCh() <-chan error { + ch := make(chan error) + close(ch) + + return ch +} + +func (m *mockStreamResult) DoneCh() <-chan struct{} { + ch := make(chan struct{}) + close(ch) + + return ch +} + +// Mock constructor functions. +func mockMCPConstructor(client config.ClientInterface, cfg config.Config) (types.Importer, error) { + return &mockImporter{}, nil +} + +func mockFailingConstructor(client config.ClientInterface, cfg config.Config) (types.Importer, error) { + return nil, errors.New("construction failed") +} + +func TestRegister(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + // Register a constructor + Register(config.RegistryTypeMCP, mockMCPConstructor) + + // Verify it was registered + if !IsRegistered(config.RegistryTypeMCP) { + t.Error("Register() did not register constructor") + } +} + +func TestRegisterPanic(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + // Register once should succeed + Register(config.RegistryTypeMCP, mockMCPConstructor) + + // Register again should panic + defer func() { + if r := recover(); r == nil { + t.Error("Register() should panic on duplicate registration") + } + }() + + Register(config.RegistryTypeMCP, mockMCPConstructor) +} + +func TestCreate(t *testing.T) { + tests := []struct { + name string + registryType config.RegistryType + registerFirst bool + wantErr bool + errContains string + }{ + { + name: "successful creation", + registryType: config.RegistryTypeMCP, + registerFirst: true, + wantErr: false, + }, + { + name: "unregistered registry type", + registryType: "unknown", + registerFirst: false, + wantErr: true, + errContains: "unsupported registry type", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Reset registry before each test + Reset() + defer Reset() + + if tt.registerFirst { + Register(tt.registryType, mockMCPConstructor) + } + + cfg := config.Config{ + RegistryType: tt.registryType, + RegistryURL: "https://example.com", + } + + mockCli := &mockClient{} + importer, err := Create(mockCli, cfg) + + if (err != nil) != tt.wantErr { + t.Errorf("Create() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if tt.wantErr { + if err == nil || !strings.Contains(err.Error(), tt.errContains) { + t.Errorf("Create() error = %v, want error containing %q", err, tt.errContains) + } + + return + } + + if importer == nil { + t.Error("Create() returned nil importer") + } + }) + } +} + +func TestCreateWithFailingConstructor(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + Register(config.RegistryTypeMCP, mockFailingConstructor) + + cfg := config.Config{ + RegistryType: config.RegistryTypeMCP, + RegistryURL: "https://example.com", + } + + mockCli := &mockClient{} + + importer, err := Create(mockCli, cfg) + if err == nil { + t.Error("Create() with failing constructor should return error") + } + + if importer != nil { + t.Error("Create() with failing constructor should return nil importer") + } + + if err.Error() != "construction failed" { + t.Errorf("Create() error = %v, want 'construction failed'", err) + } +} + +func TestMultipleRegistrations(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + // Register multiple types + Register(config.RegistryTypeMCP, mockMCPConstructor) + 
Register("custom", mockMCPConstructor) + + // Verify both are accessible + mockCli := &mockClient{} + cfg1 := config.Config{RegistryType: config.RegistryTypeMCP, RegistryURL: "https://mcp.example.com"} + + importer1, err := Create(mockCli, cfg1) + if err != nil { + t.Errorf("Create() for MCP failed: %v", err) + } + + if importer1 == nil { + t.Error("Create() for MCP returned nil") + } + + cfg2 := config.Config{RegistryType: "custom", RegistryURL: "https://custom.example.com"} + + importer2, err := Create(mockCli, cfg2) + if err != nil { + t.Errorf("Create() for custom failed: %v", err) + } + + if importer2 == nil { + t.Error("Create() for custom returned nil") + } +} + +func TestCreateMultipleInstancesWithDifferentURLs(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + Register(config.RegistryTypeMCP, mockMCPConstructor) + + mockCli := &mockClient{} + + // Create two importers with different URLs + cfg1 := config.Config{ + RegistryType: config.RegistryTypeMCP, + RegistryURL: "https://registry1.example.com", + } + importer1, err1 := Create(mockCli, cfg1) + + cfg2 := config.Config{ + RegistryType: config.RegistryTypeMCP, + RegistryURL: "https://registry2.example.com", + } + importer2, err2 := Create(mockCli, cfg2) + + if err1 != nil || err2 != nil { + t.Errorf("Create() failed: err1=%v, err2=%v", err1, err2) + } + + if importer1 == nil || importer2 == nil { + t.Error("Create() returned nil importers") + } + + // Verify they are different instances + if importer1 == importer2 { + t.Error("Create() returned same instance for different configs") + } +} + +func TestRegisteredTypes(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + // Initially should be empty + types := RegisteredTypes() + if len(types) != 0 { + t.Errorf("RegisteredTypes() = %v, want empty slice", types) + } + + // Register some types + Register(config.RegistryTypeMCP, mockMCPConstructor) + Register("custom", mockMCPConstructor) + + types = RegisteredTypes() + if len(types) != 2 { + t.Errorf("RegisteredTypes() returned %d types, want 2", len(types)) + } +} + +func TestIsRegistered(t *testing.T) { + // Reset registry before test + Reset() + defer Reset() + + // Initially nothing registered + if IsRegistered(config.RegistryTypeMCP) { + t.Error("IsRegistered() returned true for unregistered type") + } + + // Register and check + Register(config.RegistryTypeMCP, mockMCPConstructor) + + if !IsRegistered(config.RegistryTypeMCP) { + t.Error("IsRegistered() returned false for registered type") + } + + if IsRegistered("unknown") { + t.Error("IsRegistered() returned true for unregistered type") + } +} diff --git a/importer/types/importer.go b/importer/types/importer.go index e4df7de4c..112004f9d 100644 --- a/importer/types/importer.go +++ b/importer/types/importer.go @@ -1,25 +1,25 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "context" - - "github.com/agntcy/dir/importer/config" -) - -// Importer defines the interface for importing records from external registries. -type Importer interface { - // Run executes the import operation for the given configuration - Run(ctx context.Context, cfg config.Config) (*ImportResult, error) -} - -// ImportResult summarizes the outcome of an import operation. 
-type ImportResult struct { - TotalRecords int - ImportedCount int - SkippedCount int - FailedCount int - Errors []error -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "context" + + "github.com/agntcy/dir/importer/config" +) + +// Importer defines the interface for importing records from external registries. +type Importer interface { + // Run executes the import operation for the given configuration + Run(ctx context.Context, cfg config.Config) (*ImportResult, error) +} + +// ImportResult summarizes the outcome of an import operation. +type ImportResult struct { + TotalRecords int + ImportedCount int + SkippedCount int + FailedCount int + Errors []error +} diff --git a/install/charts/dir/Chart.lock b/install/charts/dir/Chart.lock index dbc18fdc6..be71187d6 100644 --- a/install/charts/dir/Chart.lock +++ b/install/charts/dir/Chart.lock @@ -1,6 +1,6 @@ -dependencies: -- name: apiserver - repository: file://apiserver - version: 0.1.0 -digest: sha256:e57966560e0f7729731ffe1e6fb8dcfcd590f5817f49b543be4e705f5d8d5cbb -generated: "2025-03-26T12:22:29.608096+01:00" +dependencies: +- name: apiserver + repository: file://apiserver + version: 0.1.0 +digest: sha256:e57966560e0f7729731ffe1e6fb8dcfcd590f5817f49b543be4e705f5d8d5cbb +generated: "2025-03-26T12:22:29.608096+01:00" diff --git a/install/charts/dir/Chart.yaml b/install/charts/dir/Chart.yaml index c2b8c7f8f..1ce237640 100644 --- a/install/charts/dir/Chart.yaml +++ b/install/charts/dir/Chart.yaml @@ -1,32 +1,32 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v2 -name: dir -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" - -dependencies: - - name: apiserver - version: 0.1.0 - repository: file://apiserver +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v2 +name: dir +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. 
Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + +dependencies: + - name: apiserver + version: 0.1.0 + repository: file://apiserver diff --git a/install/charts/dir/apiserver/.helmignore b/install/charts/dir/apiserver/.helmignore index 0e8a0eb36..f82e96d46 100644 --- a/install/charts/dir/apiserver/.helmignore +++ b/install/charts/dir/apiserver/.helmignore @@ -1,23 +1,23 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install/charts/dir/apiserver/Chart.lock b/install/charts/dir/apiserver/Chart.lock index 79784546f..20fdd587a 100644 --- a/install/charts/dir/apiserver/Chart.lock +++ b/install/charts/dir/apiserver/Chart.lock @@ -1,9 +1,9 @@ -dependencies: -- name: zot - repository: http://zotregistry.dev/helm-charts - version: 0.1.91 -- name: oasf - repository: oci://ghcr.io/agntcy/oasf/helm-charts - version: v0.5.1 -digest: sha256:1a5b416ad5d7c7ad0341f01892aa5faba3c307a089d27f078ce7b8f8e0fa94d7 -generated: "2025-12-09T12:56:09.43071+01:00" +dependencies: +- name: zot + repository: http://zotregistry.dev/helm-charts + version: 0.1.91 +- name: oasf + repository: oci://ghcr.io/agntcy/oasf/helm-charts + version: v0.5.1 +digest: sha256:1a5b416ad5d7c7ad0341f01892aa5faba3c307a089d27f078ce7b8f8e0fa94d7 +generated: "2025-12-09T12:56:09.43071+01:00" diff --git a/install/charts/dir/apiserver/Chart.yaml b/install/charts/dir/apiserver/Chart.yaml index 31fb0c695..9a80501bd 100644 --- a/install/charts/dir/apiserver/Chart.yaml +++ b/install/charts/dir/apiserver/Chart.yaml @@ -1,39 +1,39 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v2 -name: apiserver -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
-type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" - -dependencies: - - name: zot - version: "0.1.91" - repository: "http://zotregistry.dev/helm-charts" - # OASF server subchart (OPTIONAL - opt-in only) - # Only installed when explicitly enabled via: oasf.enabled: true - # Default: disabled (oasf.enabled: false in values.yaml) - - name: oasf - version: v0.5.1 - repository: oci://ghcr.io/agntcy/oasf/helm-charts - condition: oasf.enabled +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v2 +name: apiserver +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + +dependencies: + - name: zot + version: "0.1.91" + repository: "http://zotregistry.dev/helm-charts" + # OASF server subchart (OPTIONAL - opt-in only) + # Only installed when explicitly enabled via: oasf.enabled: true + # Default: disabled (oasf.enabled: false in values.yaml) + - name: oasf + version: v0.5.1 + repository: oci://ghcr.io/agntcy/oasf/helm-charts + condition: oasf.enabled diff --git a/install/charts/dir/apiserver/templates/_helpers.tpl b/install/charts/dir/apiserver/templates/_helpers.tpl index 1d8841ae9..6229b9d67 100644 --- a/install/charts/dir/apiserver/templates/_helpers.tpl +++ b/install/charts/dir/apiserver/templates/_helpers.tpl @@ -1,108 +1,108 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "chart.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "chart.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "chart.labels" -}} -helm.sh/chart: {{ include "chart.chart" . }} -{{ include "chart.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "chart.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{/* -Generate cloud provider-specific annotations for routing service -*/}} -{{- define "chart.routingService.annotations" -}} -{{- $annotations := dict -}} -{{- if and .Values.routingService .Values.routingService.cloudProvider -}} -{{- if eq .Values.routingService.cloudProvider "aws" -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-type" "nlb" -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-scheme" "internet-facing" -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled" "true" -}} -{{- if .Values.routingService.aws.nlbTargetType -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" .Values.routingService.aws.nlbTargetType -}} -{{- end -}} -{{- if .Values.routingService.aws.internal -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-internal" "true" -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-scheme" "internal" -}} -{{- end -}} -{{- else if eq .Values.routingService.cloudProvider "gcp" -}} -{{- $_ := set $annotations "cloud.google.com/load-balancer-type" "External" -}} -{{- if .Values.routingService.gcp.internal -}} -{{- $_ := set $annotations "cloud.google.com/load-balancer-type" "Internal" -}} -{{- end -}} -{{- if .Values.routingService.gcp.backendConfig -}} -{{- $_ := set $annotations "cloud.google.com/backend-config" .Values.routingService.gcp.backendConfig -}} -{{- end -}} -{{- else if eq .Values.routingService.cloudProvider "azure" -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-internal" "false" -}} -{{- if .Values.routingService.azure.internal -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-internal" "true" -}} -{{- end -}} -{{- if .Values.routingService.azure.resourceGroup -}} -{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-resource-group" .Values.routingService.azure.resourceGroup -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- /* Merge provider annotations with 
custom annotations (custom takes precedence) */ -}} -{{- if and .Values.routingService .Values.routingService.annotations -}} -{{- range $key, $value := .Values.routingService.annotations -}} -{{- $_ := set $annotations $key $value -}} -{{- end -}} -{{- end -}} -{{- if $annotations -}} -{{- toYaml $annotations -}} -{{- end -}} -{{- end -}} +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.chart" . }} +{{ include "chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chart.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Generate cloud provider-specific annotations for routing service +*/}} +{{- define "chart.routingService.annotations" -}} +{{- $annotations := dict -}} +{{- if and .Values.routingService .Values.routingService.cloudProvider -}} +{{- if eq .Values.routingService.cloudProvider "aws" -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-type" "nlb" -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-scheme" "internet-facing" -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled" "true" -}} +{{- if .Values.routingService.aws.nlbTargetType -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" .Values.routingService.aws.nlbTargetType -}} +{{- end -}} +{{- if .Values.routingService.aws.internal -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-internal" "true" -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/aws-load-balancer-scheme" "internal" -}} +{{- end -}} +{{- else if eq .Values.routingService.cloudProvider "gcp" -}} +{{- $_ := set $annotations "cloud.google.com/load-balancer-type" "External" -}} +{{- if .Values.routingService.gcp.internal -}} +{{- $_ := set $annotations "cloud.google.com/load-balancer-type" "Internal" -}} +{{- end -}} +{{- if .Values.routingService.gcp.backendConfig -}} +{{- $_ := set $annotations "cloud.google.com/backend-config" .Values.routingService.gcp.backendConfig -}} +{{- end -}} +{{- else if eq .Values.routingService.cloudProvider "azure" -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-internal" "false" -}} +{{- if .Values.routingService.azure.internal -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-internal" "true" -}} +{{- end -}} +{{- if .Values.routingService.azure.resourceGroup -}} +{{- $_ := set $annotations "service.beta.kubernetes.io/azure-load-balancer-resource-group" .Values.routingService.azure.resourceGroup -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- /* Merge provider annotations with custom annotations (custom takes precedence) */ -}} +{{- if and .Values.routingService .Values.routingService.annotations -}} +{{- range $key, $value := .Values.routingService.annotations -}} +{{- $_ := set $annotations $key $value -}} +{{- end -}} +{{- end -}} +{{- if $annotations -}} +{{- toYaml $annotations -}} +{{- end -}} +{{- end -}} diff --git a/install/charts/dir/apiserver/templates/clusterspiffefederation.yaml b/install/charts/dir/apiserver/templates/clusterspiffefederation.yaml index 0df27ae3a..6e3fcdd9c 100644 --- a/install/charts/dir/apiserver/templates/clusterspiffefederation.yaml +++ b/install/charts/dir/apiserver/templates/clusterspiffefederation.yaml @@ -1,11 +1,11 @@ -{{- if eq .Values.spire.enabled true }} -{{- range .Values.spire.federation }} ---- -apiVersion: spire.spiffe.io/v1alpha1 -kind: ClusterFederatedTrustDomain -metadata: - name: {{ include "chart.fullname" $ }}-{{ .trustDomain | replace "." "-" }} -spec: - {{ . | toYaml | nindent 2 }} -{{- end }} -{{- end }} +{{- if eq .Values.spire.enabled true }} +{{- range .Values.spire.federation }} +--- +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterFederatedTrustDomain +metadata: + name: {{ include "chart.fullname" $ }}-{{ .trustDomain | replace "." "-" }} +spec: + {{ . 
| toYaml | nindent 2 }} +{{- end }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/clusterspiffeids.yaml b/install/charts/dir/apiserver/templates/clusterspiffeids.yaml index 1a4d661fd..27c3fbd41 100644 --- a/install/charts/dir/apiserver/templates/clusterspiffeids.yaml +++ b/install/charts/dir/apiserver/templates/clusterspiffeids.yaml @@ -1,29 +1,29 @@ -{{- if eq .Values.spire.enabled true }} -apiVersion: spire.spiffe.io/v1alpha1 -kind: ClusterSPIFFEID -metadata: - name: {{ include "chart.fullname" . }} -spec: - className: {{ .Values.spire.className | default (printf "%s-spire" .Release.Namespace) }} - podSelector: - matchExpressions: - - key: app.kubernetes.io/name - operator: In - values: - - {{ include "chart.name" . }} - workloadSelectorTemplates: - - k8s:pod-image:{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} - - k8s:sa:{{ include "chart.serviceAccountName" . }} - spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} - autoPopulateDNSNames: true - {{- if .Values.spire.dnsNameTemplates }} - dnsNameTemplates: - {{- toYaml .Values.spire.dnsNameTemplates | nindent 4 }} - {{- end }} - {{- if .Values.spire.federation }} - federatesWith: - {{ range .Values.spire.federation }} - - {{ .trustDomain }} - {{ end }} - {{- end }} -{{- end }} +{{- if eq .Values.spire.enabled true }} +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterSPIFFEID +metadata: + name: {{ include "chart.fullname" . }} +spec: + className: {{ .Values.spire.className | default (printf "%s-spire" .Release.Namespace) }} + podSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "chart.name" . }} + workloadSelectorTemplates: + - k8s:pod-image:{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + - k8s:sa:{{ include "chart.serviceAccountName" . }} + spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} + autoPopulateDNSNames: true + {{- if .Values.spire.dnsNameTemplates }} + dnsNameTemplates: + {{- toYaml .Values.spire.dnsNameTemplates | nindent 4 }} + {{- end }} + {{- if .Values.spire.federation }} + federatesWith: + {{ range .Values.spire.federation }} + - {{ .trustDomain }} + {{ end }} + {{- end }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/configmap.yaml b/install/charts/dir/apiserver/templates/configmap.yaml index 286ddf1ee..d8205d7a6 100644 --- a/install/charts/dir/apiserver/templates/configmap.yaml +++ b/install/charts/dir/apiserver/templates/configmap.yaml @@ -1,17 +1,17 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} -data: - server.config.yml: | - {{- $config := deepCopy .Values.config }} - {{- if .Values.metrics }} - {{- $metricsConfig := dict "enabled" .Values.metrics.enabled "address" (printf ":%d" (.Values.metrics.port | int)) }} - {{- $_ := set $config "metrics" $metricsConfig }} - {{- end }} - {{- $config | toYaml | nindent 4 }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} +data: + server.config.yml: | + {{- $config := deepCopy .Values.config }} + {{- if .Values.metrics }} + {{- $metricsConfig := dict "enabled" .Values.metrics.enabled "address" (printf ":%d" (.Values.metrics.port | int)) }} + {{- $_ := set $config "metrics" $metricsConfig }} + {{- end }} + {{- $config | toYaml | nindent 4 }} diff --git a/install/charts/dir/apiserver/templates/database-pvc.yaml b/install/charts/dir/apiserver/templates/database-pvc.yaml index 87d962f2b..1229595ba 100644 --- a/install/charts/dir/apiserver/templates/database-pvc.yaml +++ b/install/charts/dir/apiserver/templates/database-pvc.yaml @@ -1,24 +1,24 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.database.pvc.enabled }} -{{- if .Values.database.pvc.create }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "chart.fullname" . }}-database-pvc - labels: - {{- include "chart.labels" . | nindent 4 }} - app.kubernetes.io/component: database -spec: - accessModes: - - {{ .Values.database.pvc.accessMode }} - {{- if .Values.database.pvc.storageClassName }} - storageClassName: {{ .Values.database.pvc.storageClassName }} - {{- end }} - resources: - requests: - storage: {{ .Values.database.pvc.size }} -{{- end }} -{{- end }} - +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.database.pvc.enabled }} +{{- if .Values.database.pvc.create }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "chart.fullname" . }}-database-pvc + labels: + {{- include "chart.labels" . | nindent 4 }} + app.kubernetes.io/component: database +spec: + accessModes: + - {{ .Values.database.pvc.accessMode }} + {{- if .Values.database.pvc.storageClassName }} + storageClassName: {{ .Values.database.pvc.storageClassName }} + {{- end }} + resources: + requests: + storage: {{ .Values.database.pvc.size }} +{{- end }} +{{- end }} + diff --git a/install/charts/dir/apiserver/templates/deployment.yaml b/install/charts/dir/apiserver/templates/deployment.yaml index 4c41bb15b..523f1de2b 100644 --- a/install/charts/dir/apiserver/templates/deployment.yaml +++ b/install/charts/dir/apiserver/templates/deployment.yaml @@ -1,293 +1,293 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.autoscaling.replicaCount }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - {{- if .Values.strategy }} - strategy: - {{- toYaml .Values.strategy | nindent 4 }} - {{- end }} - selector: - matchLabels: - {{- include "chart.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "chart.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.image.pullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "chart.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - {{- if eq .Values.zot.mountConfig false }} - initContainers: - - name: init-zot-config - image: busybox:1.36 - command: - - sh - - -c - - | - echo "Initializing ZOT config..." - if [ ! 
-f /etc/zot/config.json ]; then - echo "Creating ZOT config from template..." - cat > /etc/zot/config.json << 'EOF' - {{- tpl (index .Values.zot.configFiles "config.json") . | nindent 14 }} - EOF - chmod 666 /etc/zot/config.json - echo "Config initialized successfully" - else - echo "Config exists, ensuring it's writable..." - chmod 666 /etc/zot/config.json - echo "Config permissions updated" - fi - echo "Setting directory permissions..." - chmod 777 /etc/zot - echo "Initialization complete" - volumeMounts: - - name: zot-config-storage - mountPath: /etc/zot - {{- end }} - containers: - - name: {{ .Chart.Name }} - env: - - name: DIRECTORY_LOGGER_LOG_LEVEL - value: {{ .Values.log_level }} - - name: DIRECTORY_LOGGER_LOG_FORMAT - value: {{ .Values.log_format }} - - name: DIRECTORY_SERVER_LOGGING_VERBOSE - value: "{{ .Values.grpc_logging_verbose }}" - {{- if or .Values.secrets.syncAuth.username .Values.secrets.syncAuth.password (and .Values.externalSecrets.enabled .Values.externalSecrets.syncAuth.enabled) }} - - name: DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME - valueFrom: - secretKeyRef: - name: {{ include "chart.fullname" . }} - key: sync-username - - name: DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "chart.fullname" . }} - key: sync-password - {{- end }} - {{- if or .Values.secrets.ociAuth.username .Values.secrets.ociAuth.password (and .Values.externalSecrets.enabled .Values.externalSecrets.ociAuth.enabled) }} - - name: DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME - valueFrom: - secretKeyRef: - name: {{ include "chart.fullname" . }} - key: oci-username - - name: DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "chart.fullname" . }} - key: oci-password - {{- end }} - {{- if .Values.database.sqlite.dbPath }} - - name: DIRECTORY_SERVER_DATABASE_SQLITE_DB_PATH - value: {{ .Values.database.sqlite.dbPath }} - {{- end }} - {{- if eq .Values.spire.enabled true }} - - name: DIRECTORY_SERVER_AUTHZ_ENABLED - value: "true" - - name: DIRECTORY_SERVER_AUTHZ_SOCKET_PATH - value: "unix:/run/spire/agent-sockets/api.sock" - - name: DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN - value: {{ .Values.spire.trustDomain }} - {{- end }} - {{- if and .Values.config.oasf_api_validation .Values.config.oasf_api_validation.schema_url }} - - name: DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL - value: {{ .Values.config.oasf_api_validation.schema_url | quote }} - {{- end }} - {{- if and .Values.config.oasf_api_validation .Values.config.oasf_api_validation.disable }} - - name: DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE - value: {{ .Values.config.oasf_api_validation.disable | quote }} - {{- end }} - {{- if and .Values.config.oasf_api_validation (ne .Values.config.oasf_api_validation.strict_mode nil) }} - - name: DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE - value: {{ .Values.config.oasf_api_validation.strict_mode | quote }} - {{- end }} - {{- with .Values.extraEnv }} - {{- toYaml . 
| nindent 12 }} - {{- end }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: grpc - containerPort: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} - protocol: TCP - - name: routing - {{- if .Values.config.routing.listen_address }} - containerPort: {{ (split "/" .Values.config.routing.listen_address)._4 }} - {{- else }} - containerPort: 8999 - {{- end }} - protocol: TCP - {{- if .Values.metrics.enabled }} - - name: metrics - containerPort: {{ .Values.metrics.port }} - protocol: TCP - {{- end }} - {{- if or (eq .Values.config.authn.enabled true) (eq .Values.spire.enabled true) }} - livenessProbe: - tcpSocket: - port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 3 - readinessProbe: - tcpSocket: - port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 3 - successThreshold: 1 - failureThreshold: 6 - {{- else }} - livenessProbe: - grpc: - port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 3 - readinessProbe: - grpc: - port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 3 - successThreshold: 1 - failureThreshold: 6 - {{- end }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: config-volume - mountPath: /etc/agntcy/dir/server.config.yml - subPath: server.config.yml - {{- if or .Values.secrets.privKey (and .Values.externalSecrets.enabled .Values.externalSecrets.nodeIdentity.enabled) }} - - name: secret-volume - mountPath: {{ .Values.config.routing.key_path }} - subPath: node.privkey - readOnly: true - {{- end }} - {{- if .Values.coverageVolume }} - - name: coverage-volume - mountPath: /tmp/coverage - {{- end }} - {{- if eq .Values.spire.enabled true }} - - name: spire-agent-socket - mountPath: /run/spire/agent-sockets - readOnly: true - {{- end }} - {{- if .Values.securityContext.readOnlyRootFilesystem }} - - name: home-dir - mountPath: /home/nonroot - {{- end }} - {{- if .Values.pvc.create }} - - name: routing-data - mountPath: {{ .Values.config.routing.datastore_dir }} - {{- end }} - {{- if .Values.database.pvc.enabled }} - - name: database-data - mountPath: /var/lib/dir/database - {{- end }} - {{- if .Values.securityContext.readOnlyRootFilesystem }} - - name: tmp-data - mountPath: /tmp - {{- end }} - {{- if eq .Values.zot.mountConfig false }} - - name: zot-config-storage - mountPath: /etc/zot - {{- end }} - {{- with .Values.extraVolumeMounts }} - {{- toYaml . | nindent 12 }} - {{- end }} - volumes: - - name: config-volume - configMap: - name: {{ include "chart.fullname" . 
}} - {{- if .Values.coverageVolume }} - - name: coverage-volume - emptyDir: {} - {{- end }} - {{- if .Values.securityContext.readOnlyRootFilesystem }} - - name: tmp-data - emptyDir: {} - {{- end }} - {{- if eq .Values.spire.enabled true }} - - name: spire-agent-socket - {{- if .Values.spire.useCSIDriver }} - # SPIFFE CSI driver for proper workload attestation - # Ensures synchronous workload registration before pod starts - csi: - driver: "csi.spiffe.io" - readOnly: true - {{- else }} - # Legacy hostPath mount (for debugging/non-production use only) - # May cause authentication issues during pod startup - hostPath: - path: /run/spire/agent-sockets - type: Directory - {{- end }} - {{- end }} - {{- if .Values.securityContext.readOnlyRootFilesystem }} - # Writable home directory for config files when root filesystem is read-only - - name: home-dir - emptyDir: {} - {{- end }} - {{- if or .Values.secrets.privKey (and .Values.externalSecrets.enabled .Values.externalSecrets.nodeIdentity.enabled) }} - - name: secret-volume - secret: - secretName: {{ include "chart.fullname" . }} - items: - - key: node.privkey - path: node.privkey - {{- end }} - {{- if .Values.pvc.create }} - - name: routing-data - persistentVolumeClaim: - claimName: {{ include "chart.fullname" . }}-pvc - {{- end }} - {{- if .Values.database.pvc.enabled }} - - name: database-data - persistentVolumeClaim: - claimName: {{ include "chart.fullname" . }}-database-pvc - {{- end }} - {{- if eq .Values.zot.mountConfig false }} - - name: zot-config-storage - persistentVolumeClaim: - claimName: {{ .Release.Name }}-zot-config - {{- end }} - {{- with .Values.extraVolumes }} - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.autoscaling.replicaCount }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.strategy }} + strategy: + {{- toYaml .Values.strategy | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "chart.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "chart.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- if eq .Values.zot.mountConfig false }} + initContainers: + - name: init-zot-config + image: busybox:1.36 + command: + - sh + - -c + - | + echo "Initializing ZOT config..." + if [ ! -f /etc/zot/config.json ]; then + echo "Creating ZOT config from template..." + cat > /etc/zot/config.json << 'EOF' + {{- tpl (index .Values.zot.configFiles "config.json") . | nindent 14 }} + EOF + chmod 666 /etc/zot/config.json + echo "Config initialized successfully" + else + echo "Config exists, ensuring it's writable..." 
+ chmod 666 /etc/zot/config.json + echo "Config permissions updated" + fi + echo "Setting directory permissions..." + chmod 777 /etc/zot + echo "Initialization complete" + volumeMounts: + - name: zot-config-storage + mountPath: /etc/zot + {{- end }} + containers: + - name: {{ .Chart.Name }} + env: + - name: DIRECTORY_LOGGER_LOG_LEVEL + value: {{ .Values.log_level }} + - name: DIRECTORY_LOGGER_LOG_FORMAT + value: {{ .Values.log_format }} + - name: DIRECTORY_SERVER_LOGGING_VERBOSE + value: "{{ .Values.grpc_logging_verbose }}" + {{- if or .Values.secrets.syncAuth.username .Values.secrets.syncAuth.password (and .Values.externalSecrets.enabled .Values.externalSecrets.syncAuth.enabled) }} + - name: DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME + valueFrom: + secretKeyRef: + name: {{ include "chart.fullname" . }} + key: sync-username + - name: DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "chart.fullname" . }} + key: sync-password + {{- end }} + {{- if or .Values.secrets.ociAuth.username .Values.secrets.ociAuth.password (and .Values.externalSecrets.enabled .Values.externalSecrets.ociAuth.enabled) }} + - name: DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME + valueFrom: + secretKeyRef: + name: {{ include "chart.fullname" . }} + key: oci-username + - name: DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "chart.fullname" . }} + key: oci-password + {{- end }} + {{- if .Values.database.sqlite.dbPath }} + - name: DIRECTORY_SERVER_DATABASE_SQLITE_DB_PATH + value: {{ .Values.database.sqlite.dbPath }} + {{- end }} + {{- if eq .Values.spire.enabled true }} + - name: DIRECTORY_SERVER_AUTHZ_ENABLED + value: "true" + - name: DIRECTORY_SERVER_AUTHZ_SOCKET_PATH + value: "unix:/run/spire/agent-sockets/api.sock" + - name: DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN + value: {{ .Values.spire.trustDomain }} + {{- end }} + {{- if and .Values.config.oasf_api_validation .Values.config.oasf_api_validation.schema_url }} + - name: DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL + value: {{ .Values.config.oasf_api_validation.schema_url | quote }} + {{- end }} + {{- if and .Values.config.oasf_api_validation .Values.config.oasf_api_validation.disable }} + - name: DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE + value: {{ .Values.config.oasf_api_validation.disable | quote }} + {{- end }} + {{- if and .Values.config.oasf_api_validation (ne .Values.config.oasf_api_validation.strict_mode nil) }} + - name: DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE + value: {{ .Values.config.oasf_api_validation.strict_mode | quote }} + {{- end }} + {{- with .Values.extraEnv }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: grpc + containerPort: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} + protocol: TCP + - name: routing + {{- if .Values.config.routing.listen_address }} + containerPort: {{ (split "/" .Values.config.routing.listen_address)._4 }} + {{- else }} + containerPort: 8999 + {{- end }} + protocol: TCP + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + {{- end }} + {{- if or (eq .Values.config.authn.enabled true) (eq .Values.spire.enabled true) }} + livenessProbe: + tcpSocket: + port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + successThreshold: 1 + failureThreshold: 6 + {{- else }} + livenessProbe: + grpc: + port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + grpc: + port: {{ .Values.config.listen_address | default "0.0.0.0:8888" | splitList ":" | last | int }} + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + successThreshold: 1 + failureThreshold: 6 + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/agntcy/dir/server.config.yml + subPath: server.config.yml + {{- if or .Values.secrets.privKey (and .Values.externalSecrets.enabled .Values.externalSecrets.nodeIdentity.enabled) }} + - name: secret-volume + mountPath: {{ .Values.config.routing.key_path }} + subPath: node.privkey + readOnly: true + {{- end }} + {{- if .Values.coverageVolume }} + - name: coverage-volume + mountPath: /tmp/coverage + {{- end }} + {{- if eq .Values.spire.enabled true }} + - name: spire-agent-socket + mountPath: /run/spire/agent-sockets + readOnly: true + {{- end }} + {{- if .Values.securityContext.readOnlyRootFilesystem }} + - name: home-dir + mountPath: /home/nonroot + {{- end }} + {{- if .Values.pvc.create }} + - name: routing-data + mountPath: {{ .Values.config.routing.datastore_dir }} + {{- end }} + {{- if .Values.database.pvc.enabled }} + - name: database-data + mountPath: /var/lib/dir/database + {{- end }} + {{- if .Values.securityContext.readOnlyRootFilesystem }} + - name: tmp-data + mountPath: /tmp + {{- end }} + {{- if eq .Values.zot.mountConfig false }} + - name: zot-config-storage + mountPath: /etc/zot + {{- end }} + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + - name: config-volume + configMap: + name: {{ include "chart.fullname" . 
}} + {{- if .Values.coverageVolume }} + - name: coverage-volume + emptyDir: {} + {{- end }} + {{- if .Values.securityContext.readOnlyRootFilesystem }} + - name: tmp-data + emptyDir: {} + {{- end }} + {{- if eq .Values.spire.enabled true }} + - name: spire-agent-socket + {{- if .Values.spire.useCSIDriver }} + # SPIFFE CSI driver for proper workload attestation + # Ensures synchronous workload registration before pod starts + csi: + driver: "csi.spiffe.io" + readOnly: true + {{- else }} + # Legacy hostPath mount (for debugging/non-production use only) + # May cause authentication issues during pod startup + hostPath: + path: /run/spire/agent-sockets + type: Directory + {{- end }} + {{- end }} + {{- if .Values.securityContext.readOnlyRootFilesystem }} + # Writable home directory for config files when root filesystem is read-only + - name: home-dir + emptyDir: {} + {{- end }} + {{- if or .Values.secrets.privKey (and .Values.externalSecrets.enabled .Values.externalSecrets.nodeIdentity.enabled) }} + - name: secret-volume + secret: + secretName: {{ include "chart.fullname" . }} + items: + - key: node.privkey + path: node.privkey + {{- end }} + {{- if .Values.pvc.create }} + - name: routing-data + persistentVolumeClaim: + claimName: {{ include "chart.fullname" . }}-pvc + {{- end }} + {{- if .Values.database.pvc.enabled }} + - name: database-data + persistentVolumeClaim: + claimName: {{ include "chart.fullname" . }}-database-pvc + {{- end }} + {{- if eq .Values.zot.mountConfig false }} + - name: zot-config-storage + persistentVolumeClaim: + claimName: {{ .Release.Name }}-zot-config + {{- end }} + {{- with .Values.extraVolumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/install/charts/dir/apiserver/templates/external-secret.yaml b/install/charts/dir/apiserver/templates/external-secret.yaml index e5967353d..27ad9b8ed 100644 --- a/install/charts/dir/apiserver/templates/external-secret.yaml +++ b/install/charts/dir/apiserver/templates/external-secret.yaml @@ -1,91 +1,91 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.externalSecrets.enabled }} ---- -# ExternalSecret for DIR API server credentials -# -# This syncs all sensitive credentials from HashiCorp Vault to a Kubernetes Secret. -# The External Secrets Operator (ESO) manages the sync automatically. -# -# What it creates: -# - Kubernetes Secret: {{ include "chart.fullname" . }} -# - Contains all credentials: node identity, OCI auth, sync auth -# -# Prerequisites: -# 1. Credentials stored in Vault at configured path -# 2. ClusterSecretStore configured (referenced below) -# 3. ESO has permissions to read from Vault path -# -# Lifecycle: -# - ESO syncs from Vault every refresh interval (default: 1h) -# - Secret is automatically updated if Vault values change -# - Secret is automatically recreated if deleted -apiVersion: external-secrets.io/v1beta1 -kind: ExternalSecret -metadata: - name: {{ include "chart.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . 
| nindent 4 }} - app.kubernetes.io/component: secrets -spec: - # Refresh interval - how often to sync from Vault - refreshInterval: {{ .Values.externalSecrets.refreshInterval | default "1h" }} - - # Reference to the ClusterSecretStore - secretStoreRef: - name: {{ .Values.externalSecrets.secretStore }} - kind: {{ .Values.externalSecrets.secretStoreKind | default "ClusterSecretStore" }} - - # Target Kubernetes Secret to create/update - target: - name: {{ include "chart.fullname" . }} - creationPolicy: Owner # ESO owns this secret (will recreate if deleted) - deletionPolicy: Retain # Keep secret even if ExternalSecret is deleted - - # Data to sync from Vault - data: - {{- if .Values.externalSecrets.nodeIdentity.enabled }} - # Node identity private key for stable P2P peer ID - - secretKey: node.privkey - remoteRef: - key: {{ .Values.externalSecrets.vaultPath }} - property: {{ .Values.externalSecrets.nodeIdentity.property | default "node.privkey" }} - conversionStrategy: Default - decodingStrategy: None - {{- end }} - - {{- if .Values.externalSecrets.ociAuth.enabled }} - # OCI registry authentication (for storing/retrieving agent records) - - secretKey: oci-username - remoteRef: - key: {{ .Values.externalSecrets.vaultPath }} - property: {{ .Values.externalSecrets.ociAuth.usernameProperty | default "oci-username" }} - conversionStrategy: Default - decodingStrategy: None - - secretKey: oci-password - remoteRef: - key: {{ .Values.externalSecrets.vaultPath }} - property: {{ .Values.externalSecrets.ociAuth.passwordProperty | default "oci-password" }} - conversionStrategy: Default - decodingStrategy: None - {{- end }} - - {{- if .Values.externalSecrets.syncAuth.enabled }} - # Sync authentication (shared with remote nodes for P2P sync) - - secretKey: sync-username - remoteRef: - key: {{ .Values.externalSecrets.vaultPath }} - property: {{ .Values.externalSecrets.syncAuth.usernameProperty | default "sync-username" }} - conversionStrategy: Default - decodingStrategy: None - - secretKey: sync-password - remoteRef: - key: {{ .Values.externalSecrets.vaultPath }} - property: {{ .Values.externalSecrets.syncAuth.passwordProperty | default "sync-password" }} - conversionStrategy: Default - decodingStrategy: None - {{- end }} -{{- end }} - +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.externalSecrets.enabled }} +--- +# ExternalSecret for DIR API server credentials +# +# This syncs all sensitive credentials from HashiCorp Vault to a Kubernetes Secret. +# The External Secrets Operator (ESO) manages the sync automatically. +# +# What it creates: +# - Kubernetes Secret: {{ include "chart.fullname" . }} +# - Contains all credentials: node identity, OCI auth, sync auth +# +# Prerequisites: +# 1. Credentials stored in Vault at configured path +# 2. ClusterSecretStore configured (referenced below) +# 3. ESO has permissions to read from Vault path +# +# Lifecycle: +# - ESO syncs from Vault every refresh interval (default: 1h) +# - Secret is automatically updated if Vault values change +# - Secret is automatically recreated if deleted +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ include "chart.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} + app.kubernetes.io/component: secrets +spec: + # Refresh interval - how often to sync from Vault + refreshInterval: {{ .Values.externalSecrets.refreshInterval | default "1h" }} + + # Reference to the ClusterSecretStore + secretStoreRef: + name: {{ .Values.externalSecrets.secretStore }} + kind: {{ .Values.externalSecrets.secretStoreKind | default "ClusterSecretStore" }} + + # Target Kubernetes Secret to create/update + target: + name: {{ include "chart.fullname" . }} + creationPolicy: Owner # ESO owns this secret (will recreate if deleted) + deletionPolicy: Retain # Keep secret even if ExternalSecret is deleted + + # Data to sync from Vault + data: + {{- if .Values.externalSecrets.nodeIdentity.enabled }} + # Node identity private key for stable P2P peer ID + - secretKey: node.privkey + remoteRef: + key: {{ .Values.externalSecrets.vaultPath }} + property: {{ .Values.externalSecrets.nodeIdentity.property | default "node.privkey" }} + conversionStrategy: Default + decodingStrategy: None + {{- end }} + + {{- if .Values.externalSecrets.ociAuth.enabled }} + # OCI registry authentication (for storing/retrieving agent records) + - secretKey: oci-username + remoteRef: + key: {{ .Values.externalSecrets.vaultPath }} + property: {{ .Values.externalSecrets.ociAuth.usernameProperty | default "oci-username" }} + conversionStrategy: Default + decodingStrategy: None + - secretKey: oci-password + remoteRef: + key: {{ .Values.externalSecrets.vaultPath }} + property: {{ .Values.externalSecrets.ociAuth.passwordProperty | default "oci-password" }} + conversionStrategy: Default + decodingStrategy: None + {{- end }} + + {{- if .Values.externalSecrets.syncAuth.enabled }} + # Sync authentication (shared with remote nodes for P2P sync) + - secretKey: sync-username + remoteRef: + key: {{ .Values.externalSecrets.vaultPath }} + property: {{ .Values.externalSecrets.syncAuth.usernameProperty | default "sync-username" }} + conversionStrategy: Default + decodingStrategy: None + - secretKey: sync-password + remoteRef: + key: {{ .Values.externalSecrets.vaultPath }} + property: {{ .Values.externalSecrets.syncAuth.passwordProperty | default "sync-password" }} + conversionStrategy: Default + decodingStrategy: None + {{- end }} +{{- end }} + diff --git a/install/charts/dir/apiserver/templates/hpa.yaml b/install/charts/dir/apiserver/templates/hpa.yaml index e06711397..90b7ae9f5 100644 --- a/install/charts/dir/apiserver/templates/hpa.yaml +++ b/install/charts/dir/apiserver/templates/hpa.yaml @@ -1,35 +1,35 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "chart.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "chart.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/ingress.yaml b/install/charts/dir/apiserver/templates/ingress.yaml index 9726a7329..200bcdb75 100644 --- a/install/charts/dir/apiserver/templates/ingress.yaml +++ b/install/charts/dir/apiserver/templates/ingress.yaml @@ -1,34 +1,34 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 ---- - -{{- if .Values.ingress.enabled -}} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if .Values.ingress.className }} - ingressClassName: {{ .Values.ingress.className }} - {{- end }} - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - {{- if .Values.ingress.hosts }} - rules: - {{- toYaml .Values.ingress.hosts | nindent 4 }} - {{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 +--- + +{{- if .Values.ingress.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + {{- if .Values.ingress.hosts }} + rules: + {{- toYaml .Values.ingress.hosts | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/routing-pvc.yaml b/install/charts/dir/apiserver/templates/routing-pvc.yaml index 2fc81d3c2..167d26105 100644 --- a/install/charts/dir/apiserver/templates/routing-pvc.yaml +++ b/install/charts/dir/apiserver/templates/routing-pvc.yaml @@ -1,16 +1,16 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.pvc.create }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "chart.fullname" . }}-pvc -spec: - accessModes: - - ReadWriteOnce - storageClassName: {{ .Values.pvc.storageClassName }} - resources: - requests: - storage: {{ .Values.pvc.size }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.pvc.create }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "chart.fullname" . }}-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: {{ .Values.pvc.storageClassName }} + resources: + requests: + storage: {{ .Values.pvc.size }} {{- end }} \ No newline at end of file diff --git a/install/charts/dir/apiserver/templates/routing_service.yaml b/install/charts/dir/apiserver/templates/routing_service.yaml index 74b8c6edf..83f5c7a48 100644 --- a/install/charts/dir/apiserver/templates/routing_service.yaml +++ b/install/charts/dir/apiserver/templates/routing_service.yaml @@ -1,33 +1,33 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.config.routing.listen_address }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "chart.fullname" . }}-routing - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - {{- if and .Values.routingService .Values.routingService.type }} - type: {{ .Values.routingService.type }} - {{- else }} - type: {{ .Values.service.type }} - {{- end }} - {{- if and .Values.routingService .Values.routingService.loadBalancerIP }} - loadBalancerIP: {{ .Values.routingService.loadBalancerIP }} - {{- end }} - {{- if and .Values.routingService .Values.routingService.externalTrafficPolicy }} - externalTrafficPolicy: {{ .Values.routingService.externalTrafficPolicy }} - {{- end }} - ports: - - port: {{ (split "/" .Values.config.routing.listen_address)._4 }} - targetPort: routing - protocol: TCP - name: routing - {{- if and .Values.routingService .Values.routingService.nodePort }} - nodePort: {{ .Values.routingService.nodePort }} - {{- end }} - selector: - {{- include "chart.selectorLabels" . | nindent 4 }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.config.routing.listen_address }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.fullname" . }}-routing + labels: + {{- include "chart.labels" . 
| nindent 4 }} +spec: + {{- if and .Values.routingService .Values.routingService.type }} + type: {{ .Values.routingService.type }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- if and .Values.routingService .Values.routingService.loadBalancerIP }} + loadBalancerIP: {{ .Values.routingService.loadBalancerIP }} + {{- end }} + {{- if and .Values.routingService .Values.routingService.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.routingService.externalTrafficPolicy }} + {{- end }} + ports: + - port: {{ (split "/" .Values.config.routing.listen_address)._4 }} + targetPort: routing + protocol: TCP + name: routing + {{- if and .Values.routingService .Values.routingService.nodePort }} + nodePort: {{ .Values.routingService.nodePort }} + {{- end }} + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/secret.yaml b/install/charts/dir/apiserver/templates/secret.yaml index f0ff8e1ba..90d4c7c8a 100644 --- a/install/charts/dir/apiserver/templates/secret.yaml +++ b/install/charts/dir/apiserver/templates/secret.yaml @@ -1,31 +1,31 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -# This secret is only created when NOT using ExternalSecrets. -# When externalSecrets.enabled=true, the ExternalSecret operator creates this secret instead. -{{- if and .Values.secrets (not .Values.externalSecrets.enabled) -}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} - app.kubernetes.io/component: secrets -data: - {{- if .Values.secrets.privKey }} - node.privkey: {{ .Values.secrets.privKey | b64enc }} - {{- end }} - {{- if or .Values.secrets.syncAuth.username .Values.secrets.syncAuth.password }} - {{- $syncUsername := .Values.secrets.syncAuth.username | default "sync" }} - {{- $syncPassword := .Values.secrets.syncAuth.password | default (randAlphaNum 32) }} - sync-username: {{ $syncUsername | b64enc }} - sync-password: {{ $syncPassword | b64enc }} - {{- end }} - {{- if or .Values.secrets.ociAuth.username .Values.secrets.ociAuth.password }} - {{- $ociUsername := .Values.secrets.ociAuth.username | default "admin" }} - {{- $ociPassword := .Values.secrets.ociAuth.password | default (randAlphaNum 32) }} - oci-username: {{ $ociUsername | b64enc }} - oci-password: {{ $ociPassword | b64enc }} - {{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +# This secret is only created when NOT using ExternalSecrets. +# When externalSecrets.enabled=true, the ExternalSecret operator creates this secret instead. +{{- if and .Values.secrets (not .Values.externalSecrets.enabled) -}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} + app.kubernetes.io/component: secrets +data: + {{- if .Values.secrets.privKey }} + node.privkey: {{ .Values.secrets.privKey | b64enc }} + {{- end }} + {{- if or .Values.secrets.syncAuth.username .Values.secrets.syncAuth.password }} + {{- $syncUsername := .Values.secrets.syncAuth.username | default "sync" }} + {{- $syncPassword := .Values.secrets.syncAuth.password | default (randAlphaNum 32) }} + sync-username: {{ $syncUsername | b64enc }} + sync-password: {{ $syncPassword | b64enc }} + {{- end }} + {{- if or .Values.secrets.ociAuth.username .Values.secrets.ociAuth.password }} + {{- $ociUsername := .Values.secrets.ociAuth.username | default "admin" }} + {{- $ociPassword := .Values.secrets.ociAuth.password | default (randAlphaNum 32) }} + oci-username: {{ $ociUsername | b64enc }} + oci-password: {{ $ociPassword | b64enc }} + {{- end }} {{- end }} \ No newline at end of file diff --git a/install/charts/dir/apiserver/templates/service.yaml b/install/charts/dir/apiserver/templates/service.yaml index 0e10944d1..44d07a2c8 100644 --- a/install/charts/dir/apiserver/templates/service.yaml +++ b/install/charts/dir/apiserver/templates/service.yaml @@ -1,24 +1,24 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v1 -kind: Service -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: grpc - protocol: TCP - name: grpc - {{- if .Values.metrics.enabled }} - - port: {{ .Values.metrics.port }} - targetPort: metrics - protocol: TCP - name: metrics - {{- end }} - selector: - {{- include "chart.selectorLabels" . | nindent 4 }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: grpc + protocol: TCP + name: grpc + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + {{- end }} + selector: + {{- include "chart.selectorLabels" . | nindent 4 }} diff --git a/install/charts/dir/apiserver/templates/serviceaccount.yaml b/install/charts/dir/apiserver/templates/serviceaccount.yaml index 26a7fa0a6..f7c29bbb8 100644 --- a/install/charts/dir/apiserver/templates/serviceaccount.yaml +++ b/install/charts/dir/apiserver/templates/serviceaccount.yaml @@ -1,15 +1,15 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "chart.serviceAccountName" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chart.serviceAccountName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/install/charts/dir/apiserver/templates/servicemonitor.yaml b/install/charts/dir/apiserver/templates/servicemonitor.yaml index edd442e71..c5a3f4bb3 100644 --- a/install/charts/dir/apiserver/templates/servicemonitor.yaml +++ b/install/charts/dir/apiserver/templates/servicemonitor.yaml @@ -1,24 +1,24 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "chart.fullname" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} - {{- with .Values.metrics.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - selector: - matchLabels: - {{- include "chart.selectorLabels" . | nindent 6 }} - endpoints: - - port: metrics - interval: {{ .Values.metrics.serviceMonitor.interval }} - scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} - path: /metrics -{{- end }} - +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "chart.fullname" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.metrics.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "chart.selectorLabels" . | nindent 6 }} + endpoints: + - port: metrics + interval: {{ .Values.metrics.serviceMonitor.interval }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + path: /metrics +{{- end }} + diff --git a/install/charts/dir/apiserver/templates/zot-config-pvc.yaml b/install/charts/dir/apiserver/templates/zot-config-pvc.yaml index a88b4fb09..dcbb60f07 100644 --- a/install/charts/dir/apiserver/templates/zot-config-pvc.yaml +++ b/install/charts/dir/apiserver/templates/zot-config-pvc.yaml @@ -1,22 +1,22 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -# Shared PVC for ZOT config to enable dynamic sync configuration -# This allows the apiserver sync worker to modify ZOT's config at runtime -# Only created when mountConfig is false (writable config needed) -{{- if eq .Values.zot.mountConfig false }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ .Release.Name }}-zot-config - namespace: {{ .Release.Namespace }} - labels: - {{- include "chart.labels" . | nindent 4 }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Mi -{{- end }} - +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +# Shared PVC for ZOT config to enable dynamic sync configuration +# This allows the apiserver sync worker to modify ZOT's config at runtime +# Only created when mountConfig is false (writable config needed) +{{- if eq .Values.zot.mountConfig false }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Release.Name }}-zot-config + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . 
| nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi +{{- end }} + diff --git a/install/charts/dir/apiserver/values.yaml b/install/charts/dir/apiserver/values.yaml index d986b4557..eaaee55a8 100644 --- a/install/charts/dir/apiserver/values.yaml +++ b/install/charts/dir/apiserver/values.yaml @@ -1,554 +1,554 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -# Default values for helm-chart. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -nameOverride: "" -fullnameOverride: "" - -# Logging configuration -log_level: INFO -log_format: text # Options: "text" (development) or "json" (production) -grpc_logging_verbose: false # Options: false (production - logs start/finish only) or true (verbose - includes payloads) - -# Enable coverage volume (emptyDir persists across container restarts) -coverageVolume: false - -image: - repository: ghcr.io/agntcy/dir-apiserver - tag: latest - pullPolicy: IfNotPresent - pullSecrets: [] - -# Server configuration -config: - # listen_address: "0.0.0.0:8888" - - # Authentication settings (handles identity verification) - # Supports both X.509 (X.509-SVID) and JWT (JWT-SVID) authentication - authn: - # Enable authentication - enabled: false - # Authentication mode: "x509" or "jwt" - # - x509: Uses X.509-SVID from mutual TLS peer certificates - # - jwt: Uses JWT-SVID from Authorization header - mode: "x509" - # SPIFFE Workload API socket path (injected by SPIRE agent) - socket_path: "unix:///run/spire/agent-sockets/api.sock" - # Expected audiences for JWT validation (only used in JWT mode) - audiences: - - "spiffe://example.org/dir-server" - - # Authorization settings (handles access control policies) - # Requires authentication to be enabled first - authz: - # Enable authorization policies - enabled: false - # Trust domain for this Directory server - # Used to distinguish internal (same trust domain) vs external requests - trust_domain: "example.org" - - # Store settings for the storage backend. - store: - # Storage provider to use. - provider: "oci" - - # OCI-backed store - oci: - # Path to a local directory that will be to hold data instead of remote. - # If this is set to non-empty value, only local store will be used. - # local_dir: "" - - # Cache directory to use for metadata. - # cache_dir: "" - - # Registry address to connect to - registry_address: "dir-zot.dir-server.svc.cluster.local:5000" - # All data will be stored under this repo. - # Objects are pushed as tags, manifests, and blobs. - # repository_name: "" - - # Auth credentials to use. - auth_config: - insecure: "true" - access_token: access-token - refresh_token: refresh-token - - # Routing settings for the peer-to-peer network. - routing: - # Address to use for routing - # listen_address: "/ip4/0.0.0.0/tcp/5555" - - # Path to private key file for peer ID. - # key_path: /tmp/agntcy-dir/node.privkey - - # Nodes to use for bootstrapping of the DHT. - # We read initial routing tables here and get introduced - # to the network. 
- # bootstrap_peers: - # - /ip4/1.1.1.1/tcp/1 - # - /ip4/1.1.1.1/tcp/2 - - # GossipSub configuration for efficient label announcements - # When enabled, labels are propagated via GossipSub mesh to ALL subscribed peers - # When disabled, falls back to DHT+Pull mechanism (higher bandwidth, limited reach) - # Default: true (recommended for production) - gossipsub: - enabled: true - - # Sync configuration - sync: - # How frequently the scheduler checks for pending syncs - scheduler_interval: "30s" - - # Maximum number of sync workers running concurrently - worker_count: 1 - - # Timeout for individual sync operations - worker_timeout: "10m" - - # Registry monitor configuration - registry_monitor: - check_interval: "30s" - - # Authentication configuration for sync operations - auth_config: {} - - # Events configuration - events: - # Channel buffer size per subscriber - # Larger buffers allow subscribers to fall behind temporarily without dropping events - # Default: 100 - subscriber_buffer_size: 100 - - # Enable logging when events are dropped due to slow consumers - # Default: true - log_slow_consumers: true - - # Enable debug logging of all published events (verbose in production) - # Default: false - log_published_events: false - - # Publication configuration - publication: - # How frequently the scheduler checks for pending publications - scheduler_interval: "1h" - - # Maximum number of publication workers running concurrently - worker_count: 1 - - # Timeout for individual publication operations - worker_timeout: "30m" - - # gRPC Connection Management configuration - # Protects server from resource exhaustion, zombie connections, and memory exhaustion - # Production-safe defaults are applied automatically - customization is optional - # Note: These settings can only be configured via Helm values (no environment variables) - connection: - # Connection limits - # max_concurrent_streams: 1000 # Maximum concurrent gRPC streams per connection (default: 1000) - # max_recv_msg_size: 4194304 # Maximum message size for receiving in bytes - 4MB (default: 4MB) - # max_send_msg_size: 4194304 # Maximum message size for sending in bytes - 4MB (default: 4MB) - # connection_timeout: 120s # Timeout for establishing new connections (default: 120s) - - # Keepalive configuration - detects dead connections and prevents resource leaks - # keepalive: - # max_connection_idle: 15m # Close connections idle for this duration (default: 15m) - # max_connection_age: 30m # Close connections after this age to rotate (default: 30m) - # max_connection_age_grace: 5m # Grace period for in-flight RPCs before closing aged connections (default: 5m) - # time: 5m # Send keepalive pings every N duration (default: 5m) - # timeout: 1m # Close connection if ping not acknowledged within timeout (default: 1m) - # min_time: 1m # Minimum time between client pings (prevents abuse) (default: 1m) - # permit_without_stream: true # Allow keepalive pings without active streams (default: true) - - # Example: High-traffic production configuration - # connection: - # max_concurrent_streams: 2000 - # max_recv_msg_size: 8388608 # 8MB for larger records - # max_send_msg_size: 8388608 # 8MB for larger records - # connection_timeout: 60s - # keepalive: - # max_connection_idle: 10m - # max_connection_age: 20m - # max_connection_age_grace: 3m - # time: 3m - # timeout: 30s - # min_time: 30s - # permit_without_stream: false - - # Rate limiting configuration - # Protects the server from abuse and resource exhaustion using token bucket algorithm - ratelimit: 
- # Enable rate limiting middleware - # Default: false (disabled for development/testing) - enabled: false - - # Global rate limit (applies to all requests regardless of client) - # Set both to 0 to disable global limiting - # global_rps: 0 # Requests per second (float, e.g., 1000.0) - # global_burst: 0 # Burst capacity (int, e.g., 2000) - - # Per-client rate limit (tracked by SPIFFE ID from mTLS) - # Default values shown below are reasonable for production - # Set both to 0 to disable per-client limiting - per_client_rps: 100 # Requests per second per client (float) - per_client_burst: 200 # Burst capacity per client (int) - - # Per-method rate limit overrides (optional) - # Allows fine-grained control over specific gRPC methods - # Note: These can only be configured via Helm values, not environment variables - # method_limits: - # "/agntcy.dir.store.v1.StoreService/CreateRecord": - # rps: 50 # Lower limit for expensive operations - # burst: 100 - # "/agntcy.dir.store.v1.StoreService/PullRecord": - # rps: 200 # Higher limit for read operations - # burst: 400 - -# SPIRE configuration -spire: - enabled: false - trustDomain: example.org - - # SPIRE controller className for ClusterSPIFFEID matching - # - # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. - # The SPIRE controller manager uses className to match ClusterSPIFFEID resources - # with the appropriate SPIRE installation. Without this field, the controller - # will ignore the ClusterSPIFFEID and no workload registration will occur, - # causing authentication failures and preventing the gRPC server from starting. - # - # The className must match the SPIRE installation's className. - # - # Default: "dir-spire" (matches standard SPIRE installation convention). - # If your SPIRE installation uses a different className, override this value. - # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). - # - # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. - # For standard installations, SPIRE is deployed in the "spire" namespace with - # className "dir-spire". 
Verify with: - # kubectl get deployment -n spire spire-server -o yaml | grep className - # - # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md - className: "dir-spire" - - # Use SPIFFE CSI driver for workload attestation (recommended) - # - # When true (recommended): - # - Uses SPIFFE CSI driver for proper workload registration - # - Workload registration happens synchronously before pod starts - # - SPIRE agent issues X.509-SVID with URI SAN before pod begins - # - Eliminates authentication delays and race conditions - # - Production-ready, reliable identity injection - # - # When false (legacy/debugging only): - # - Uses hostPath to mount SPIRE agent socket - # - Workload registration happens asynchronously via ClusterSPIFFEID - # - Pod may retry authentication during startup - # - Only use for debugging or when CSI driver is unavailable - # - # Requires: SPIFFE CSI driver deployed in cluster - # See: https://github.com/spiffe/spiffe-csi - useCSIDriver: true - - # Custom DNS names to add to the X.509-SVID certificate SANs - # Useful for external access with proper TLS verification without --tls-skip-verify - # Example: - # dnsNameTemplates: - # - "dir-api.example.com" - # - "api.example.com" - dnsNameTemplates: [] - - federation: [] - # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md - # - trustDomain: dir-cluster - # bundleEndpointURL: https://0.0.0.0:8081 - # bundleEndpointProfile: - # type: https_web - -# Create PVC for routing/cache data -# IMPORTANT: When PVC is enabled, set strategy.type to "Recreate" to avoid -# BadgerDB lock conflicts during updates (see strategy configuration above) -pvc: - create: false - storageClassName: standard - size: 1G - -# Database configuration -database: - # Database type (currently only sqlite supported) - type: "sqlite" - - # SQLite configuration - sqlite: - # Path to SQLite database file - # Default: /tmp/dir.db (ephemeral - lost on pod restart) - # When using PVC: /var/lib/dir/database/dir.db (persistent) - dbPath: "/tmp/dir.db" - - # PVC for database persistence (optional) - # When enabled, database persists across pod restarts - # Also allows enabling readOnlyRootFilesystem security hardening - # IMPORTANT: When PVC is enabled, set strategy.type to "Recreate" to avoid - # SQLite lock conflicts during updates (see strategy configuration above) - pvc: - enabled: false # Disabled by default (opt-in for backward compatibility) - create: true # Create PVC automatically - storageClassName: "" # Use cluster default storage class - size: 1Gi # Database size (adjust based on expected record count) - accessMode: ReadWriteOnce - -# Service exposes gRPC server api -service: - type: ClusterIP - port: 8888 - -# Prometheus metrics configuration -# This section configures BOTH the application metrics server AND Kubernetes service -# The values are automatically injected into the server configuration -metrics: - # Enable Prometheus metrics collection - # Default: true (recommended for production) - enabled: true - - # Port for Prometheus metrics endpoint - # Used for both the application listen address and Kubernetes service port - # Default: 9090 - port: 9090 - - # ServiceMonitor for Prometheus Operator (optional) - # Creates a ServiceMonitor resource for automatic Prometheus discovery - serviceMonitor: - enabled: false - interval: 30s - scrapeTimeout: 10s - labels: {} - -# Routing service exposes P2P networking (separate from API service) 
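-# As a sketch (values are illustrative, not defaults), a production AWS
-# deployment might pin a stable external endpoint like this:
-#   routingService:
-#     type: LoadBalancer
-#     cloudProvider: "aws"
-#     aws:
-#       internal: false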
-routingService: - # Service type for routing/P2P traffic - # Options: ClusterIP, NodePort, LoadBalancer - # Default: NodePort (works everywhere - local Kind and cloud) - # For production cloud: override to LoadBalancer for stable external IP - type: NodePort - - # Cloud provider for automatic annotation configuration - # Options: "aws", "gcp", "azure", or leave empty for manual configuration - # When set, provider-specific annotations are automatically applied - cloudProvider: "" - - # AWS-specific configuration (only used when cloudProvider: "aws") - aws: - # Use internal load balancer (default: false = internet-facing) - internal: false - # NLB target type: "instance" or "ip" (default: instance) - # nlbTargetType: "instance" - - # GCP-specific configuration (only used when cloudProvider: "gcp") - gcp: - # Use internal load balancer (default: false = external) - internal: false - # Optional: BackendConfig name for advanced configuration - # backendConfig: "" - - # Azure-specific configuration (only used when cloudProvider: "azure") - azure: - # Use internal load balancer (default: false = public) - internal: false - # Optional: Resource group for load balancer - # resourceGroup: "" - - # Optional: Specify a static IP (must be reserved in cloud provider first) - # loadBalancerIP: "" - - # Optional: Preserve client source IPs (recommended for P2P) - externalTrafficPolicy: Local - - # Optional: Fixed NodePort (only used when type is NodePort) - # nodePort: 30555 - - # Optional: Additional custom annotations (merged with provider annotations) - # Custom annotations take precedence over provider-generated ones - annotations: {} - -serviceAccount: - # Specifies whether a service account should be created - create: false - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true # When enabled, /tmp emptyDir is automatically mounted for SQLite temp files - # runAsNonRoot: true - # runAsUser: 1000 - -ingress: - enabled: false - className: "" - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: - - path: / - pathType: ImplementationSpecific - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -autoscaling: - enabled: false - replicaCount: 1 - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -# Deployment strategy for pod updates -# IMPORTANT: When using PVCs (routing or database), use "Recreate" strategy -# to avoid file lock conflicts with BadgerDB and SQLite. 
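-# A minimal pairing sketch (the exact nesting of the database PVC block may
-# differ in your values layout): persistence should always ship together with
-# the Recreate strategy:
-#   database:
-#     sqlite:
-#       pvc:
-#         enabled: true
-#   strategy:
-#     type: Recreate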
-# -# Recreate: Terminates old pod before starting new one (default, required with PVCs) -# - Ensures clean database shutdown and no lock conflicts -# - Brief downtime during updates (10-15 seconds) -# - Prevents CrashLoopBackOff due to file locks -# -# RollingUpdate: Zero-downtime updates (only for stateless deployments without PVCs) -# - NOT recommended when PVCs are enabled -# - Can cause BadgerDB/SQLite lock conflicts -# -# To use RollingUpdate (only if NO PVCs are used): -# strategy: -# type: RollingUpdate -# rollingUpdate: -# maxSurge: 1 -# maxUnavailable: 0 -strategy: - type: Recreate - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -extraVolumes: [] - # Example: - # - name: datastore - # configMap: - # name: my-configmap - -extraVolumeMounts: [] - # Example: - # - name: datastore - # mountPath: /etc/datastore - -# Extra environment variables for the apiserver container -extraEnv: [] -# - name: SSL_CERT_DIR -# value: "/etc/ca-certs" -# - name: GOCOVERDIR -# value: /tmp/coverage - -revisionHistoryLimit: 2 - -# Secrets configuration -# Choose ONE of two methods: -# 1. Helm-managed secrets (secrets.*) - credentials in values.yaml -# 2. ExternalSecrets (externalSecrets.*) - credentials synced from Vault -# -# Method 1: Helm-managed secrets (default) -# Sensitive credentials are stored in Kubernetes secrets and injected as environment variables -secrets: - # Private key for peer-to-peer routing identity - # If not provided, the secret will not include this key - privKey: "" - - # Sync authentication credentials - # Used for authenticating sync operations between nodes - # Username defaults to "sync" if empty, password is randomly generated if empty - syncAuth: - username: "" - password: "" - - # OCI (Open Container Initiative) registry authentication - # Used for authenticating to the OCI-backed storage backend - # Username defaults to "admin" if empty, password is randomly generated if empty - ociAuth: - username: "" - password: "" - -# Method 2: ExternalSecrets configuration -# Syncs credentials from HashiCorp Vault (or other secret providers) using External Secrets Operator -# When enabled, the Helm-managed secret (above) is NOT created -externalSecrets: - # Enable ExternalSecrets integration (default: false) - # When true, credentials are synced from Vault instead of using values.yaml - enabled: false - - # Vault path where credentials are stored (all credentials in one path) - # Example: "dir_staging/dev/credentials" - vaultPath: "" - - # ClusterSecretStore or SecretStore name to use - # This must be pre-configured in your cluster (managed by platform team) - secretStore: "vault-backend" - - # Secret store kind (default: ClusterSecretStore) - # Options: ClusterSecretStore (cluster-wide) or SecretStore (namespace-scoped) - secretStoreKind: "ClusterSecretStore" - - # Refresh interval - how often ESO syncs from Vault (default: 1h) - refreshInterval: "1h" - - # Node identity configuration - nodeIdentity: - enabled: true - # Property name in Vault secret (default: "node.privkey") - property: "node.privkey" - - # OCI registry authentication - ociAuth: - enabled: true - # Property names in Vault secret (defaults shown) - usernameProperty: "oci-username" - passwordProperty: "oci-password" - - # Sync authentication (shared with remote nodes) - syncAuth: - enabled: true - # Property names in Vault secret (defaults shown) - usernameProperty: "sync-username" - passwordProperty: "sync-password" - -# OASF server subchart configuration (OPTIONAL) -# OASF is NOT installed by default. 
Set enabled: true to deploy an OASF schema server instance. -oasf: - enabled: false +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +# Default values for helm-chart. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: "" +fullnameOverride: "" + +# Logging configuration +log_level: INFO +log_format: text # Options: "text" (development) or "json" (production) +grpc_logging_verbose: false # Options: false (production - logs start/finish only) or true (verbose - includes payloads) + +# Enable coverage volume (emptyDir persists across container restarts) +coverageVolume: false + +image: + repository: ghcr.io/agntcy/dir-apiserver + tag: latest + pullPolicy: IfNotPresent + pullSecrets: [] + +# Server configuration +config: + # listen_address: "0.0.0.0:8888" + + # Authentication settings (handles identity verification) + # Supports both X.509 (X.509-SVID) and JWT (JWT-SVID) authentication + authn: + # Enable authentication + enabled: false + # Authentication mode: "x509" or "jwt" + # - x509: Uses X.509-SVID from mutual TLS peer certificates + # - jwt: Uses JWT-SVID from Authorization header + mode: "x509" + # SPIFFE Workload API socket path (injected by SPIRE agent) + socket_path: "unix:///run/spire/agent-sockets/api.sock" + # Expected audiences for JWT validation (only used in JWT mode) + audiences: + - "spiffe://example.org/dir-server" + + # Authorization settings (handles access control policies) + # Requires authentication to be enabled first + authz: + # Enable authorization policies + enabled: false + # Trust domain for this Directory server + # Used to distinguish internal (same trust domain) vs external requests + trust_domain: "example.org" + + # Store settings for the storage backend. + store: + # Storage provider to use. + provider: "oci" + + # OCI-backed store + oci: + # Path to a local directory that will be to hold data instead of remote. + # If this is set to non-empty value, only local store will be used. + # local_dir: "" + + # Cache directory to use for metadata. + # cache_dir: "" + + # Registry address to connect to + registry_address: "dir-zot.dir-server.svc.cluster.local:5000" + # All data will be stored under this repo. + # Objects are pushed as tags, manifests, and blobs. + # repository_name: "" + + # Auth credentials to use. + auth_config: + insecure: "true" + access_token: access-token + refresh_token: refresh-token + + # Routing settings for the peer-to-peer network. + routing: + # Address to use for routing + # listen_address: "/ip4/0.0.0.0/tcp/5555" + + # Path to private key file for peer ID. + # key_path: /tmp/agntcy-dir/node.privkey + + # Nodes to use for bootstrapping of the DHT. + # We read initial routing tables here and get introduced + # to the network. 
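+    # Bootstrap entries are libp2p multiaddrs; a /p2p/<peer-id> suffix can pin
+    # a specific peer identity. Example values below are hypothetical:
+    #   bootstrap_peers:
+    #     - /ip4/203.0.113.10/tcp/5555/p2p/12D3KooWExamplePeerID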
+ # bootstrap_peers: + # - /ip4/1.1.1.1/tcp/1 + # - /ip4/1.1.1.1/tcp/2 + + # GossipSub configuration for efficient label announcements + # When enabled, labels are propagated via GossipSub mesh to ALL subscribed peers + # When disabled, falls back to DHT+Pull mechanism (higher bandwidth, limited reach) + # Default: true (recommended for production) + gossipsub: + enabled: true + + # Sync configuration + sync: + # How frequently the scheduler checks for pending syncs + scheduler_interval: "30s" + + # Maximum number of sync workers running concurrently + worker_count: 1 + + # Timeout for individual sync operations + worker_timeout: "10m" + + # Registry monitor configuration + registry_monitor: + check_interval: "30s" + + # Authentication configuration for sync operations + auth_config: {} + + # Events configuration + events: + # Channel buffer size per subscriber + # Larger buffers allow subscribers to fall behind temporarily without dropping events + # Default: 100 + subscriber_buffer_size: 100 + + # Enable logging when events are dropped due to slow consumers + # Default: true + log_slow_consumers: true + + # Enable debug logging of all published events (verbose in production) + # Default: false + log_published_events: false + + # Publication configuration + publication: + # How frequently the scheduler checks for pending publications + scheduler_interval: "1h" + + # Maximum number of publication workers running concurrently + worker_count: 1 + + # Timeout for individual publication operations + worker_timeout: "30m" + + # gRPC Connection Management configuration + # Protects server from resource exhaustion, zombie connections, and memory exhaustion + # Production-safe defaults are applied automatically - customization is optional + # Note: These settings can only be configured via Helm values (no environment variables) + connection: + # Connection limits + # max_concurrent_streams: 1000 # Maximum concurrent gRPC streams per connection (default: 1000) + # max_recv_msg_size: 4194304 # Maximum message size for receiving in bytes - 4MB (default: 4MB) + # max_send_msg_size: 4194304 # Maximum message size for sending in bytes - 4MB (default: 4MB) + # connection_timeout: 120s # Timeout for establishing new connections (default: 120s) + + # Keepalive configuration - detects dead connections and prevents resource leaks + # keepalive: + # max_connection_idle: 15m # Close connections idle for this duration (default: 15m) + # max_connection_age: 30m # Close connections after this age to rotate (default: 30m) + # max_connection_age_grace: 5m # Grace period for in-flight RPCs before closing aged connections (default: 5m) + # time: 5m # Send keepalive pings every N duration (default: 5m) + # timeout: 1m # Close connection if ping not acknowledged within timeout (default: 1m) + # min_time: 1m # Minimum time between client pings (prevents abuse) (default: 1m) + # permit_without_stream: true # Allow keepalive pings without active streams (default: true) + + # Example: High-traffic production configuration + # connection: + # max_concurrent_streams: 2000 + # max_recv_msg_size: 8388608 # 8MB for larger records + # max_send_msg_size: 8388608 # 8MB for larger records + # connection_timeout: 60s + # keepalive: + # max_connection_idle: 10m + # max_connection_age: 20m + # max_connection_age_grace: 3m + # time: 3m + # timeout: 30s + # min_time: 30s + # permit_without_stream: false + + # Rate limiting configuration + # Protects the server from abuse and resource exhaustion using token bucket algorithm + ratelimit: 
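+    # Worked example of the token bucket defaults below: with per_client_rps: 100
+    # and per_client_burst: 200, a client can burst 200 requests at once; the
+    # bucket then refills at 100 tokens/s, so a 300-request spike clears in
+    # roughly one additional second of sustained traffic.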
+ # Enable rate limiting middleware + # Default: false (disabled for development/testing) + enabled: false + + # Global rate limit (applies to all requests regardless of client) + # Set both to 0 to disable global limiting + # global_rps: 0 # Requests per second (float, e.g., 1000.0) + # global_burst: 0 # Burst capacity (int, e.g., 2000) + + # Per-client rate limit (tracked by SPIFFE ID from mTLS) + # Default values shown below are reasonable for production + # Set both to 0 to disable per-client limiting + per_client_rps: 100 # Requests per second per client (float) + per_client_burst: 200 # Burst capacity per client (int) + + # Per-method rate limit overrides (optional) + # Allows fine-grained control over specific gRPC methods + # Note: These can only be configured via Helm values, not environment variables + # method_limits: + # "/agntcy.dir.store.v1.StoreService/CreateRecord": + # rps: 50 # Lower limit for expensive operations + # burst: 100 + # "/agntcy.dir.store.v1.StoreService/PullRecord": + # rps: 200 # Higher limit for read operations + # burst: 400 + +# SPIRE configuration +spire: + enabled: false + trustDomain: example.org + + # SPIRE controller className for ClusterSPIFFEID matching + # + # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. + # The SPIRE controller manager uses className to match ClusterSPIFFEID resources + # with the appropriate SPIRE installation. Without this field, the controller + # will ignore the ClusterSPIFFEID and no workload registration will occur, + # causing authentication failures and preventing the gRPC server from starting. + # + # The className must match the SPIRE installation's className. + # + # Default: "dir-spire" (matches standard SPIRE installation convention). + # If your SPIRE installation uses a different className, override this value. + # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). + # + # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. + # For standard installations, SPIRE is deployed in the "spire" namespace with + # className "dir-spire". 
Verify with: + # kubectl get deployment -n spire spire-server -o yaml | grep className + # + # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md + className: "dir-spire" + + # Use SPIFFE CSI driver for workload attestation (recommended) + # + # When true (recommended): + # - Uses SPIFFE CSI driver for proper workload registration + # - Workload registration happens synchronously before pod starts + # - SPIRE agent issues X.509-SVID with URI SAN before pod begins + # - Eliminates authentication delays and race conditions + # - Production-ready, reliable identity injection + # + # When false (legacy/debugging only): + # - Uses hostPath to mount SPIRE agent socket + # - Workload registration happens asynchronously via ClusterSPIFFEID + # - Pod may retry authentication during startup + # - Only use for debugging or when CSI driver is unavailable + # + # Requires: SPIFFE CSI driver deployed in cluster + # See: https://github.com/spiffe/spiffe-csi + useCSIDriver: true + + # Custom DNS names to add to the X.509-SVID certificate SANs + # Useful for external access with proper TLS verification without --tls-skip-verify + # Example: + # dnsNameTemplates: + # - "dir-api.example.com" + # - "api.example.com" + dnsNameTemplates: [] + + federation: [] + # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md + # - trustDomain: dir-cluster + # bundleEndpointURL: https://0.0.0.0:8081 + # bundleEndpointProfile: + # type: https_web + +# Create PVC for routing/cache data +# IMPORTANT: When PVC is enabled, set strategy.type to "Recreate" to avoid +# BadgerDB lock conflicts during updates (see strategy configuration above) +pvc: + create: false + storageClassName: standard + size: 1G + +# Database configuration +database: + # Database type (currently only sqlite supported) + type: "sqlite" + + # SQLite configuration + sqlite: + # Path to SQLite database file + # Default: /tmp/dir.db (ephemeral - lost on pod restart) + # When using PVC: /var/lib/dir/database/dir.db (persistent) + dbPath: "/tmp/dir.db" + + # PVC for database persistence (optional) + # When enabled, database persists across pod restarts + # Also allows enabling readOnlyRootFilesystem security hardening + # IMPORTANT: When PVC is enabled, set strategy.type to "Recreate" to avoid + # SQLite lock conflicts during updates (see strategy configuration above) + pvc: + enabled: false # Disabled by default (opt-in for backward compatibility) + create: true # Create PVC automatically + storageClassName: "" # Use cluster default storage class + size: 1Gi # Database size (adjust based on expected record count) + accessMode: ReadWriteOnce + +# Service exposes gRPC server api +service: + type: ClusterIP + port: 8888 + +# Prometheus metrics configuration +# This section configures BOTH the application metrics server AND Kubernetes service +# The values are automatically injected into the server configuration +metrics: + # Enable Prometheus metrics collection + # Default: true (recommended for production) + enabled: true + + # Port for Prometheus metrics endpoint + # Used for both the application listen address and Kubernetes service port + # Default: 9090 + port: 9090 + + # ServiceMonitor for Prometheus Operator (optional) + # Creates a ServiceMonitor resource for automatic Prometheus discovery + serviceMonitor: + enabled: false + interval: 30s + scrapeTimeout: 10s + labels: {} + +# Routing service exposes P2P networking (separate from API service) 
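+# On a local Kind cluster the NodePort default needs no overrides; peers can
+# reach the P2P endpoint at <node-ip>:<nodePort>, which as a multiaddr looks
+# like (addresses are hypothetical):
+#   /ip4/172.18.0.2/tcp/30555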
+routingService: + # Service type for routing/P2P traffic + # Options: ClusterIP, NodePort, LoadBalancer + # Default: NodePort (works everywhere - local Kind and cloud) + # For production cloud: override to LoadBalancer for stable external IP + type: NodePort + + # Cloud provider for automatic annotation configuration + # Options: "aws", "gcp", "azure", or leave empty for manual configuration + # When set, provider-specific annotations are automatically applied + cloudProvider: "" + + # AWS-specific configuration (only used when cloudProvider: "aws") + aws: + # Use internal load balancer (default: false = internet-facing) + internal: false + # NLB target type: "instance" or "ip" (default: instance) + # nlbTargetType: "instance" + + # GCP-specific configuration (only used when cloudProvider: "gcp") + gcp: + # Use internal load balancer (default: false = external) + internal: false + # Optional: BackendConfig name for advanced configuration + # backendConfig: "" + + # Azure-specific configuration (only used when cloudProvider: "azure") + azure: + # Use internal load balancer (default: false = public) + internal: false + # Optional: Resource group for load balancer + # resourceGroup: "" + + # Optional: Specify a static IP (must be reserved in cloud provider first) + # loadBalancerIP: "" + + # Optional: Preserve client source IPs (recommended for P2P) + externalTrafficPolicy: Local + + # Optional: Fixed NodePort (only used when type is NodePort) + # nodePort: 30555 + + # Optional: Additional custom annotations (merged with provider annotations) + # Custom annotations take precedence over provider-generated ones + annotations: {} + +serviceAccount: + # Specifies whether a service account should be created + create: false + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true # When enabled, /tmp emptyDir is automatically mounted for SQLite temp files + # runAsNonRoot: true + # runAsUser: 1000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + replicaCount: 1 + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# Deployment strategy for pod updates +# IMPORTANT: When using PVCs (routing or database), use "Recreate" strategy +# to avoid file lock conflicts with BadgerDB and SQLite. 
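+# To confirm which strategy a live deployment actually uses (the deployment
+# name here is hypothetical):
+#   kubectl get deployment dir-apiserver -o jsonpath='{.spec.strategy.type}'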
+# +# Recreate: Terminates old pod before starting new one (default, required with PVCs) +# - Ensures clean database shutdown and no lock conflicts +# - Brief downtime during updates (10-15 seconds) +# - Prevents CrashLoopBackOff due to file locks +# +# RollingUpdate: Zero-downtime updates (only for stateless deployments without PVCs) +# - NOT recommended when PVCs are enabled +# - Can cause BadgerDB/SQLite lock conflicts +# +# To use RollingUpdate (only if NO PVCs are used): +# strategy: +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 1 +# maxUnavailable: 0 +strategy: + type: Recreate + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +extraVolumes: [] + # Example: + # - name: datastore + # configMap: + # name: my-configmap + +extraVolumeMounts: [] + # Example: + # - name: datastore + # mountPath: /etc/datastore + +# Extra environment variables for the apiserver container +extraEnv: [] +# - name: SSL_CERT_DIR +# value: "/etc/ca-certs" +# - name: GOCOVERDIR +# value: /tmp/coverage + +revisionHistoryLimit: 2 + +# Secrets configuration +# Choose ONE of two methods: +# 1. Helm-managed secrets (secrets.*) - credentials in values.yaml +# 2. ExternalSecrets (externalSecrets.*) - credentials synced from Vault +# +# Method 1: Helm-managed secrets (default) +# Sensitive credentials are stored in Kubernetes secrets and injected as environment variables +secrets: + # Private key for peer-to-peer routing identity + # If not provided, the secret will not include this key + privKey: "" + + # Sync authentication credentials + # Used for authenticating sync operations between nodes + # Username defaults to "sync" if empty, password is randomly generated if empty + syncAuth: + username: "" + password: "" + + # OCI (Open Container Initiative) registry authentication + # Used for authenticating to the OCI-backed storage backend + # Username defaults to "admin" if empty, password is randomly generated if empty + ociAuth: + username: "" + password: "" + +# Method 2: ExternalSecrets configuration +# Syncs credentials from HashiCorp Vault (or other secret providers) using External Secrets Operator +# When enabled, the Helm-managed secret (above) is NOT created +externalSecrets: + # Enable ExternalSecrets integration (default: false) + # When true, credentials are synced from Vault instead of using values.yaml + enabled: false + + # Vault path where credentials are stored (all credentials in one path) + # Example: "dir_staging/dev/credentials" + vaultPath: "" + + # ClusterSecretStore or SecretStore name to use + # This must be pre-configured in your cluster (managed by platform team) + secretStore: "vault-backend" + + # Secret store kind (default: ClusterSecretStore) + # Options: ClusterSecretStore (cluster-wide) or SecretStore (namespace-scoped) + secretStoreKind: "ClusterSecretStore" + + # Refresh interval - how often ESO syncs from Vault (default: 1h) + refreshInterval: "1h" + + # Node identity configuration + nodeIdentity: + enabled: true + # Property name in Vault secret (default: "node.privkey") + property: "node.privkey" + + # OCI registry authentication + ociAuth: + enabled: true + # Property names in Vault secret (defaults shown) + usernameProperty: "oci-username" + passwordProperty: "oci-password" + + # Sync authentication (shared with remote nodes) + syncAuth: + enabled: true + # Property names in Vault secret (defaults shown) + usernameProperty: "sync-username" + passwordProperty: "sync-password" + +# OASF server subchart configuration (OPTIONAL) +# OASF is NOT installed by default. 
Set enabled: true to deploy an OASF schema server instance. +oasf: + enabled: false diff --git a/install/charts/dir/values.yaml b/install/charts/dir/values.yaml index 32d60f6de..85fee32b8 100644 --- a/install/charts/dir/values.yaml +++ b/install/charts/dir/values.yaml @@ -1,504 +1,504 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiserver: - image: - repository: ghcr.io/agntcy/dir-apiserver - tag: latest - pullPolicy: IfNotPresent - pullSecrets: - - name: regcred - - service: - type: NodePort # Default for local/dev - change to ClusterIP for cloud with Ingress - - # Prometheus metrics configuration - # This section configures BOTH the application metrics server AND Kubernetes service - # The values are automatically injected into the server configuration - metrics: - # Enable Prometheus metrics collection - # Default: true (recommended for production) - enabled: true - - # Port for Prometheus metrics endpoint - # Used for both the application listen address and Kubernetes service port - # Default: 9090 - port: 9090 - - # ServiceMonitor for Prometheus Operator (optional) - # Creates a ServiceMonitor resource for automatic Prometheus discovery - serviceMonitor: - enabled: false - interval: 30s - scrapeTimeout: 10s - labels: {} - - # Routing service configuration for P2P networking - routingService: - # Service type for routing/P2P traffic - # Options: ClusterIP, NodePort, LoadBalancer - # Recommended for cloud: LoadBalancer (for stable external IP) - # Default for local/dev: NodePort (inherits from service.type if not set) - # Uncomment and set to LoadBalancer for production cloud deployments: - # type: LoadBalancer - - # Cloud provider for automatic annotation configuration - # Options: "aws", "gcp", "azure", or leave empty for no auto-configuration - # When set, provider-specific LoadBalancer annotations are automatically applied - # Examples: - # cloudProvider: "aws" # Auto-configures AWS NLB with internet-facing scheme - # cloudProvider: "gcp" # Auto-configures GCP External Load Balancer - # cloudProvider: "azure" # Auto-configures Azure public Load Balancer - # cloudProvider: "" - - # AWS-specific configuration (only used when cloudProvider: "aws") - # aws: - # internal: false # Set to true for internal NLB - # nlbTargetType: "instance" # Or "ip" for IP-based targets - - # GCP-specific configuration (only used when cloudProvider: "gcp") - # gcp: - # internal: false # Set to true for internal load balancer - # backendConfig: "" # Optional BackendConfig resource name - - # Azure-specific configuration (only used when cloudProvider: "azure") - # azure: - # internal: false # Set to true for internal load balancer - # resourceGroup: "" # Optional resource group name - - # Optional: Static IP (must be reserved in cloud provider first) - # loadBalancerIP: "" - - # Optional: Preserve client source IPs (recommended for P2P) - # externalTrafficPolicy: Local - - # Optional: Fixed NodePort (only used when type is NodePort) - # nodePort: 30555 - - # Optional: Additional custom annotations (merged with provider annotations) - # Custom annotations override provider-generated ones - # annotations: {} - - # Server configuration - config: - # listen_address: "0.0.0.0:8888" - - # Authentication settings (handles identity verification) - # Supports both X.509 (X.509-SVID) and JWT (JWT-SVID) authentication - authn: - # Enable authentication - enabled: false - # Authentication mode: "x509" or "jwt" - # - x509: Uses X.509-SVID from mutual TLS peer 
certificates - # - jwt: Uses JWT-SVID from Authorization header - mode: "x509" - # SPIFFE Workload API socket path (injected by SPIRE agent) - socket_path: "unix:///run/spire/agent-sockets/api.sock" - # Expected audiences for JWT validation (only used in JWT mode) - audiences: - - "spiffe://example.org/dir-server" - - # Authorization settings (handles access control policies) - # Requires authentication to be enabled first - authz: - # Enable authorization policies - enabled: false - # Trust domain for this Directory server - # Used to distinguish internal (same trust domain) vs external requests - trust_domain: "example.org" - - # Store settings for the storage backend. - store: - # Storage provider to use. - provider: "oci" - - # OCI-backed store - oci: - # Path to a local directory that will be to hold data instead of remote. - # If this is set to non-empty value, only local store will be used. - # local_dir: "" - - # Cache directory to use for metadata. - # cache_dir: "" - - # Registry address to connect to - registry_address: "dir-zot.dir-server.svc.cluster.local:5000" - # All data will be stored under this repo. - # Objects are pushed as tags, manifests, and blobs. - # repository_name: "" - - # Auth credentials to use. - auth_config: - insecure: "true" - access_token: access-token - refresh_token: refresh-token - - # Routing settings for the peer-to-peer network. - routing: - # Address to use for routing - listen_address: "/ip4/0.0.0.0/tcp/5555" - - # Path to private key file for peer ID. - # key_path: /tmp/agntcy-dir/node.privkey - - # Nodes to use for bootstrapping of the DHT. - # We read initial routing tables here and get introduced - # to the network. - # bootstrap_peers: - # - /ip4/1.1.1.1/tcp/1 - # - /ip4/1.1.1.1/tcp/2 - - # GossipSub configuration for efficient label announcements - # When enabled, labels are propagated via GossipSub mesh to ALL subscribed peers - # When disabled, falls back to DHT+Pull mechanism (higher bandwidth, limited reach) - # Default: true (recommended for production) - gossipsub: - enabled: true - - # Sync configuration - sync: - # How frequently the scheduler checks for pending syncs - scheduler_interval: "30s" - - # Maximum number of sync workers running concurrently - worker_count: 1 - - # Timeout for individual sync operations - worker_timeout: "10m" - - # Registry monitor configuration - registry_monitor: - check_interval: "30s" - - # Authentication configuration for sync operations - auth_config: {} - - # Publication configuration - publication: - # How frequently the scheduler checks for pending publications - scheduler_interval: "1h" - - # Maximum number of publication workers running concurrently - worker_count: 1 - - # Timeout for individual publication operations - worker_timeout: "30m" - - # Events configuration - events: - # Channel buffer size per subscriber - # Larger buffers allow subscribers to fall behind temporarily without dropping events - # Default: 100 - subscriber_buffer_size: 100 - - # Enable logging when events are dropped due to slow consumers - # Default: true - log_slow_consumers: true - - # Enable debug logging of all published events (verbose in production) - # Default: false - log_published_events: false - - # Rate limiting configuration - # Protects the server from abuse and resource exhaustion using token bucket algorithm - ratelimit: - # Enable rate limiting middleware - # Default: false (disabled for development/testing) - enabled: false - - # Global rate limit (applies to all requests regardless of client) - # Set 
both to 0 to disable global limiting - # global_rps: 0 # Requests per second (float, e.g., 1000.0) - # global_burst: 0 # Burst capacity (int, e.g., 2000) - - # Per-client rate limit (tracked by SPIFFE ID from mTLS) - # Default values shown below are reasonable for production - # Set both to 0 to disable per-client limiting - per_client_rps: 100 # Requests per second per client (float) - per_client_burst: 200 # Burst capacity per client (int) - - # Per-method rate limit overrides (optional) - # Allows fine-grained control over specific gRPC methods - # Note: These can only be configured via Helm values, not environment variables - # method_limits: - # "/agntcy.dir.store.v1.StoreService/CreateRecord": - # rps: 50 # Lower limit for expensive operations - # burst: 100 - # "/agntcy.dir.store.v1.StoreService/PullRecord": - # rps: 200 # Higher limit for read operations - # burst: 400 - - # OASF API validation configuration - oasf_api_validation: - # Schema URL for API-based validation - # Default: https://schema.oasf.outshift.com (public OASF server) - # To use the OASF instance deployed with this chart (when apiserver.oasf.enabled: true): - # schema_url: "http://-ingress-controller..svc.cluster.local" - # Example: schema_url: "http://dir-ingress-controller.dir-server.svc.cluster.local" - schema_url: "" - - # Disable API validation (use embedded schema validation instead) - # When true, uses embedded schemas for validation (no HTTP calls to OASF server) - # Default: false - disable: false - - # Use strict validation mode - # Default: true - strict_mode: true - - # SPIRE configuration - spire: - enabled: false - trustDomain: example.org - - # SPIRE controller className for ClusterSPIFFEID matching - # - # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. - # The SPIRE controller manager uses className to match ClusterSPIFFEID resources - # with the appropriate SPIRE installation. Without this field, the controller - # will ignore the ClusterSPIFFEID and no workload registration will occur, - # causing authentication failures and preventing the gRPC server from starting. - # - # The className must match the SPIRE installation's className. - # - # Default: "dir-spire" (matches standard SPIRE installation convention). - # If your SPIRE installation uses a different className, override this value. - # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). - # - # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. - # For standard installations, SPIRE is deployed in the "spire" namespace with - # className "dir-spire". 
Verify with: - # kubectl get deployment -n spire spire-server -o yaml | grep className - # - # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md - className: "dir-spire" - - federation: [] - # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md - # - trustDomain: dir-cluster - # bundleEndpointURL: https://0.0.0.0:8081 - # bundleEndpointProfile: - # type: https_web - - # Zot registry configuration (subchart) - zot: - # Disable default config mounting - mountConfig: false - - # Enable default secret mounting - mountSecret: true - - # ZOT configuration file - configFiles: - config.json: |- - { - "distSpecVersion": "1.1.1", - "storage": { - "rootDirectory": "/var/lib/registry" - }, - "http": { - "address": "0.0.0.0", - "port": "5000", - "auth": { - "htpasswd": { - "path": "/secret/htpasswd" - } - }, - "accessControl": { - "adminPolicy": { - "users": ["{{ if .Values.secrets }}{{ if .Values.secrets.ociAuth }}{{ .Values.secrets.ociAuth.username | default "admin" }}{{ else }}admin{{ end }}{{ else }}admin{{ end }}"], - "actions": ["read", "create", "update", "delete"] - }, - "repositories": { - "**": { - "anonymousPolicy": [], - "defaultPolicy": ["read"] - } - } - } - }, - "log": { - "level": "debug" - }, - "extensions": { - "search": { - "enable": true - }, - "trust": { - "enable": true, - "cosign": true, - "notation": false - } - } - } - - # htpasswd credentials for ZOT authentication - secretFiles: - htpasswd: "" - - # Extra volumes to mount the shared PVC for writable config - # This allows the apiserver sync worker to modify ZOT's config at runtime - extraVolumes: - - name: zot-config-storage - persistentVolumeClaim: - claimName: dir-zot-config - - # Mount the shared PVC into ZOT container - extraVolumeMounts: - - name: zot-config-storage - mountPath: /etc/zot - - # Configure zot to use the config file from the shared mounted volume - extraArgs: - - "serve" - - "/etc/zot/config.json" - - # OASF server configuration (subchart) - OPTIONAL - # OASF is NOT installed by default. Set enabled: true to deploy an OASF schema server instance. - # When enabled, deploys an OASF schema server instance alongside the directory server. - # OASF server subchart configuration - oasf: - enabled: false - localDeploy: true # Creates IngressClass resource for ingress controller - - image: - repository: ghcr.io/agntcy/oasf-server - versions: - - server: v0.7.2 - schema: 0.7.0 - - server: v0.8.1 - schema: 0.8.0 - default: true - # - server: latest - # schema: 0.9.0-dev - pullPolicy: IfNotPresent - - service: - type: ClusterIP - port: 8080 - - config: - server_port: 8080 - - env: {} - volumes: [] - volumeMounts: [] - - ingress: - enabled: true - className: "nginx" - annotations: - nginx.ingress.kubernetes.io/ssl-redirect: "false" - hosts: - - host: localhost - - host: dir-ingress-controller.dir-server.svc.cluster.local - tls: [] - service: - type: ClusterIP - - resources: {} - - # Secrets configuration - # Choose ONE of two methods: - # 1. Helm-managed secrets (secrets.*) - credentials in values.yaml - # 2. 
ExternalSecrets (externalSecrets.*) - credentials synced from Vault - # - # Method 1: Helm-managed secrets (default) - # Sensitive credentials are stored in Kubernetes secrets and injected as environment variables - secrets: - # Private key for peer-to-peer routing identity - # If not provided, the secret will not include this key - privKey: "" - - # Sync authentication credentials - # Used for authenticating sync operations between nodes - # Username defaults to "sync" if empty, password is randomly generated if empty - syncAuth: - username: "" - password: "" - - # OCI registry authentication - # Used for authenticating to the OCI-backed storage backend - # Username defaults to "admin" if empty, password is randomly generated if empty - ociAuth: - username: "" - password: "" - - # Method 2: ExternalSecrets configuration - # Syncs credentials from HashiCorp Vault (or other secret providers) using External Secrets Operator - # When enabled, the Helm-managed secret (above) is NOT created - externalSecrets: - # Enable ExternalSecrets integration (default: false) - # When true, credentials are synced from Vault instead of using values.yaml - enabled: false - - # Vault path where credentials are stored (all credentials in one path) - # Example: "dir_staging/dev/credentials" - vaultPath: "" - - # ClusterSecretStore or SecretStore name to use - # This must be pre-configured in your cluster (managed by platform team) - secretStore: "vault-backend" - - # Secret store kind (default: ClusterSecretStore) - # Options: ClusterSecretStore (cluster-wide) or SecretStore (namespace-scoped) - secretStoreKind: "ClusterSecretStore" - - # Refresh interval - how often ESO syncs from Vault (default: 1h) - refreshInterval: "1h" - - # Node identity configuration - nodeIdentity: - enabled: true - # Property name in Vault secret (default: "node.privkey") - property: "node.privkey" - - # OCI registry authentication - ociAuth: - enabled: true - # Property names in Vault secret (defaults shown) - usernameProperty: "oci-username" - passwordProperty: "oci-password" - - # Sync authentication (shared with remote nodes) - syncAuth: - enabled: true - # Property names in Vault secret (defaults shown) - usernameProperty: "sync-username" - passwordProperty: "sync-password" - - # Database configuration - database: - # Database type (currently only sqlite supported) - type: "sqlite" - - # SQLite configuration - sqlite: - # Path to SQLite database file - # Default: /tmp/dir.db (ephemeral - lost on pod restart) - # When using PVC: /var/lib/dir/database/dir.db (persistent) - dbPath: "/tmp/dir.db" - - # PVC for database persistence (optional) - # When enabled, database persists across pod restarts - # Also allows enabling readOnlyRootFilesystem security hardening - pvc: - enabled: false # Disabled by default (opt-in for backward compatibility) - create: true # Create PVC automatically - storageClassName: "" # Use cluster default storage class - size: 1Gi # Database size (1Gi for dev, 5Gi recommended for production) - accessMode: ReadWriteOnce - - # Deployment strategy for pod updates - # IMPORTANT: Use "Recreate" when PVCs are enabled (routing or database) - # to avoid file lock conflicts with BadgerDB and SQLite - # - # Default: Recreate (prevents database lock conflicts during updates) - # - Brief downtime during updates (10-15 seconds) - # - Required when using PVCs to avoid CrashLoopBackOff - # - # Alternative: RollingUpdate (only for fully stateless deployments) - # - Zero-downtime updates - # - NOT compatible with PVCs (will 
cause lock conflicts) - strategy: - type: Recreate +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiserver: + image: + repository: ghcr.io/agntcy/dir-apiserver + tag: latest + pullPolicy: IfNotPresent + pullSecrets: + - name: regcred + + service: + type: NodePort # Default for local/dev - change to ClusterIP for cloud with Ingress + + # Prometheus metrics configuration + # This section configures BOTH the application metrics server AND Kubernetes service + # The values are automatically injected into the server configuration + metrics: + # Enable Prometheus metrics collection + # Default: true (recommended for production) + enabled: true + + # Port for Prometheus metrics endpoint + # Used for both the application listen address and Kubernetes service port + # Default: 9090 + port: 9090 + + # ServiceMonitor for Prometheus Operator (optional) + # Creates a ServiceMonitor resource for automatic Prometheus discovery + serviceMonitor: + enabled: false + interval: 30s + scrapeTimeout: 10s + labels: {} + + # Routing service configuration for P2P networking + routingService: + # Service type for routing/P2P traffic + # Options: ClusterIP, NodePort, LoadBalancer + # Recommended for cloud: LoadBalancer (for stable external IP) + # Default for local/dev: NodePort (inherits from service.type if not set) + # Uncomment and set to LoadBalancer for production cloud deployments: + # type: LoadBalancer + + # Cloud provider for automatic annotation configuration + # Options: "aws", "gcp", "azure", or leave empty for no auto-configuration + # When set, provider-specific LoadBalancer annotations are automatically applied + # Examples: + # cloudProvider: "aws" # Auto-configures AWS NLB with internet-facing scheme + # cloudProvider: "gcp" # Auto-configures GCP External Load Balancer + # cloudProvider: "azure" # Auto-configures Azure public Load Balancer + # cloudProvider: "" + + # AWS-specific configuration (only used when cloudProvider: "aws") + # aws: + # internal: false # Set to true for internal NLB + # nlbTargetType: "instance" # Or "ip" for IP-based targets + + # GCP-specific configuration (only used when cloudProvider: "gcp") + # gcp: + # internal: false # Set to true for internal load balancer + # backendConfig: "" # Optional BackendConfig resource name + + # Azure-specific configuration (only used when cloudProvider: "azure") + # azure: + # internal: false # Set to true for internal load balancer + # resourceGroup: "" # Optional resource group name + + # Optional: Static IP (must be reserved in cloud provider first) + # loadBalancerIP: "" + + # Optional: Preserve client source IPs (recommended for P2P) + # externalTrafficPolicy: Local + + # Optional: Fixed NodePort (only used when type is NodePort) + # nodePort: 30555 + + # Optional: Additional custom annotations (merged with provider annotations) + # Custom annotations override provider-generated ones + # annotations: {} + + # Server configuration + config: + # listen_address: "0.0.0.0:8888" + + # Authentication settings (handles identity verification) + # Supports both X.509 (X.509-SVID) and JWT (JWT-SVID) authentication + authn: + # Enable authentication + enabled: false + # Authentication mode: "x509" or "jwt" + # - x509: Uses X.509-SVID from mutual TLS peer certificates + # - jwt: Uses JWT-SVID from Authorization header + mode: "x509" + # SPIFFE Workload API socket path (injected by SPIRE agent) + socket_path: "unix:///run/spire/agent-sockets/api.sock" + # Expected audiences for JWT 
validation (only used in JWT mode) + audiences: + - "spiffe://example.org/dir-server" + + # Authorization settings (handles access control policies) + # Requires authentication to be enabled first + authz: + # Enable authorization policies + enabled: false + # Trust domain for this Directory server + # Used to distinguish internal (same trust domain) vs external requests + trust_domain: "example.org" + + # Store settings for the storage backend. + store: + # Storage provider to use. + provider: "oci" + + # OCI-backed store + oci: + # Path to a local directory that will be to hold data instead of remote. + # If this is set to non-empty value, only local store will be used. + # local_dir: "" + + # Cache directory to use for metadata. + # cache_dir: "" + + # Registry address to connect to + registry_address: "dir-zot.dir-server.svc.cluster.local:5000" + # All data will be stored under this repo. + # Objects are pushed as tags, manifests, and blobs. + # repository_name: "" + + # Auth credentials to use. + auth_config: + insecure: "true" + access_token: access-token + refresh_token: refresh-token + + # Routing settings for the peer-to-peer network. + routing: + # Address to use for routing + listen_address: "/ip4/0.0.0.0/tcp/5555" + + # Path to private key file for peer ID. + # key_path: /tmp/agntcy-dir/node.privkey + + # Nodes to use for bootstrapping of the DHT. + # We read initial routing tables here and get introduced + # to the network. + # bootstrap_peers: + # - /ip4/1.1.1.1/tcp/1 + # - /ip4/1.1.1.1/tcp/2 + + # GossipSub configuration for efficient label announcements + # When enabled, labels are propagated via GossipSub mesh to ALL subscribed peers + # When disabled, falls back to DHT+Pull mechanism (higher bandwidth, limited reach) + # Default: true (recommended for production) + gossipsub: + enabled: true + + # Sync configuration + sync: + # How frequently the scheduler checks for pending syncs + scheduler_interval: "30s" + + # Maximum number of sync workers running concurrently + worker_count: 1 + + # Timeout for individual sync operations + worker_timeout: "10m" + + # Registry monitor configuration + registry_monitor: + check_interval: "30s" + + # Authentication configuration for sync operations + auth_config: {} + + # Publication configuration + publication: + # How frequently the scheduler checks for pending publications + scheduler_interval: "1h" + + # Maximum number of publication workers running concurrently + worker_count: 1 + + # Timeout for individual publication operations + worker_timeout: "30m" + + # Events configuration + events: + # Channel buffer size per subscriber + # Larger buffers allow subscribers to fall behind temporarily without dropping events + # Default: 100 + subscriber_buffer_size: 100 + + # Enable logging when events are dropped due to slow consumers + # Default: true + log_slow_consumers: true + + # Enable debug logging of all published events (verbose in production) + # Default: false + log_published_events: false + + # Rate limiting configuration + # Protects the server from abuse and resource exhaustion using token bucket algorithm + ratelimit: + # Enable rate limiting middleware + # Default: false (disabled for development/testing) + enabled: false + + # Global rate limit (applies to all requests regardless of client) + # Set both to 0 to disable global limiting + # global_rps: 0 # Requests per second (float, e.g., 1000.0) + # global_burst: 0 # Burst capacity (int, e.g., 2000) + + # Per-client rate limit (tracked by SPIFFE ID from mTLS) + # Default 
values shown below are reasonable for production + # Set both to 0 to disable per-client limiting + per_client_rps: 100 # Requests per second per client (float) + per_client_burst: 200 # Burst capacity per client (int) + + # Per-method rate limit overrides (optional) + # Allows fine-grained control over specific gRPC methods + # Note: These can only be configured via Helm values, not environment variables + # method_limits: + # "/agntcy.dir.store.v1.StoreService/CreateRecord": + # rps: 50 # Lower limit for expensive operations + # burst: 100 + # "/agntcy.dir.store.v1.StoreService/PullRecord": + # rps: 200 # Higher limit for read operations + # burst: 400 + + # OASF API validation configuration + oasf_api_validation: + # Schema URL for API-based validation + # Default: https://schema.oasf.outshift.com (public OASF server) + # To use the OASF instance deployed with this chart (when apiserver.oasf.enabled: true): + # schema_url: "http://-ingress-controller..svc.cluster.local" + # Example: schema_url: "http://dir-ingress-controller.dir-server.svc.cluster.local" + schema_url: "" + + # Disable API validation (use embedded schema validation instead) + # When true, uses embedded schemas for validation (no HTTP calls to OASF server) + # Default: false + disable: false + + # Use strict validation mode + # Default: true + strict_mode: true + + # SPIRE configuration + spire: + enabled: false + trustDomain: example.org + + # SPIRE controller className for ClusterSPIFFEID matching + # + # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. + # The SPIRE controller manager uses className to match ClusterSPIFFEID resources + # with the appropriate SPIRE installation. Without this field, the controller + # will ignore the ClusterSPIFFEID and no workload registration will occur, + # causing authentication failures and preventing the gRPC server from starting. + # + # The className must match the SPIRE installation's className. + # + # Default: "dir-spire" (matches standard SPIRE installation convention). + # If your SPIRE installation uses a different className, override this value. + # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). + # + # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. + # For standard installations, SPIRE is deployed in the "spire" namespace with + # className "dir-spire". 
Verify with: + # kubectl get deployment -n spire spire-server -o yaml | grep className + # + # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md + className: "dir-spire" + + federation: [] + # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md + # - trustDomain: dir-cluster + # bundleEndpointURL: https://0.0.0.0:8081 + # bundleEndpointProfile: + # type: https_web + + # Zot registry configuration (subchart) + zot: + # Disable default config mounting + mountConfig: false + + # Enable default secret mounting + mountSecret: true + + # ZOT configuration file + configFiles: + config.json: |- + { + "distSpecVersion": "1.1.1", + "storage": { + "rootDirectory": "/var/lib/registry" + }, + "http": { + "address": "0.0.0.0", + "port": "5000", + "auth": { + "htpasswd": { + "path": "/secret/htpasswd" + } + }, + "accessControl": { + "adminPolicy": { + "users": ["{{ if .Values.secrets }}{{ if .Values.secrets.ociAuth }}{{ .Values.secrets.ociAuth.username | default "admin" }}{{ else }}admin{{ end }}{{ else }}admin{{ end }}"], + "actions": ["read", "create", "update", "delete"] + }, + "repositories": { + "**": { + "anonymousPolicy": [], + "defaultPolicy": ["read"] + } + } + } + }, + "log": { + "level": "debug" + }, + "extensions": { + "search": { + "enable": true + }, + "trust": { + "enable": true, + "cosign": true, + "notation": false + } + } + } + + # htpasswd credentials for ZOT authentication + secretFiles: + htpasswd: "" + + # Extra volumes to mount the shared PVC for writable config + # This allows the apiserver sync worker to modify ZOT's config at runtime + extraVolumes: + - name: zot-config-storage + persistentVolumeClaim: + claimName: dir-zot-config + + # Mount the shared PVC into ZOT container + extraVolumeMounts: + - name: zot-config-storage + mountPath: /etc/zot + + # Configure zot to use the config file from the shared mounted volume + extraArgs: + - "serve" + - "/etc/zot/config.json" + + # OASF server configuration (subchart) - OPTIONAL + # OASF is NOT installed by default. Set enabled: true to deploy an OASF schema server instance. + # When enabled, deploys an OASF schema server instance alongside the directory server. + # OASF server subchart configuration + oasf: + enabled: false + localDeploy: true # Creates IngressClass resource for ingress controller + + image: + repository: ghcr.io/agntcy/oasf-server + versions: + - server: v0.7.2 + schema: 0.7.0 + - server: v0.8.1 + schema: 0.8.0 + default: true + # - server: latest + # schema: 0.9.0-dev + pullPolicy: IfNotPresent + + service: + type: ClusterIP + port: 8080 + + config: + server_port: 8080 + + env: {} + volumes: [] + volumeMounts: [] + + ingress: + enabled: true + className: "nginx" + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "false" + hosts: + - host: localhost + - host: dir-ingress-controller.dir-server.svc.cluster.local + tls: [] + service: + type: ClusterIP + + resources: {} + + # Secrets configuration + # Choose ONE of two methods: + # 1. Helm-managed secrets (secrets.*) - credentials in values.yaml + # 2. 
ExternalSecrets (externalSecrets.*) - credentials synced from Vault + # + # Method 1: Helm-managed secrets (default) + # Sensitive credentials are stored in Kubernetes secrets and injected as environment variables + secrets: + # Private key for peer-to-peer routing identity + # If not provided, the secret will not include this key + privKey: "" + + # Sync authentication credentials + # Used for authenticating sync operations between nodes + # Username defaults to "sync" if empty, password is randomly generated if empty + syncAuth: + username: "" + password: "" + + # OCI registry authentication + # Used for authenticating to the OCI-backed storage backend + # Username defaults to "admin" if empty, password is randomly generated if empty + ociAuth: + username: "" + password: "" + + # Method 2: ExternalSecrets configuration + # Syncs credentials from HashiCorp Vault (or other secret providers) using External Secrets Operator + # When enabled, the Helm-managed secret (above) is NOT created + externalSecrets: + # Enable ExternalSecrets integration (default: false) + # When true, credentials are synced from Vault instead of using values.yaml + enabled: false + + # Vault path where credentials are stored (all credentials in one path) + # Example: "dir_staging/dev/credentials" + vaultPath: "" + + # ClusterSecretStore or SecretStore name to use + # This must be pre-configured in your cluster (managed by platform team) + secretStore: "vault-backend" + + # Secret store kind (default: ClusterSecretStore) + # Options: ClusterSecretStore (cluster-wide) or SecretStore (namespace-scoped) + secretStoreKind: "ClusterSecretStore" + + # Refresh interval - how often ESO syncs from Vault (default: 1h) + refreshInterval: "1h" + + # Node identity configuration + nodeIdentity: + enabled: true + # Property name in Vault secret (default: "node.privkey") + property: "node.privkey" + + # OCI registry authentication + ociAuth: + enabled: true + # Property names in Vault secret (defaults shown) + usernameProperty: "oci-username" + passwordProperty: "oci-password" + + # Sync authentication (shared with remote nodes) + syncAuth: + enabled: true + # Property names in Vault secret (defaults shown) + usernameProperty: "sync-username" + passwordProperty: "sync-password" + + # Database configuration + database: + # Database type (currently only sqlite supported) + type: "sqlite" + + # SQLite configuration + sqlite: + # Path to SQLite database file + # Default: /tmp/dir.db (ephemeral - lost on pod restart) + # When using PVC: /var/lib/dir/database/dir.db (persistent) + dbPath: "/tmp/dir.db" + + # PVC for database persistence (optional) + # When enabled, database persists across pod restarts + # Also allows enabling readOnlyRootFilesystem security hardening + pvc: + enabled: false # Disabled by default (opt-in for backward compatibility) + create: true # Create PVC automatically + storageClassName: "" # Use cluster default storage class + size: 1Gi # Database size (1Gi for dev, 5Gi recommended for production) + accessMode: ReadWriteOnce + + # Deployment strategy for pod updates + # IMPORTANT: Use "Recreate" when PVCs are enabled (routing or database) + # to avoid file lock conflicts with BadgerDB and SQLite + # + # Default: Recreate (prevents database lock conflicts during updates) + # - Brief downtime during updates (10-15 seconds) + # - Required when using PVCs to avoid CrashLoopBackOff + # + # Alternative: RollingUpdate (only for fully stateless deployments) + # - Zero-downtime updates + # - NOT compatible with PVCs (will 
cause lock conflicts) + strategy: + type: Recreate diff --git a/install/charts/dirctl/.helmignore b/install/charts/dirctl/.helmignore index 0e8a0eb36..f82e96d46 100644 --- a/install/charts/dirctl/.helmignore +++ b/install/charts/dirctl/.helmignore @@ -1,23 +1,23 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install/charts/dirctl/Chart.lock b/install/charts/dirctl/Chart.lock index 11ef8c241..e50171bde 100644 --- a/install/charts/dirctl/Chart.lock +++ b/install/charts/dirctl/Chart.lock @@ -1,3 +1,3 @@ -dependencies: [] -digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726 -generated: "2025-08-01T10:45:17.145298+02:00" +dependencies: [] +digest: sha256:643d5437104296e21d906ecb15b2c96ad278f20cfc4af53b12bb6069bd853726 +generated: "2025-08-01T10:45:17.145298+02:00" diff --git a/install/charts/dirctl/Chart.yaml b/install/charts/dirctl/Chart.yaml index 58528bfa6..c82d8ab3d 100644 --- a/install/charts/dirctl/Chart.yaml +++ b/install/charts/dirctl/Chart.yaml @@ -1,29 +1,29 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -apiVersion: v2 -name: dirctl -description: A Helm chart for Kubernetes - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "1.16.0" - -dependencies: [] +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v2 +name: dirctl +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" + +dependencies: [] diff --git a/install/charts/dirctl/templates/_helpers.tpl b/install/charts/dirctl/templates/_helpers.tpl index 7ba5edc27..4dff5d9f2 100644 --- a/install/charts/dirctl/templates/_helpers.tpl +++ b/install/charts/dirctl/templates/_helpers.tpl @@ -1,62 +1,62 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "chart.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "chart.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "chart.labels" -}} -helm.sh/chart: {{ include "chart.chart" . }} -{{ include "chart.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "chart.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.chart" . }} +{{ include "chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/install/charts/dirctl/templates/clusterspiffefederation.yaml b/install/charts/dirctl/templates/clusterspiffefederation.yaml index 0df27ae3a..6e3fcdd9c 100644 --- a/install/charts/dirctl/templates/clusterspiffefederation.yaml +++ b/install/charts/dirctl/templates/clusterspiffefederation.yaml @@ -1,11 +1,11 @@ -{{- if eq .Values.spire.enabled true }} -{{- range .Values.spire.federation }} ---- -apiVersion: spire.spiffe.io/v1alpha1 -kind: ClusterFederatedTrustDomain -metadata: - name: {{ include "chart.fullname" $ }}-{{ .trustDomain | replace "." "-" }} -spec: - {{ . | toYaml | nindent 2 }} -{{- end }} -{{- end }} +{{- if eq .Values.spire.enabled true }} +{{- range .Values.spire.federation }} +--- +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterFederatedTrustDomain +metadata: + name: {{ include "chart.fullname" $ }}-{{ .trustDomain | replace "." "-" }} +spec: + {{ . | toYaml | nindent 2 }} +{{- end }} +{{- end }} diff --git a/install/charts/dirctl/templates/clusterspiffeids.yaml b/install/charts/dirctl/templates/clusterspiffeids.yaml index ee8a8593e..d7026572c 100644 --- a/install/charts/dirctl/templates/clusterspiffeids.yaml +++ b/install/charts/dirctl/templates/clusterspiffeids.yaml @@ -1,25 +1,25 @@ -{{- if eq .Values.spire.enabled true }} -apiVersion: spire.spiffe.io/v1alpha1 -kind: ClusterSPIFFEID -metadata: - name: {{ include "chart.fullname" . }} -spec: - className: {{ .Values.spire.className | default (printf "%s-spire" .Release.Namespace) }} - podSelector: - matchExpressions: - - key: app.kubernetes.io/name - operator: In - values: - - {{ include "chart.name" . }} - workloadSelectorTemplates: - - k8s:pod-image:{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} - - k8s:sa:{{ include "chart.serviceAccountName" . 
}} - spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} - autoPopulateDNSNames: true - {{- if .Values.spire.federation }} - federatesWith: - {{ range .Values.spire.federation }} - - {{ .trustDomain }} - {{ end }} - {{- end }} -{{- end }} +{{- if eq .Values.spire.enabled true }} +apiVersion: spire.spiffe.io/v1alpha1 +kind: ClusterSPIFFEID +metadata: + name: {{ include "chart.fullname" . }} +spec: + className: {{ .Values.spire.className | default (printf "%s-spire" .Release.Namespace) }} + podSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "chart.name" . }} + workloadSelectorTemplates: + - k8s:pod-image:{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + - k8s:sa:{{ include "chart.serviceAccountName" . }} + spiffeIDTemplate: {{ "spiffe://{{ .TrustDomain }}/ns/{{ .PodMeta.Namespace }}/sa/{{ .PodSpec.ServiceAccountName }}" }} + autoPopulateDNSNames: true + {{- if .Values.spire.federation }} + federatesWith: + {{ range .Values.spire.federation }} + - {{ .trustDomain }} + {{ end }} + {{- end }} +{{- end }} diff --git a/install/charts/dirctl/templates/configmaps.yaml b/install/charts/dirctl/templates/configmaps.yaml index d7a68e807..9b9f2376d 100644 --- a/install/charts/dirctl/templates/configmaps.yaml +++ b/install/charts/dirctl/templates/configmaps.yaml @@ -1,15 +1,15 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- range $config := .Values.configMaps }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $config.name }} -data: - {{- range $key, $value := $config.data }} - {{ $key }}: | - {{ $value | nindent 4 }} - {{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- range $config := .Values.configMaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $config.name }} +data: + {{- range $key, $value := $config.data }} + {{ $key }}: | + {{ $value | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install/charts/dirctl/templates/cronjob.yaml b/install/charts/dirctl/templates/cronjob.yaml index f42b42ce3..2489a8486 100644 --- a/install/charts/dirctl/templates/cronjob.yaml +++ b/install/charts/dirctl/templates/cronjob.yaml @@ -1,136 +1,136 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- range $name, $cronjob := .Values.cronjobs }} -{{- if $cronjob.enabled }} ---- -apiVersion: batch/v1 -kind: CronJob -metadata: - name: {{ include "chart.fullname" $ }}-{{ $name }} - labels: - {{- include "chart.labels" $ | nindent 4 }} - app.kubernetes.io/component: {{ include "chart.fullname" $ }}-{{ $name }} -spec: - schedule: {{ $cronjob.schedule | quote }} - successfulJobsHistoryLimit: {{ $cronjob.successfulJobsHistoryLimit | default $.Values.global.cronjob.successfulJobsHistoryLimit }} - failedJobsHistoryLimit: {{ $cronjob.failedJobsHistoryLimit | default $.Values.global.cronjob.failedJobsHistoryLimit }} - concurrencyPolicy: {{ $cronjob.concurrencyPolicy | default $.Values.global.cronjob.concurrencyPolicy | quote }} - jobTemplate: - spec: - template: - metadata: - labels: - {{- include "chart.selectorLabels" $ | nindent 12 }} - app.kubernetes.io/component: {{ include "chart.fullname" $ }}-{{ $name }} - {{- with $.Values.podAnnotations }} - annotations: - {{- toYaml . 
| nindent 12 }} - {{- end }} - spec: - restartPolicy: OnFailure - {{- with $.Values.image.pullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if $.Values.serviceAccount.create }} - serviceAccountName: {{ include "chart.fullname" $ }} - {{- end }} - {{- with $.Values.podSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - containers: - - name: dirctl - image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" - imagePullPolicy: {{ $.Values.image.pullPolicy }} - {{- with $.Values.securityContext }} - securityContext: - {{- toYaml . | nindent 14 }} - {{- end }} - args: - {{- range $cronjob.args }} - - {{ . | quote }} - {{- end }} - env: - {{- if eq $.Values.spire.enabled true }} - - name: DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH - value: unix:/run/spire/agent-sockets/api.sock - {{- end }} - {{- with $.Values.env }} - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with $cronjob.env }} - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with ($cronjob.resources | default $.Values.resources) }} - resources: - {{- toYaml . | nindent 14 }} - {{- end }} - volumeMounts: - {{- if eq $.Values.spire.enabled true }} - - name: spire-agent-socket - mountPath: /run/spire/agent-sockets - readOnly: true - {{- end }} - {{- if $.Values.securityContext.readOnlyRootFilesystem }} - - name: home-dir - mountPath: /home/nonroot - {{- end }} - {{- $allVolumeMounts := list }} - {{- if $.Values.volumeMounts }} - {{- $allVolumeMounts = concat $allVolumeMounts $.Values.volumeMounts }} - {{- end }} - {{- if $cronjob.volumeMounts }} - {{- $allVolumeMounts = concat $allVolumeMounts $cronjob.volumeMounts }} - {{- end }} - {{- if $allVolumeMounts }} - {{- toYaml $allVolumeMounts | nindent 14 }} - {{- end }} - volumes: - {{- if eq $.Values.spire.enabled true }} - - name: spire-agent-socket - {{- if $.Values.spire.useCSIDriver }} - # SPIFFE CSI driver for proper workload attestation - # Ensures synchronous workload registration before pod starts - csi: - driver: "csi.spiffe.io" - readOnly: true - {{- else }} - # Legacy hostPath mount (for debugging/non-production use only) - # May cause "certificate contains no URI SAN" authentication errors - hostPath: - path: /run/spire/agent-sockets - type: Directory - {{- end }} - {{- end }} - {{- if $.Values.securityContext.readOnlyRootFilesystem }} - # Writable home directory for config files when root filesystem is read-only - # Required for applications that create config files (e.g., MCP host) - - name: home-dir - emptyDir: {} - {{- end }} - {{- $allVolumes := list }} - {{- if $.Values.volumes }} - {{- $allVolumes = concat $allVolumes $.Values.volumes }} - {{- end }} - {{- if $cronjob.volumes }} - {{- $allVolumes = concat $allVolumes $cronjob.volumes }} - {{- end }} - {{- if $allVolumes }} - {{- toYaml $allVolumes | nindent 12 }} - {{- end }} - {{- with $.Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with $.Values.affinity }} - affinity: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with $.Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 12 }} - {{- end }} -{{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- range $name, $cronjob := .Values.cronjobs }} +{{- if $cronjob.enabled }} +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "chart.fullname" $ }}-{{ $name }} + labels: + {{- include "chart.labels" $ | nindent 4 }} + app.kubernetes.io/component: {{ include "chart.fullname" $ }}-{{ $name }} +spec: + schedule: {{ $cronjob.schedule | quote }} + successfulJobsHistoryLimit: {{ $cronjob.successfulJobsHistoryLimit | default $.Values.global.cronjob.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ $cronjob.failedJobsHistoryLimit | default $.Values.global.cronjob.failedJobsHistoryLimit }} + concurrencyPolicy: {{ $cronjob.concurrencyPolicy | default $.Values.global.cronjob.concurrencyPolicy | quote }} + jobTemplate: + spec: + template: + metadata: + labels: + {{- include "chart.selectorLabels" $ | nindent 12 }} + app.kubernetes.io/component: {{ include "chart.fullname" $ }}-{{ $name }} + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 12 }} + {{- end }} + spec: + restartPolicy: OnFailure + {{- with $.Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if $.Values.serviceAccount.create }} + serviceAccountName: {{ include "chart.fullname" $ }} + {{- end }} + {{- with $.Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + containers: + - name: dirctl + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- with $.Values.securityContext }} + securityContext: + {{- toYaml . | nindent 14 }} + {{- end }} + args: + {{- range $cronjob.args }} + - {{ . | quote }} + {{- end }} + env: + {{- if eq $.Values.spire.enabled true }} + - name: DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH + value: unix:/run/spire/agent-sockets/api.sock + {{- end }} + {{- with $.Values.env }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $cronjob.env }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with ($cronjob.resources | default $.Values.resources) }} + resources: + {{- toYaml . 
| nindent 14 }} + {{- end }} + volumeMounts: + {{- if eq $.Values.spire.enabled true }} + - name: spire-agent-socket + mountPath: /run/spire/agent-sockets + readOnly: true + {{- end }} + {{- if $.Values.securityContext.readOnlyRootFilesystem }} + - name: home-dir + mountPath: /home/nonroot + {{- end }} + {{- $allVolumeMounts := list }} + {{- if $.Values.volumeMounts }} + {{- $allVolumeMounts = concat $allVolumeMounts $.Values.volumeMounts }} + {{- end }} + {{- if $cronjob.volumeMounts }} + {{- $allVolumeMounts = concat $allVolumeMounts $cronjob.volumeMounts }} + {{- end }} + {{- if $allVolumeMounts }} + {{- toYaml $allVolumeMounts | nindent 14 }} + {{- end }} + volumes: + {{- if eq $.Values.spire.enabled true }} + - name: spire-agent-socket + {{- if $.Values.spire.useCSIDriver }} + # SPIFFE CSI driver for proper workload attestation + # Ensures synchronous workload registration before pod starts + csi: + driver: "csi.spiffe.io" + readOnly: true + {{- else }} + # Legacy hostPath mount (for debugging/non-production use only) + # May cause "certificate contains no URI SAN" authentication errors + hostPath: + path: /run/spire/agent-sockets + type: Directory + {{- end }} + {{- end }} + {{- if $.Values.securityContext.readOnlyRootFilesystem }} + # Writable home directory for config files when root filesystem is read-only + # Required for applications that create config files (e.g., MCP host) + - name: home-dir + emptyDir: {} + {{- end }} + {{- $allVolumes := list }} + {{- if $.Values.volumes }} + {{- $allVolumes = concat $allVolumes $.Values.volumes }} + {{- end }} + {{- if $cronjob.volumes }} + {{- $allVolumes = concat $allVolumes $cronjob.volumes }} + {{- end }} + {{- if $allVolumes }} + {{- toYaml $allVolumes | nindent 12 }} + {{- end }} + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install/charts/dirctl/templates/serviceaccount.yaml b/install/charts/dirctl/templates/serviceaccount.yaml index 26a7fa0a6..f7c29bbb8 100644 --- a/install/charts/dirctl/templates/serviceaccount.yaml +++ b/install/charts/dirctl/templates/serviceaccount.yaml @@ -1,15 +1,15 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "chart.serviceAccountName" . }} - labels: - {{- include "chart.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chart.serviceAccountName" . }} + labels: + {{- include "chart.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/install/charts/dirctl/values.yaml b/install/charts/dirctl/values.yaml index 46dfe0b1d..39120e256 100644 --- a/install/charts/dirctl/values.yaml +++ b/install/charts/dirctl/values.yaml @@ -1,227 +1,227 @@ -# Example configuration with multiple cronjobs -# This demonstrates how to configure multiple dirctl cronjobs - -# Global config -global: - cronjob: - failedJobsHistoryLimit: 10 - successfulJobsHistoryLimit: 10 - concurrencyPolicy: 'Forbid' - -# Image config -image: - repository: ghcr.io/agntcy/dir-ctl - tag: latest - pullPolicy: IfNotPresent - pullSecrets: [] - -# CronJobs configuration -cronjobs: - # Search cronjob - search: - enabled: true - schedule: '* * * * *' - args: - - 'search' - - # Lookup cronjob - lookup: - enabled: true - schedule: '* * * * *' - args: - - 'info' - - 'baeareiesad3lyuacjirp6gxudrzheltwbodtsg7ieqpox36w5j637rchwq' - - # Push cronjob - push: - enabled: true - schedule: '* * * * *' - args: - - 'push' - - '/examples/record.json' - volumes: - - name: record-json - configMap: - name: example-record - items: - - key: record.json - path: examples/record.json - volumeMounts: - - name: record-json - mountPath: /examples/record.json - subPath: examples/record.json - readOnly: true - - # Import cronjob - sync from MCP registry every 6 hours - import-mcp: - enabled: false - schedule: '0 */6 * * *' # Every 6 hours - args: - - 'import' - - '--type=mcp' - - '--url=https://registry.modelcontextprotocol.io/v0.1' - env: - # Import-specific environment variables can be added here - # - name: DIRECTORY_CLIENT_SERVER_ADDRESS - # value: dir-apiserver.dir-server.svc.cluster.local:8888 - -# SPIRE configuration for workload identity -spire: - enabled: false - trustDomain: example.org - - # SPIRE controller className for ClusterSPIFFEID matching - # - # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. - # The SPIRE controller manager uses className to match ClusterSPIFFEID resources - # with the appropriate SPIRE installation. Without this field, the controller - # will ignore the ClusterSPIFFEID and no workload registration will occur, - # causing authentication failures and preventing workloads from starting. - # - # The className must match the SPIRE installation's className. - # - # Default: "dir-spire" (matches standard SPIRE installation convention). - # If your SPIRE installation uses a different className, override this value. - # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). - # - # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. - # For standard installations, SPIRE is deployed in the "spire" namespace with - # className "dir-spire". 
Verify with: - # kubectl get deployment -n spire spire-server -o yaml | grep className - # - # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md - className: "dir-spire" - - # Use SPIFFE CSI driver for workload attestation (recommended) - # - # When true (recommended): - # - Uses SPIFFE CSI driver for proper workload registration - # - Workload registration happens synchronously before pod starts - # - SPIRE agent issues X.509-SVID with URI SAN before pod begins - # - Eliminates "certificate contains no URI SAN" authentication errors - # - No race conditions during authentication - # - Production-ready, reliable identity injection - # - # When false (legacy/debugging only): - # - Uses hostPath to mount SPIRE agent socket - # - Workload registration happens asynchronously via ClusterSPIFFEID - # - Pod may try to authenticate before registration completes - # - May cause intermittent authentication failures - # - Only use for debugging or when CSI driver is unavailable - # - # Requires: SPIFFE CSI driver deployed in cluster - # See: https://github.com/spiffe/spiffe-csi - useCSIDriver: true - - federation: [] - # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md - # - trustDomain: dir-cluster - # bundleEndpointURL: https://0.0.0.0:8081 - # bundleEndpointProfile: - # type: https_web - -# Additional environment variables (applied to all cronjobs) -env: - - name: DIRECTORY_CLIENT_SERVER_ADDRESS - value: dir-apiserver.dir-server.svc.cluster.local:8888 - - name: DIRECTORY_CLIENT_AUTH_MODE - value: '' # Options: "token", "x509", "jwt", or "" for no auth - - name: DIRECTORY_CLIENT_JWT_AUDIENCE - value: '' # Required if using JWT auth - - name: DIRECTORY_CLIENT_SPIFFE_TOKEN - value: '' # Required if using token auth - -# Config maps config (shared across all cronjobs) -configMaps: - - name: example-record - data: - record.json: | - { - "name": "directory.agntcy.org/cisco/marketing-strategy-v3", - "version": "v3.0.0", - "schema_version": "0.7.0", - "description": "Research agent for Cisco's marketing strategy.", - "authors": [ - "Cisco Systems" - ], - "created_at": "2025-03-19T17:06:37Z", - "annotations": { - "key": "value" - }, - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "name": "life_science/biotechnology" - } - ], - "modules": [ - { - "name": "runtime/model", - "id": 303, - "data": { - "models": [ - { - "provider": "openai", - "model": "gpt-4", - "api_base": "https://api.openai.com/v1", - "env_vars": [ - { - "name": "OPENAI_API_KEY", - "description": "OpenAI API key for authentication", - "required": true - }, - { - "name": "OPENAI_ORG_ID", - "description": "OpenAI organization ID", - "required": false, - "default_value": "" - } - ] - } - ] - } - } - ] - } - -# Service account config -serviceAccount: - create: false - annotations: {} - name: '' - -# Security context -securityContext: - runAsNonRoot: true - runAsUser: 65532 - readOnlyRootFilesystem: true - -# Default resource configuration (can be overridden per cronjob) -resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 50m - memory: 64Mi - -# Default volumes (can be extended per cronjob) -volumes: [] - -# Default volume 
mounts (can be extended per cronjob) -volumeMounts: [] +# Example configuration with multiple cronjobs +# This demonstrates how to configure multiple dirctl cronjobs + +# Global config +global: + cronjob: + failedJobsHistoryLimit: 10 + successfulJobsHistoryLimit: 10 + concurrencyPolicy: 'Forbid' + +# Image config +image: + repository: ghcr.io/agntcy/dir-ctl + tag: latest + pullPolicy: IfNotPresent + pullSecrets: [] + +# CronJobs configuration +cronjobs: + # Search cronjob + search: + enabled: true + schedule: '* * * * *' + args: + - 'search' + + # Lookup cronjob + lookup: + enabled: true + schedule: '* * * * *' + args: + - 'info' + - 'baeareiesad3lyuacjirp6gxudrzheltwbodtsg7ieqpox36w5j637rchwq' + + # Push cronjob + push: + enabled: true + schedule: '* * * * *' + args: + - 'push' + - '/examples/record.json' + volumes: + - name: record-json + configMap: + name: example-record + items: + - key: record.json + path: examples/record.json + volumeMounts: + - name: record-json + mountPath: /examples/record.json + subPath: examples/record.json + readOnly: true + + # Import cronjob - sync from MCP registry every 6 hours + import-mcp: + enabled: false + schedule: '0 */6 * * *' # Every 6 hours + args: + - 'import' + - '--type=mcp' + - '--url=https://registry.modelcontextprotocol.io/v0.1' + env: + # Import-specific environment variables can be added here + # - name: DIRECTORY_CLIENT_SERVER_ADDRESS + # value: dir-apiserver.dir-server.svc.cluster.local:8888 + +# SPIRE configuration for workload identity +spire: + enabled: false + trustDomain: example.org + + # SPIRE controller className for ClusterSPIFFEID matching + # + # REQUIRED: The className field is mandatory for ClusterSPIFFEID resources. + # The SPIRE controller manager uses className to match ClusterSPIFFEID resources + # with the appropriate SPIRE installation. Without this field, the controller + # will ignore the ClusterSPIFFEID and no workload registration will occur, + # causing authentication failures and preventing workloads from starting. + # + # The className must match the SPIRE installation's className. + # + # Default: "dir-spire" (matches standard SPIRE installation convention). + # If your SPIRE installation uses a different className, override this value. + # If not specified, falls back to "-spire" (e.g., "my-namespace-spire"). + # + # IMPORTANT: Ensure this matches your SPIRE Controller Manager's className. + # For standard installations, SPIRE is deployed in the "spire" namespace with + # className "dir-spire". 
Verify with: + # kubectl get deployment -n spire spire-server -o yaml | grep className + # + # See: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterspiffeid-crd.md + className: "dir-spire" + + # Use SPIFFE CSI driver for workload attestation (recommended) + # + # When true (recommended): + # - Uses SPIFFE CSI driver for proper workload registration + # - Workload registration happens synchronously before pod starts + # - SPIRE agent issues X.509-SVID with URI SAN before pod begins + # - Eliminates "certificate contains no URI SAN" authentication errors + # - No race conditions during authentication + # - Production-ready, reliable identity injection + # + # When false (legacy/debugging only): + # - Uses hostPath to mount SPIRE agent socket + # - Workload registration happens asynchronously via ClusterSPIFFEID + # - Pod may try to authenticate before registration completes + # - May cause intermittent authentication failures + # - Only use for debugging or when CSI driver is unavailable + # + # Requires: SPIFFE CSI driver deployed in cluster + # See: https://github.com/spiffe/spiffe-csi + useCSIDriver: true + + federation: [] + # # Config: https://github.com/spiffe/spire-controller-manager/blob/main/docs/clusterfederatedtrustdomain-crd.md + # - trustDomain: dir-cluster + # bundleEndpointURL: https://0.0.0.0:8081 + # bundleEndpointProfile: + # type: https_web + +# Additional environment variables (applied to all cronjobs) +env: + - name: DIRECTORY_CLIENT_SERVER_ADDRESS + value: dir-apiserver.dir-server.svc.cluster.local:8888 + - name: DIRECTORY_CLIENT_AUTH_MODE + value: '' # Options: "token", "x509", "jwt", or "" for no auth + - name: DIRECTORY_CLIENT_JWT_AUDIENCE + value: '' # Required if using JWT auth + - name: DIRECTORY_CLIENT_SPIFFE_TOKEN + value: '' # Required if using token auth + +# Config maps config (shared across all cronjobs) +configMaps: + - name: example-record + data: + record.json: | + { + "name": "directory.agntcy.org/cisco/marketing-strategy-v3", + "version": "v3.0.0", + "schema_version": "0.7.0", + "description": "Research agent for Cisco's marketing strategy.", + "authors": [ + "Cisco Systems" + ], + "created_at": "2025-03-19T17:06:37Z", + "annotations": { + "key": "value" + }, + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "domains": [ + { + "name": "life_science/biotechnology" + } + ], + "modules": [ + { + "name": "runtime/model", + "id": 303, + "data": { + "models": [ + { + "provider": "openai", + "model": "gpt-4", + "api_base": "https://api.openai.com/v1", + "env_vars": [ + { + "name": "OPENAI_API_KEY", + "description": "OpenAI API key for authentication", + "required": true + }, + { + "name": "OPENAI_ORG_ID", + "description": "OpenAI organization ID", + "required": false, + "default_value": "" + } + ] + } + ] + } + } + ] + } + +# Service account config +serviceAccount: + create: false + annotations: {} + name: '' + +# Security context +securityContext: + runAsNonRoot: true + runAsUser: 65532 + readOnlyRootFilesystem: true + +# Default resource configuration (can be overridden per cronjob) +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + +# Default volumes (can be extended per cronjob) +volumes: [] + +# Default volume 
mounts (can be extended per cronjob) +volumeMounts: [] diff --git a/install/docker/apiserver.env b/install/docker/apiserver.env index 9c7cb6e33..d0091eb1e 100644 --- a/install/docker/apiserver.env +++ b/install/docker/apiserver.env @@ -1,31 +1,31 @@ -DIRECTORY_SERVER_LISTEN_ADDRESS=0.0.0.0:8888 -DIRECTORY_SERVER_AUTHN_ENABLED=false -DIRECTORY_SERVER_AUTHN_MODE=x509 -DIRECTORY_SERVER_AUTHN_SOCKET_PATH=unix:///run/spire/agent-sockets/api.sock -DIRECTORY_SERVER_AUTHN_AUDIENCES= -DIRECTORY_SERVER_AUTHZ_ENABLED=false -DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN=example.org -DIRECTORY_SERVER_STORE_PROVIDER=oci -DIRECTORY_SERVER_STORE_OCI_LOCAL_DIR= -DIRECTORY_SERVER_STORE_OCI_REGISTRY_ADDRESS=zot:5000 -DIRECTORY_SERVER_STORE_OCI_REPOSITORY_NAME=dir -DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_INSECURE=true -DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME= -DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD= -DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_ACCESS_TOKEN=access-token -DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_REFRESH_TOKEN=refresh-token -DIRECTORY_SERVER_ROUTING_GOSSIPSUB_ENABLED=true -DIRECTORY_SERVER_ROUTING_REFRESH_INTERVAL=30s -DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME= -DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD= -DIRECTORY_SERVER_EVENTS_SUBSCRIBER_BUFFER_SIZE=100 -DIRECTORY_SERVER_EVENTS_LOG_SLOW_CONSUMERS=true -DIRECTORY_SERVER_EVENTS_LOG_PUBLISHED_EVENTS=false -DIRECTORY_SERVER_RATELIMIT_ENABLED=false -DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS=0 -DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST=0 -DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS=100 -DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST=200 -DIRECTORY_LOGGER_LOG_LEVEL=DEBUG -DIRECTORY_LOGGER_LOG_FORMAT=json +DIRECTORY_SERVER_LISTEN_ADDRESS=0.0.0.0:8888 +DIRECTORY_SERVER_AUTHN_ENABLED=false +DIRECTORY_SERVER_AUTHN_MODE=x509 +DIRECTORY_SERVER_AUTHN_SOCKET_PATH=unix:///run/spire/agent-sockets/api.sock +DIRECTORY_SERVER_AUTHN_AUDIENCES= +DIRECTORY_SERVER_AUTHZ_ENABLED=false +DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN=example.org +DIRECTORY_SERVER_STORE_PROVIDER=oci +DIRECTORY_SERVER_STORE_OCI_LOCAL_DIR= +DIRECTORY_SERVER_STORE_OCI_REGISTRY_ADDRESS=zot:5000 +DIRECTORY_SERVER_STORE_OCI_REPOSITORY_NAME=dir +DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_INSECURE=true +DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME= +DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD= +DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_ACCESS_TOKEN=access-token +DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_REFRESH_TOKEN=refresh-token +DIRECTORY_SERVER_ROUTING_GOSSIPSUB_ENABLED=true +DIRECTORY_SERVER_ROUTING_REFRESH_INTERVAL=30s +DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME= +DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD= +DIRECTORY_SERVER_EVENTS_SUBSCRIBER_BUFFER_SIZE=100 +DIRECTORY_SERVER_EVENTS_LOG_SLOW_CONSUMERS=true +DIRECTORY_SERVER_EVENTS_LOG_PUBLISHED_EVENTS=false +DIRECTORY_SERVER_RATELIMIT_ENABLED=false +DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS=0 +DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST=0 +DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS=100 +DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST=200 +DIRECTORY_LOGGER_LOG_LEVEL=DEBUG +DIRECTORY_LOGGER_LOG_FORMAT=json DIRECTORY_SERVER_LOGGING_VERBOSE=false \ No newline at end of file diff --git a/install/docker/docker-compose.yml b/install/docker/docker-compose.yml index 50b2c899b..b912cecf4 100644 --- a/install/docker/docker-compose.yml +++ b/install/docker/docker-compose.yml @@ -1,42 +1,42 @@ -name: dir - -services: - apiserver: - build: - context: ../../ - dockerfile: server/Dockerfile - env_file: - - ./apiserver.env - ports: - - 8888:8888 - depends_on: - - zot - deploy: - mode: 
replicated - replicas: 1 - restart_policy: - condition: on-failure - healthcheck: - test: grpc-health-probe -addr=127.0.0.1:8888 || exit 1 - interval: 10s - retries: 60 - start_period: 10s - - zot: - image: ghcr.io/project-zot/zot:v2.1.11 - ports: - - 5555:5000 - deploy: - mode: replicated - replicas: 1 - restart_policy: - condition: on-failure - healthcheck: - test: curl -f http://localhost:5000/v2/ || exit 1 - interval: 5s - retries: 60 - start_period: 15s - -networks: - default: - name: dir-network +name: dir + +services: + apiserver: + build: + context: ../../ + dockerfile: server/Dockerfile + env_file: + - ./apiserver.env + ports: + - 8888:8888 + depends_on: + - zot + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + healthcheck: + test: grpc-health-probe -addr=127.0.0.1:8888 || exit 1 + interval: 10s + retries: 60 + start_period: 10s + + zot: + image: ghcr.io/project-zot/zot:v2.1.11 + ports: + - 5555:5000 + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + healthcheck: + test: curl -f http://localhost:5000/v2/ || exit 1 + interval: 5s + retries: 60 + start_period: 15s + +networks: + default: + name: dir-network diff --git a/mcp/README.md b/mcp/README.md index 67623c9fd..f3bb6b818 100644 --- a/mcp/README.md +++ b/mcp/README.md @@ -1,430 +1,430 @@ -# MCP Server for Directory - -[Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server for working with OASF agent records. - -## Tools - -### `agntcy_oasf_list_versions` - -Lists all available OASF schema versions supported by the server. - -**Input:** None -**Output:** `available_versions` ([]string), `count` (int), `error_message` (string) - -### `agntcy_oasf_get_schema` - -Retrieves the complete OASF schema JSON content for the specified version. - -**Input:** `version` (string) - OASF schema version (e.g., "0.3.1", "0.7.0") -**Output:** `version` (string), `schema` (string), `available_versions` ([]string), `error_message` (string) - -### `agntcy_oasf_get_schema_skills` - -Retrieves skills from the OASF schema with hierarchical navigation support. - -**Input:** -- `version` (string, **required**) - OASF schema version (e.g., "0.7.0") -- `parent_skill` (string, optional) - Parent skill to filter sub-skills - -**Output:** `skills` (array), `version`, `parent_skill`, `available_versions`, `error_message` - -Without `parent_skill`, returns top-level skill categories. With `parent_skill`, returns direct sub-skills under that parent. Each skill includes `name`, `caption`, and `id` fields. - -### `agntcy_oasf_get_schema_domains` - -Retrieves domains from the OASF schema with hierarchical navigation support. - -**Input:** -- `version` (string, **required**) - OASF schema version (e.g., "0.7.0") -- `parent_domain` (string, optional) - Parent domain to filter sub-domains - -**Output:** `domains` (array), `version`, `parent_domain`, `available_versions`, `error_message` - -Without `parent_domain`, returns top-level domain categories. With `parent_domain`, returns direct sub-domains under that parent. Each domain includes `name`, `caption`, and `id` fields. - -### `agntcy_oasf_validate_record` - -Validates an OASF agent record against the OASF schema. - -**Input:** `record_json` (string) -**Output:** `valid` (bool), `schema_version` (string), `validation_errors` ([]string), `error_message` (string) - -### `agntcy_dir_push_record` - -Pushes an OASF agent record to a Directory server. 
- -**Input:** `record_json` (string) - OASF agent record JSON -**Output:** `cid` (string), `server_address` (string), `error_message` (string) - -This tool validates and uploads the record to the configured Directory server. It returns the Content Identifier (CID) and the server address where the record was stored. - -**Note:** Requires Directory server configuration via environment variables. - -### `agntcy_dir_search_local` - -Searches for agent records on the local directory node using structured query filters. - -**Input (all optional):** -- `limit` (uint32) - Maximum results to return (default: 100, max: 1000) -- `offset` (uint32) - Pagination offset (default: 0) -- `names` ([]string) - Agent name patterns (supports wildcards) -- `versions` ([]string) - Version patterns (supports wildcards) -- `skill_ids` ([]string) - Skill IDs (exact match only) -- `skill_names` ([]string) - Skill name patterns (supports wildcards) -- `locators` ([]string) - Locator patterns (supports wildcards) -- `modules` ([]string) - Module patterns (supports wildcards) - -**Output:** -- `record_cids` ([]string) - Array of matching record CIDs -- `count` (int) - Number of results returned -- `has_more` (bool) - Whether more results are available - -**Wildcard Patterns:** -- `*` - Matches zero or more characters -- `?` - Matches exactly one character -- `[]` - Matches any character within brackets (e.g., `[0-9]`, `[a-z]`, `[abc]`) - -**Examples:** -```json -// Find all Python-related agents -{ - "skill_names": ["*python*", "*Python*"] -} - -// Find specific version -{ - "names": ["my-agent"], - "versions": ["v1.*"] -} - -// Complex search with pagination -{ - "skill_names": ["*machine*learning*"], - "locators": ["docker-image:*"], - "limit": 50, - "offset": 0 -} -``` - -**Note:** Multiple filters are combined with OR logic. Requires Directory server configuration via environment variables. - -### `agntcy_dir_pull_record` - -Pulls an OASF agent record from the local Directory node by its CID (Content Identifier). - -**Input:** -- `cid` (string) - Content Identifier of the record to pull (required) - -**Output:** -- `record_data` (string) - The record data (JSON string) -- `error_message` (string) - Error message if pull failed - -**Example:** -```json -{ - "cid": "bafkreiabcd1234567890" -} -``` - -**Note:** The pulled record is content-addressable and can be validated against its hash. Requires Directory server configuration via environment variables. - -### `agntcy_oasf_import_record` - -Imports data from other formats (MCP, A2A) to OASF agent record format. - -**Input:** -- `source_data` (string, **required**) - JSON string of the source data to import -- `source_format` (string, **required**) - Source format: "mcp" or "a2a" - -**Output:** -- `record_json` (string) - The imported OASF record (JSON string) -- `error_message` (string) - Error message if import failed - -**Note:** The resulting record requires domain and skill enrichment. For the complete workflow with automatic enrichment and validation, use the `import_record` prompt instead. - -### `agntcy_oasf_export_record` - -Exports an OASF agent record to other formats (A2A, GitHub Copilot). 
- -**Input:** -- `record_json` (string, **required**) - JSON string of the OASF agent record to export -- `target_format` (string, **required**) - Target format: "a2a" or "ghcopilot" - -**Output:** -- `exported_data` (string) - The exported data in the target format (JSON string) -- `error_message` (string) - Error message if export failed - -**Note:** For the complete workflow with validation, use the `export_record` prompt instead. - -## Prompts - -MCP Prompts are guided workflows that help you accomplish tasks. The server exposes the following prompts: - -### `create_record` - -Analyzes the **current directory** codebase and automatically generates a complete, valid OASF agent record. The AI examines the repository structure, documentation, and code to determine appropriate skills, domains, and metadata. - -**Input (optional):** -- `output_path` (string) - Where to output the record: - - File path (e.g., `"agent.json"`) to save to file - - `"stdout"` to display only (no file saved) - - Empty or omitted defaults to `"stdout"` -- `schema_version` (string) - OASF schema version to use (defaults to "0.7.0") - -**Use when:** You want to automatically generate an OASF record for the current directory's codebase. - -### `validate_record` - -Guides you through validating an existing OASF agent record. Reads a file, validates it against the schema, and reports any errors. - -**Input (required):** `record_path` (string) - Path to the OASF record JSON file to validate - -**Use when:** You have an existing record file and want to check if it's valid. - -### `push_record` - -Complete workflow for validating and pushing an OASF record to the Directory server. Validates the record first, then pushes it to the configured server and returns the CID. - -**Input (required):** `record_path` (string) - Path to the OASF record JSON file to validate and push - -**Use when:** You're ready to publish your record to a Directory server. - -### `search_records` - -Guided workflow for searching agent records using **free-text queries**. This prompt automatically translates natural language queries into structured search parameters by leveraging OASF schema knowledge. - -**Input (required):** `query` (string) - Free-text description of what agents you're looking for - -**What it does:** -1. Retrieves the OASF schema to understand available skills and domains -2. Analyzes your free-text query -3. Translates it to appropriate search filters (names, skills, locators, etc.) -4. Executes the search using `agntcy_dir_search_local` -5. **Extracts and displays ALL CIDs** from the search results (from the `record_cids` field) -6. Provides summary and explanation of search strategy - -**Important:** The prompt explicitly instructs the AI to extract the `record_cids` array from the tool response and display every CID clearly. The response will always include actual CID values, never placeholders. - -**Example queries:** -- `"find Python agents"` -- `"agents that can process images"` -- `"docker-based translation services"` -- `"GPT models version 2"` -- `"agents with text completion skills"` - -**Use when:** You want to search using natural language rather than structured filters. The AI will map your query to OASF taxonomy. - -**Note:** For direct, structured searches, use the `agntcy_dir_search_local` tool instead. - -### `pull_record` - -Guided workflow for pulling an OASF agent record from the Directory by its CID. 
- -**Input:** -- `cid` (string, **required**) - Content Identifier (CID) of the record to pull -- `output_path` (string, optional) - Where to save the record: - File path (e.g., `"record.json"`) to save to file - `"stdout"` or empty to display only (no file saved) - Empty or omitted defaults to `"stdout"` - -**What it does:** -1. Validates the CID format -2. Calls `agntcy_dir_pull_record` with the CID -3. Displays the record data -4. Parses and formats the record JSON for readability -5. Saves to file if `output_path` is specified -6. Optionally validates the record using `agntcy_oasf_validate_record` - -**Use when:** You have a CID and want to retrieve the full record. The pulled record is content-addressable and can be validated against its hash. - -### `import_record` - -Complete guided workflow for importing data from other formats to OASF. - -**Input:** -- `source_data_path` (string, **required**) - Path to the source data file to import -- `source_format` (string, **required**) - Source format: "mcp" or "a2a" -- `output_path` (string, optional) - Where to save the imported OASF record (file path or empty for stdout) -- `schema_version` (string, optional) - OASF schema version to use for validation (defaults to "0.8.0") - -**What it does:** -Reads the source file, converts it to OASF format, enriches domains and skills using the OASF schema, validates the result, and optionally saves to file. - -**Use when:** You want to import MCP servers or A2A cards into the OASF format. This handles all the complexity automatically. - -### `export_record` - -Complete guided workflow for exporting an OASF record to other formats. - -**Input:** -- `record_path` (string, **required**) - Path to the OASF record JSON file to export -- `target_format` (string, **required**) - Target format: "a2a" or "ghcopilot" -- `output_path` (string, optional) - Where to save the exported data (file path or empty for stdout) - -**What it does:** -Reads the OASF record, validates it, converts it to the target format, and optionally saves to file. - -**Use when:** You want to export OASF records to A2A cards or GitHub Copilot MCP configurations. - -## Setup - -The MCP server runs via the `dirctl` CLI tool, which can be obtained as a pre-built binary or Docker image. For the available installation methods, see the CLI [README.md](../cli/README.md) file. - -### 1. Binary - -Add the MCP server to your IDE's MCP configuration using the absolute path to the dirctl binary. - -**Example Cursor configuration (`~/.cursor/mcp.json`):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "/absolute/path/to/dirctl", - "args": ["mcp", "serve"] - } - } -} -``` - -### 2. Docker Image - -Add the MCP server to your IDE's MCP configuration using Docker.
- -**Example Cursor configuration (`~/.cursor/mcp.json`):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "docker", - "args": [ - "run", - "--rm", - "-i", - "ghcr.io/agntcy/dir-ctl:latest", - "mcp", - "serve" - ] - } - } -} -``` - -### Environment Variables - -The following environment variables can be used with both binary and Docker configurations: - -#### Directory Client Configuration - -- `DIRECTORY_CLIENT_SERVER_ADDRESS` - Directory server address (default: `0.0.0.0:8888`) -- `DIRECTORY_CLIENT_AUTH_MODE` - Authentication mode: `none`, `x509`, `jwt`, `token` -- `DIRECTORY_CLIENT_SPIFFE_TOKEN` - Path to SPIFFE token file (for token authentication) -- `DIRECTORY_CLIENT_TLS_SKIP_VERIFY` - Skip TLS verification (set to `true` if needed) - -#### OASF Validation Configuration - -- `OASF_API_VALIDATION_SCHEMA_URL` - OASF schema URL for API-based validation - - **Default**: `https://schema.oasf.outshift.com` - - URL of the OASF server to use for validation - - The MCP server uses API-based validation by default for more comprehensive validation with the latest schema rules - -- `OASF_API_VALIDATION_DISABLE` - Disable API-based validation - - **Default**: `false` (API validation enabled) - - When `true`, uses embedded schemas instead of the API validator - - When `false`, uses API validation with the configured `OASF_API_VALIDATION_SCHEMA_URL` - -- `OASF_API_VALIDATION_STRICT_MODE` - API validation strictness mode - - **Default**: `true` (strict mode) - - **Strict mode** (`true`): Fails on unknown attributes, deprecated fields, and schema violations - - **Lax mode** (`false`): More permissive, only fails on critical errors - - Only applies when API validation is enabled - -**Example - Use default OASF server (Cursor):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "/absolute/path/to/dirctl", - "args": ["mcp", "serve"], - "env": { - "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" - } - } - } -} -``` - -**Example - Use custom OASF server (Cursor):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "/absolute/path/to/dirctl", - "args": ["mcp", "serve"], - "env": { - "OASF_API_VALIDATION_SCHEMA_URL": "http://localhost:8080", - "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" - } - } - } -} -``` - -**Example - Use lax validation mode (Cursor):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "/absolute/path/to/dirctl", - "args": ["mcp", "serve"], - "env": { - "OASF_API_VALIDATION_STRICT_MODE": "false", - "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" - } - } - } -} -``` - -**Example - Use embedded schemas (Cursor):** - -```json -{ - "mcpServers": { - "dir-mcp-server": { - "command": "/absolute/path/to/dirctl", - "args": ["mcp", "serve"], - "env": { - "OASF_API_VALIDATION_DISABLE": "true", - "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" - } - } - } -} -``` - -**Note:** After changing the configuration, fully restart your IDE (e.g., quit and reopen Cursor) for the MCP server to reload with the new settings. 
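**Example - Docker with environment variables (Cursor):** a hedged sketch, not a documented configuration. With the binary, `env` entries apply directly to the server process; with Docker they apply to the `docker` CLI, so each variable must also be forwarded into the container with `-e` (shown without a value, which copies it from the client's environment). The `host.docker.internal:8888` address is an illustrative assumption for reaching a host-local Directory server from Docker Desktop:

```json
{
  "mcpServers": {
    "dir-mcp-server": {
      "command": "docker",
      "args": [
        "run", "--rm", "-i",
        "-e", "DIRECTORY_CLIENT_SERVER_ADDRESS",
        "ghcr.io/agntcy/dir-ctl:latest",
        "mcp", "serve"
      ],
      "env": {
        "DIRECTORY_CLIENT_SERVER_ADDRESS": "host.docker.internal:8888"
      }
    }
  }
}
```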
- -## Usage in Cursor Chat - -**Using Tools** - Ask naturally, AI calls tools automatically: -- "List available OASF schema versions" -- "Validate this OASF record at path: /path/to/record.json" -- "Search for Python agents with image processing" -- "Push this record: [JSON]" -- "Import this A2A card to OASF format: [JSON]" -- "Export this OASF record to A2A format: [JSON]" - -**Using Prompts** - For guided workflows reference prompts with: - -- `/dir-mcp-server/create_record` - Generate OASF record from current directory -- `/dir-mcp-server/validate_record` - Validate an existing OASF record file -- `/dir-mcp-server/push_record` - Validate and push record to Directory -- `/dir-mcp-server/search_records` - Search with natural language queries -- `/dir-mcp-server/pull_record` - Pull record by CID -- `/dir-mcp-server/import_record` - Import from MCP/A2A with enrichment -- `/dir-mcp-server/export_record` - Export OASF to other formats +# MCP Server for Directory + +[Model Context Protocol (MCP)](https://modelcontextprotocol.io/) server for working with OASF agent records. + +## Tools + +### `agntcy_oasf_list_versions` + +Lists all available OASF schema versions supported by the server. + +**Input:** None +**Output:** `available_versions` ([]string), `count` (int), `error_message` (string) + +### `agntcy_oasf_get_schema` + +Retrieves the complete OASF schema JSON content for the specified version. + +**Input:** `version` (string) - OASF schema version (e.g., "0.3.1", "0.7.0") +**Output:** `version` (string), `schema` (string), `available_versions` ([]string), `error_message` (string) + +### `agntcy_oasf_get_schema_skills` + +Retrieves skills from the OASF schema with hierarchical navigation support. + +**Input:** +- `version` (string, **required**) - OASF schema version (e.g., "0.7.0") +- `parent_skill` (string, optional) - Parent skill to filter sub-skills + +**Output:** `skills` (array), `version`, `parent_skill`, `available_versions`, `error_message` + +Without `parent_skill`, returns top-level skill categories. With `parent_skill`, returns direct sub-skills under that parent. Each skill includes `name`, `caption`, and `id` fields. + +### `agntcy_oasf_get_schema_domains` + +Retrieves domains from the OASF schema with hierarchical navigation support. + +**Input:** +- `version` (string, **required**) - OASF schema version (e.g., "0.7.0") +- `parent_domain` (string, optional) - Parent domain to filter sub-domains + +**Output:** `domains` (array), `version`, `parent_domain`, `available_versions`, `error_message` + +Without `parent_domain`, returns top-level domain categories. With `parent_domain`, returns direct sub-domains under that parent. Each domain includes `name`, `caption`, and `id` fields. + +### `agntcy_oasf_validate_record` + +Validates an OASF agent record against the OASF schema. + +**Input:** `record_json` (string) +**Output:** `valid` (bool), `schema_version` (string), `validation_errors` ([]string), `error_message` (string) + +### `agntcy_dir_push_record` + +Pushes an OASF agent record to a Directory server. + +**Input:** `record_json` (string) - OASF agent record JSON +**Output:** `cid` (string), `server_address` (string), `error_message` (string) + +This tool validates and uploads the record to the configured Directory server. It returns the Content Identifier (CID) and the server address where the record was stored. + +**Note:** Requires Directory server configuration via environment variables. 
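+
+For illustration, a minimal sketch of a push call and its result, using hypothetical placeholder values for the record, CID, and server address:
+
+```json
+// Tool input
+{
+  "record_json": "{\"name\": \"my-agent\", \"version\": \"v1.0.0\"}"
+}
+
+// Tool output
+{
+  "cid": "bafkreiabcd1234567890",
+  "server_address": "localhost:8888"
+}
+```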
+ +### `agntcy_dir_search_local` + +Searches for agent records on the local directory node using structured query filters. + +**Input (all optional):** +- `limit` (uint32) - Maximum results to return (default: 100, max: 1000) +- `offset` (uint32) - Pagination offset (default: 0) +- `names` ([]string) - Agent name patterns (supports wildcards) +- `versions` ([]string) - Version patterns (supports wildcards) +- `skill_ids` ([]string) - Skill IDs (exact match only) +- `skill_names` ([]string) - Skill name patterns (supports wildcards) +- `locators` ([]string) - Locator patterns (supports wildcards) +- `modules` ([]string) - Module patterns (supports wildcards) + +**Output:** +- `record_cids` ([]string) - Array of matching record CIDs +- `count` (int) - Number of results returned +- `has_more` (bool) - Whether more results are available + +**Wildcard Patterns:** +- `*` - Matches zero or more characters +- `?` - Matches exactly one character +- `[]` - Matches any character within brackets (e.g., `[0-9]`, `[a-z]`, `[abc]`) + +**Examples:** +```json +// Find all Python-related agents +{ + "skill_names": ["*python*", "*Python*"] +} + +// Find specific version +{ + "names": ["my-agent"], + "versions": ["v1.*"] +} + +// Complex search with pagination +{ + "skill_names": ["*machine*learning*"], + "locators": ["docker-image:*"], + "limit": 50, + "offset": 0 +} +``` + +**Note:** Multiple filters are combined with OR logic. Requires Directory server configuration via environment variables. + +### `agntcy_dir_pull_record` + +Pulls an OASF agent record from the local Directory node by its CID (Content Identifier). + +**Input:** +- `cid` (string) - Content Identifier of the record to pull (required) + +**Output:** +- `record_data` (string) - The record data (JSON string) +- `error_message` (string) - Error message if pull failed + +**Example:** +```json +{ + "cid": "bafkreiabcd1234567890" +} +``` + +**Note:** The pulled record is content-addressable and can be validated against its hash. Requires Directory server configuration via environment variables. + +### `agntcy_oasf_import_record` + +Imports data from other formats (MCP, A2A) to OASF agent record format. + +**Input:** +- `source_data` (string, **required**) - JSON string of the source data to import +- `source_format` (string, **required**) - Source format: "mcp" or "a2a" + +**Output:** +- `record_json` (string) - The imported OASF record (JSON string) +- `error_message` (string) - Error message if import failed + +**Note:** The resulting record requires domain and skill enrichment. For the complete workflow with automatic enrichment and validation, use the `import_record` prompt instead. + +### `agntcy_oasf_export_record` + +Exports an OASF agent record to other formats (A2A, GitHub Copilot). + +**Input:** +- `record_json` (string, **required**) - JSON string of the OASF agent record to export +- `target_format` (string, **required**) - Target format: "a2a" or "ghcopilot" + +**Output:** +- `exported_data` (string) - The exported data in the target format (JSON string) +- `error_message` (string) - Error message if export failed + +**Note:** For the complete workflow with validation, use the `export_record` prompt instead. + +## Prompts + +MCP Prompts are guided workflows that help you accomplish tasks. The server exposes the following prompts: + +### `create_record` + +Analyzes the **current directory** codebase and automatically generates a complete, valid OASF agent record. 
The AI examines the repository structure, documentation, and code to determine appropriate skills, domains, and metadata. + +**Input (optional):** +- `output_path` (string) - Where to output the record: + - File path (e.g., `"agent.json"`) to save to file + - `"stdout"` to display only (no file saved) + - Empty or omitted defaults to `"stdout"` +- `schema_version` (string) - OASF schema version to use (defaults to "0.7.0") + +**Use when:** You want to automatically generate an OASF record for the current directory's codebase. + +### `validate_record` + +Guides you through validating an existing OASF agent record. Reads a file, validates it against the schema, and reports any errors. + +**Input (required):** `record_path` (string) - Path to the OASF record JSON file to validate + +**Use when:** You have an existing record file and want to check if it's valid. + +### `push_record` + +Complete workflow for validating and pushing an OASF record to the Directory server. Validates the record first, then pushes it to the configured server and returns the CID. + +**Input (required):** `record_path` (string) - Path to the OASF record JSON file to validate and push + +**Use when:** You're ready to publish your record to a Directory server. + +### `search_records` + +Guided workflow for searching agent records using **free-text queries**. This prompt automatically translates natural language queries into structured search parameters by leveraging OASF schema knowledge. + +**Input (required):** `query` (string) - Free-text description of what agents you're looking for + +**What it does:** +1. Retrieves the OASF schema to understand available skills and domains +2. Analyzes your free-text query +3. Translates it to appropriate search filters (names, skills, locators, etc.) +4. Executes the search using `agntcy_dir_search_local` +5. **Extracts and displays ALL CIDs** from the search results (from the `record_cids` field) +6. Provides summary and explanation of search strategy + +**Important:** The prompt explicitly instructs the AI to extract the `record_cids` array from the tool response and display every CID clearly. The response will always include actual CID values, never placeholders. + +**Example queries:** +- `"find Python agents"` +- `"agents that can process images"` +- `"docker-based translation services"` +- `"GPT models version 2"` +- `"agents with text completion skills"` + +**Use when:** You want to search using natural language rather than structured filters. The AI will map your query to OASF taxonomy. + +**Note:** For direct, structured searches, use the `agntcy_dir_search_local` tool instead. + +### `pull_record` + +Guided workflow for pulling an OASF agent record from the Directory by its CID. + +**Input:** +- `cid` (string, **required**) - Content Identifier (CID) of the record to pull +- `output_path` (string, optional) - Where to save the record: + - File path (e.g., `"record.json"`) to save to file + - `"stdout"` or empty to display only (no file saved) + - Empty or omitted defaults to `"stdout"` + +**What it does:** +1. Validates the CID format +2. Calls `agntcy_dir_pull_record` with the CID +3. Displays the record data +4. Parses and formats the record JSON for readability +5. Saves to file if `output_path` is specified +6. Optionally validates the record using `agntcy_oasf_validate_record` + +**Use when:** You have a CID and want to retrieve the full record. The pulled record is content-addressable and can be validated against its hash. 
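+
+For example, a pull that saves the record to disk might provide the following arguments (the CID is a hypothetical placeholder):
+
+```json
+{
+  "cid": "bafkreiabcd1234567890",
+  "output_path": "record.json"
+}
+```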
+
+### `import_record`
+
+Complete guided workflow for importing data from other formats to OASF.
+
+**Input:**
+- `source_data_path` (string, **required**) - Path to the source data file to import
+- `source_format` (string, **required**) - Source format: "mcp" or "a2a"
+- `output_path` (string, optional) - Where to save the imported OASF record (file path or empty for stdout)
+- `schema_version` (string, optional) - OASF schema version to use for validation (defaults to "0.8.0")
+
+**What it does:**
+Reads the source file, converts it to OASF format, enriches domains and skills using the OASF schema, validates the result, and optionally saves to file.
+
+**Use when:** You want to import MCP servers or A2A cards into the OASF format. This handles all the complexity automatically.
+
+### `export_record`
+
+Complete guided workflow for exporting an OASF record to other formats.
+
+**Input:**
+- `record_path` (string, **required**) - Path to the OASF record JSON file to export
+- `target_format` (string, **required**) - Target format: "a2a" or "ghcopilot"
+- `output_path` (string, optional) - Where to save the exported data (file path or empty for stdout)
+
+**What it does:**
+Reads the OASF record, validates it, converts it to the target format, and optionally saves to file.
+
+**Use when:** You want to export OASF records to A2A cards or GitHub Copilot MCP configurations.
+
+## Setup
+
+The MCP server runs via the `dirctl` CLI tool, which can be obtained as a pre-built binary or Docker image. For the available installation methods, see the CLI [README.md](../cli/README.md) file.
+
+### 1. Binary
+
+Add the MCP server to your IDE's MCP configuration using the absolute path to the dirctl binary.
+
+**Example Cursor configuration (`~/.cursor/mcp.json`):**
+
+```json
+{
+  "mcpServers": {
+    "dir-mcp-server": {
+      "command": "/absolute/path/to/dirctl",
+      "args": ["mcp", "serve"]
+    }
+  }
+}
+```
+
+### 2. Docker Image
+
+Add the MCP server to your IDE's MCP configuration using Docker.
+ +**Example Cursor configuration (`~/.cursor/mcp.json`):** + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "docker", + "args": [ + "run", + "--rm", + "-i", + "ghcr.io/agntcy/dir-ctl:latest", + "mcp", + "serve" + ] + } + } +} +``` + +### Environment Variables + +The following environment variables can be used with both binary and Docker configurations: + +#### Directory Client Configuration + +- `DIRECTORY_CLIENT_SERVER_ADDRESS` - Directory server address (default: `0.0.0.0:8888`) +- `DIRECTORY_CLIENT_AUTH_MODE` - Authentication mode: `none`, `x509`, `jwt`, `token` +- `DIRECTORY_CLIENT_SPIFFE_TOKEN` - Path to SPIFFE token file (for token authentication) +- `DIRECTORY_CLIENT_TLS_SKIP_VERIFY` - Skip TLS verification (set to `true` if needed) + +#### OASF Validation Configuration + +- `OASF_API_VALIDATION_SCHEMA_URL` - OASF schema URL for API-based validation + - **Default**: `https://schema.oasf.outshift.com` + - URL of the OASF server to use for validation + - The MCP server uses API-based validation by default for more comprehensive validation with the latest schema rules + +- `OASF_API_VALIDATION_DISABLE` - Disable API-based validation + - **Default**: `false` (API validation enabled) + - When `true`, uses embedded schemas instead of the API validator + - When `false`, uses API validation with the configured `OASF_API_VALIDATION_SCHEMA_URL` + +- `OASF_API_VALIDATION_STRICT_MODE` - API validation strictness mode + - **Default**: `true` (strict mode) + - **Strict mode** (`true`): Fails on unknown attributes, deprecated fields, and schema violations + - **Lax mode** (`false`): More permissive, only fails on critical errors + - Only applies when API validation is enabled + +**Example - Use default OASF server (Cursor):** + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "/absolute/path/to/dirctl", + "args": ["mcp", "serve"], + "env": { + "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" + } + } + } +} +``` + +**Example - Use custom OASF server (Cursor):** + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "/absolute/path/to/dirctl", + "args": ["mcp", "serve"], + "env": { + "OASF_API_VALIDATION_SCHEMA_URL": "http://localhost:8080", + "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" + } + } + } +} +``` + +**Example - Use lax validation mode (Cursor):** + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "/absolute/path/to/dirctl", + "args": ["mcp", "serve"], + "env": { + "OASF_API_VALIDATION_STRICT_MODE": "false", + "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" + } + } + } +} +``` + +**Example - Use embedded schemas (Cursor):** + +```json +{ + "mcpServers": { + "dir-mcp-server": { + "command": "/absolute/path/to/dirctl", + "args": ["mcp", "serve"], + "env": { + "OASF_API_VALIDATION_DISABLE": "true", + "DIRECTORY_CLIENT_SERVER_ADDRESS": "localhost:8888" + } + } + } +} +``` + +**Note:** After changing the configuration, fully restart your IDE (e.g., quit and reopen Cursor) for the MCP server to reload with the new settings. 
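+
+**Example - Pass environment variables via Docker (Cursor):**
+
+With the Docker image, the same variables are passed as `docker run -e` flags. A minimal sketch, assuming the Directory server runs on the Docker host and is reachable as `host.docker.internal`:
+
+```json
+{
+  "mcpServers": {
+    "dir-mcp-server": {
+      "command": "docker",
+      "args": [
+        "run",
+        "--rm",
+        "-i",
+        "-e",
+        "DIRECTORY_CLIENT_SERVER_ADDRESS=host.docker.internal:8888",
+        "ghcr.io/agntcy/dir-ctl:latest",
+        "mcp",
+        "serve"
+      ]
+    }
+  }
+}
+```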
+ +## Usage in Cursor Chat + +**Using Tools** - Ask naturally, AI calls tools automatically: +- "List available OASF schema versions" +- "Validate this OASF record at path: /path/to/record.json" +- "Search for Python agents with image processing" +- "Push this record: [JSON]" +- "Import this A2A card to OASF format: [JSON]" +- "Export this OASF record to A2A format: [JSON]" + +**Using Prompts** - For guided workflows reference prompts with: + +- `/dir-mcp-server/create_record` - Generate OASF record from current directory +- `/dir-mcp-server/validate_record` - Validate an existing OASF record file +- `/dir-mcp-server/push_record` - Validate and push record to Directory +- `/dir-mcp-server/search_records` - Search with natural language queries +- `/dir-mcp-server/pull_record` - Pull record by CID +- `/dir-mcp-server/import_record` - Import from MCP/A2A with enrichment +- `/dir-mcp-server/export_record` - Export OASF to other formats diff --git a/mcp/go.mod b/mcp/go.mod index 65a6438f5..bbd92c9e2 100644 --- a/mcp/go.mod +++ b/mcp/go.mod @@ -1,183 +1,183 @@ -module github.com/agntcy/dir/mcp - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api => ../api - github.com/agntcy/dir/client => ../client - github.com/agntcy/dir/utils => ../utils -) - -require ( - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/client v0.6.0 - github.com/agntcy/oasf-sdk/pkg v0.0.14 - github.com/modelcontextprotocol/go-sdk v0.8.0 - github.com/stretchr/testify v1.11.1 - google.golang.org/protobuf v1.36.10 -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect - github.com/agntcy/dir/utils v0.6.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect 
- github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/gnostic-models v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/go-github/v73 v73.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/google/jsonschema-go v0.3.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/go-cid v0.5.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/moby/term v0.5.2 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - 
github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/cosign/v3 v3.0.3 // indirect - github.com/sigstore/protobuf-specs v0.5.0 // indirect - github.com/sigstore/rekor v1.4.3 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/sigstore v1.10.0 // indirect - github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/cobra v1.10.2 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/spf13/viper v1.21.0 // indirect - github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/thales-e-security/pool v0.0.2 // indirect - github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect - github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect - github.com/x448/float16 v0.8.4 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/yosida95/uritemplate/v3 v3.0.2 // indirect - gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect - google.golang.org/grpc v1.77.0 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.34.2 // indirect - k8s.io/apimachinery v0.34.2 // indirect - k8s.io/client-go v0.34.2 // indirect - k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect - lukechampine.com/blake3 v1.4.0 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - 
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect - sigs.k8s.io/yaml v1.6.0 // indirect -) +module github.com/agntcy/dir/mcp + +go 1.25.2 + +replace ( + github.com/agntcy/dir/api => ../api + github.com/agntcy/dir/client => ../client + github.com/agntcy/dir/utils => ../utils +) + +require ( + github.com/agntcy/dir/api v0.6.0 + github.com/agntcy/dir/client v0.6.0 + github.com/agntcy/oasf-sdk/pkg v0.0.14 + github.com/modelcontextprotocol/go-sdk v0.8.0 + github.com/stretchr/testify v1.11.1 + google.golang.org/protobuf v1.36.10 +) + +require ( + buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect + buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/agntcy/dir/utils v0.6.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/coreos/go-oidc/v3 v3.17.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.24.1 // indirect + github.com/go-openapi/errors v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/loads v0.23.2 // indirect + github.com/go-openapi/runtime v0.29.2 // indirect + github.com/go-openapi/spec v0.22.1 // indirect + github.com/go-openapi/strfmt v0.25.0 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-openapi/validate v0.25.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + 
github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/go-containerregistry v0.20.7 // indirect + github.com/google/go-github/v73 v73.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/jsonschema-go v0.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/in-toto/attestation v1.1.2 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/ipfs/go-cid v0.5.0 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/moby/term v0.5.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/mr-tron/base58 v1.2.0 // indirect + github.com/multiformats/go-base32 v0.1.0 // indirect + github.com/multiformats/go-base36 v0.2.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multihash v0.2.3 // indirect + github.com/multiformats/go-varint v0.0.7 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/sigstore/cosign/v3 v3.0.3 // indirect + github.com/sigstore/protobuf-specs v0.5.0 // indirect + github.com/sigstore/rekor v1.4.3 // indirect + github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/sigstore v1.10.0 // indirect + github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/subosito/gotenv 
v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/thales-e-security/pool v0.0.2 // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + gitlab.com/gitlab-org/api/client-go v0.160.0 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect + google.golang.org/grpc v1.77.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.34.2 // indirect + k8s.io/apimachinery v0.34.2 // indirect + k8s.io/client-go v0.34.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + lukechampine.com/blake3 v1.4.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/mcp/go.sum b/mcp/go.sum index dcf4c4c80..a3774ad99 100644 --- a/mcp/go.sum +++ b/mcp/go.sum @@ -1,705 +1,705 @@ -al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= -al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= -buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= -buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= -cloud.google.com/go v0.121.6 
h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= -cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= -cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= -cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/ThalesIgnite/crypto11 
v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= -github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= -github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= -github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= -github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= -github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= -github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 
h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= -github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= -github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= -github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= -github.com/digitorus/pkcs7 
v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= -github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= -github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= -github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= -github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= -github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= -github.com/go-openapi/errors v0.22.4/go.mod 
h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= -github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= -github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= -github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= -github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= -github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= -github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 
h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= -github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= -github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= -github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= 
-github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= -github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= -github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= -github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= -github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= -github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod 
h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= -github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= -github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= -github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= -github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= -github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= -github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= -github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= -github.com/jackc/pgpassfile v1.0.0 
h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= -github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= -github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 
v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= -github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c= -github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= -github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod 
h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= 
-github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/thales-e-security/pool v0.0.2 
h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= -github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= -github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= -github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 
h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= -github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= -gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod 
h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= 
-golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= 
-gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= -k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= -k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= -k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= -k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= -lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= 
+buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c 
h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= +github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= +github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= +github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 
h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= +github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 
h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= +github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= +github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= +github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= +github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= +github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= +github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= +github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 
h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= +github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/go-github/v73 v73.0.0 h1:aR+Utnh+Y4mMkS+2qLQwcQ/cF9mOTpdwnzlaw//rG24= +github.com/google/go-github/v73 v73.0.0/go.mod h1:fa6w8+/V+edSU0muqdhCVY7Beh1M8F1IlQPZIANKIYw= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= +github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e h1:FJta/0WsADCe1r9vQjdHbd3KuiLPu7Y9WlyLGwMUNyE= +github.com/google/pprof v0.0.0-20250602020802-c6617b811d0e/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod 
h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= +github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= +github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= 
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= +github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= +github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= 
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modelcontextprotocol/go-sdk v0.8.0 h1:jdsBtGzBLY287WKSIjYovOXAqtJkP+HtFQFKrZd4a6c= +github.com/modelcontextprotocol/go-sdk v0.8.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= +github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= 
+github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= +github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= +github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= 
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= +github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= +gitlab.com/gitlab-org/api/client-go v0.160.0 h1:aMQzbcE8zFe0lR/J+a3zneEgH+/EBFs8rD8Chrr4Snw= +gitlab.com/gitlab-org/api/client-go v0.160.0/go.mod h1:ooCNtKB7OyP7GBa279+HrUS3eeJF6Yi6XABZZy7RTSk= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= 
+go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w= +lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 
h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
+software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
diff --git a/mcp/main.go b/mcp/main.go
index b4f5a86d6..87484e4db 100644
--- a/mcp/main.go
+++ b/mcp/main.go
@@ -1,17 +1,17 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package main
-
-import (
-	"context"
-	"log"
-
-	"github.com/agntcy/dir/mcp/server"
-)
-
-func main() {
-	if err := server.Serve(context.Background()); err != nil {
-		log.Fatal(err)
-	}
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/agntcy/dir/mcp/server"
+)
+
+func main() {
+	if err := server.Serve(context.Background()); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/mcp/prompts/create_record.go b/mcp/prompts/create_record.go
index 4ed3a753b..02a89d960 100644
--- a/mcp/prompts/create_record.go
+++ b/mcp/prompts/create_record.go
@@ -1,79 +1,79 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package prompts
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/modelcontextprotocol/go-sdk/mcp"
-)
-
-const (
-	stdoutOutput = "stdout"
-)
-
-// CreateRecordInput defines the input parameters for the create_agent_record prompt.
-type CreateRecordInput struct {
-	OutputPath    string `json:"output_path" jsonschema:"Where to output the record: file path (e.g., agent.json), 'stdout' to display only. Defaults to stdout"`
-	SchemaVersion string `json:"schema_version" jsonschema:"OASF schema version to use (e.g., 0.7.0, 0.3.1). Defaults to 0.7.0"`
-}
-
-// CreateRecord implements the create_agent_record prompt.
-// It analyzes a codebase and creates a complete OASF agent record.
-func CreateRecord(_ context.Context, req *mcp.GetPromptRequest) (
-	*mcp.GetPromptResult,
-	error,
-) {
-	// Parse arguments from the request
-	args := req.Params.Arguments
-
-	outputPath := args["output_path"]
-	if outputPath == "" {
-		outputPath = stdoutOutput
-	}
-
-	// Determine output action based on outputPath
-	outputAction := "Save the record to: " + outputPath
-	if strings.EqualFold(outputPath, stdoutOutput) || outputPath == "-" {
-		outputAction = "Display the complete JSON record (do not save to file)"
-	}
-
-	schemaVersion := args["schema_version"]
-	if schemaVersion == "" {
-		schemaVersion = "0.7.0"
-	}
-
-	promptText := fmt.Sprintf(strings.TrimSpace(`
-I'll create an OASF %s agent record by analyzing the codebase in the current directory.
-
-Here's the workflow I'll follow:
-
-1. **Analyze Codebase**: Examine the repository structure, README, documentation, and code to understand what this application does
-2. **Get Schema**: Use the agntcy_oasf_get_schema tool to retrieve the OASF %s schema and see available domains and skills
-3. **Select Skills & Domains**: Based on the codebase analysis, choose the most relevant skills and domains from the schema
-4. **Build Record**: Create a complete OASF record with:
-   - Name and version (extracted from package.json, go.mod, pyproject.toml, etc.)
-   - Description (from README or package metadata)
-   - Skills and domains (selected from the schema)
-   - Locators (repository URL, container images, etc.)
-   - Authors and timestamps
-5. **Validate**: Use the agntcy_oasf_validate_record tool to validate the generated record against the OASF schema
-6. **Output**: %s
-
-Let me start by analyzing the codebase and retrieving the OASF schema using the agntcy_oasf_get_schema tool.
-	`), schemaVersion, schemaVersion, outputAction)
-
-	return &mcp.GetPromptResult{
-		Messages: []*mcp.PromptMessage{
-			{
-				Role: "user",
-				Content: &mcp.TextContent{
-					Text: promptText,
-				},
-			},
-		},
-	}, nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package prompts
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/modelcontextprotocol/go-sdk/mcp"
+)
+
+const (
+	stdoutOutput = "stdout"
+)
+
+// CreateRecordInput defines the input parameters for the create_agent_record prompt.
+type CreateRecordInput struct {
+	OutputPath    string `json:"output_path" jsonschema:"Where to output the record: file path (e.g., agent.json), 'stdout' to display only. Defaults to stdout"`
+	SchemaVersion string `json:"schema_version" jsonschema:"OASF schema version to use (e.g., 0.7.0, 0.3.1). Defaults to 0.7.0"`
+}
+
+// CreateRecord implements the create_agent_record prompt.
+// It analyzes a codebase and creates a complete OASF agent record.
+func CreateRecord(_ context.Context, req *mcp.GetPromptRequest) (
+	*mcp.GetPromptResult,
+	error,
+) {
+	// Parse arguments from the request
+	args := req.Params.Arguments
+
+	outputPath := args["output_path"]
+	if outputPath == "" {
+		outputPath = stdoutOutput
+	}
+
+	// Determine output action based on outputPath
+	outputAction := "Save the record to: " + outputPath
+	if strings.EqualFold(outputPath, stdoutOutput) || outputPath == "-" {
+		outputAction = "Display the complete JSON record (do not save to file)"
+	}
+
+	schemaVersion := args["schema_version"]
+	if schemaVersion == "" {
+		schemaVersion = "0.7.0"
+	}
+
+	promptText := fmt.Sprintf(strings.TrimSpace(`
+I'll create an OASF %s agent record by analyzing the codebase in the current directory.
+
+Here's the workflow I'll follow:
+
+1. **Analyze Codebase**: Examine the repository structure, README, documentation, and code to understand what this application does
+2. **Get Schema**: Use the agntcy_oasf_get_schema tool to retrieve the OASF %s schema and see available domains and skills
+3. **Select Skills & Domains**: Based on the codebase analysis, choose the most relevant skills and domains from the schema
+4. **Build Record**: Create a complete OASF record with:
+   - Name and version (extracted from package.json, go.mod, pyproject.toml, etc.)
+   - Description (from README or package metadata)
+   - Skills and domains (selected from the schema)
+   - Locators (repository URL, container images, etc.)
+   - Authors and timestamps
+5. **Validate**: Use the agntcy_oasf_validate_record tool to validate the generated record against the OASF schema
+6. **Output**: %s
+
+Let me start by analyzing the codebase and retrieving the OASF schema using the agntcy_oasf_get_schema tool.
+ `), schemaVersion, schemaVersion, outputAction) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} diff --git a/mcp/prompts/create_record_test.go b/mcp/prompts/create_record_test.go index 99bb69f76..f158a44b2 100644 --- a/mcp/prompts/create_record_test.go +++ b/mcp/prompts/create_record_test.go @@ -1,84 +1,84 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCreateRecord(t *testing.T) { - t.Run("should return prompt with default values", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{}, - }, - } - - result, err := CreateRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - assert.NotEmpty(t, result.Messages) - assert.Len(t, result.Messages, 1) - assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) - - // Check that prompt contains important elements - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "current directory") - assert.Contains(t, content, "Display the complete JSON record") - assert.Contains(t, content, "0.7.0") - assert.Contains(t, content, "agntcy_oasf_get_schema") - assert.Contains(t, content, "agntcy_oasf_validate_record") - }) - - t.Run("should parse custom output_path", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "output_path": "custom-agent.json", - }, - }, - } - - result, err := CreateRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "custom-agent.json") - }) - - t.Run("should parse custom schema_version", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "schema_version": "0.3.1", - }, - }, - } - - result, err := CreateRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "0.3.1") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCreateRecord(t *testing.T) { + t.Run("should return prompt with default values", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{}, + }, + } + + result, err := CreateRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + assert.NotEmpty(t, result.Messages) + assert.Len(t, result.Messages, 1) + assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) + + // Check 
that prompt contains important elements + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "current directory") + assert.Contains(t, content, "Display the complete JSON record") + assert.Contains(t, content, "0.7.0") + assert.Contains(t, content, "agntcy_oasf_get_schema") + assert.Contains(t, content, "agntcy_oasf_validate_record") + }) + + t.Run("should parse custom output_path", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "output_path": "custom-agent.json", + }, + }, + } + + result, err := CreateRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "custom-agent.json") + }) + + t.Run("should parse custom schema_version", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "schema_version": "0.3.1", + }, + }, + } + + result, err := CreateRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "0.3.1") + }) +} diff --git a/mcp/prompts/export_record.go b/mcp/prompts/export_record.go index 9032d8a75..acd382a45 100644 --- a/mcp/prompts/export_record.go +++ b/mcp/prompts/export_record.go @@ -1,104 +1,104 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "fmt" - "strings" - - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -const defaultRecordPath = "" - -// ExportRecordInput defines the input parameters for the export_record prompt. -type ExportRecordInput struct { - RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to export (required)"` - TargetFormat string `json:"target_format" jsonschema:"Target format to export to (e.g., 'a2a', 'ghcopilot') (required)"` - OutputPath string `json:"output_path" jsonschema:"Where to save the exported data: file path (e.g., output.json) or empty for stdout"` -} - -// ExportRecord implements the export_record prompt. -// It guides users through the complete workflow of validating and exporting a record. -func ExportRecord(_ context.Context, req *mcp.GetPromptRequest) ( - *mcp.GetPromptResult, - error, -) { - // Parse arguments from the request - args := req.Params.Arguments - - recordPath := args["record_path"] - if recordPath == "" { - recordPath = defaultRecordPath - } - - targetFormat := args["target_format"] - if targetFormat == "" { - targetFormat = "" - } - - outputPath := args["output_path"] - - outputAction := "Display the exported data (stdout)" - if outputPath != "" { - outputAction = "Save the exported data to: " + outputPath - } - - promptText := fmt.Sprintf(strings.TrimSpace(` -I'll export an OASF agent record to %s format with validation. - -Record source: %s -Target format: %s - -Here's the complete workflow: - -1. 
**Get Record**: - - If the record is in a file, read it directly - - If you have a CID, use the pull_record prompt or agntcy_dir_pull_record tool to retrieve it from the Directory - - The record contains its schema_version, which will be used for validation - -2. **Validate Record**: Use agntcy_oasf_validate_record to ensure the record is valid OASF - - Check for any validation errors - - Verify all required fields are present - - Confirm domains and skills are valid according to the schema - - If validation fails, display errors and stop (fix the record before exporting) - -3. **Check Schema Compatibility**: Use agntcy_oasf_get_schema with the record's schema_version - - Retrieve the schema to understand the record structure - - Check if any format-specific requirements need to be met for the target format - -4. **Export Record**: Use agntcy_oasf_export_record tool to convert the OASF record to the target format - - This performs the translation using the OASF SDK translator - - The translator will map OASF fields to the target format's structure - - The output is the faithful translation from the OASF SDK (no modifications) - -5. **Summarize Export**: Review and summarize the translation: - - Identify key OASF fields that were successfully mapped to the target format - - Note any information that may have been lost or not preserved (if any) - - This is informational only - the exported data is not modified - -6. **Output**: %s - -**Supported Export Formats**: -- **a2a**: Agent-to-Agent (A2A) format for inter-agent communication -- **ghcopilot**: GitHub Copilot MCP configuration format - -**Note**: Some OASF record data may not have direct equivalents in the target format. -The export process will preserve as much information as possible based on the target format's capabilities. - -Let me start by obtaining and validating the OASF record. - `), targetFormat, recordPath, targetFormat, outputAction) - - return &mcp.GetPromptResult{ - Messages: []*mcp.PromptMessage{ - { - Role: "user", - Content: &mcp.TextContent{ - Text: promptText, - }, - }, - }, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "fmt" + "strings" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +const defaultRecordPath = "" + +// ExportRecordInput defines the input parameters for the export_record prompt. +type ExportRecordInput struct { + RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to export (required)"` + TargetFormat string `json:"target_format" jsonschema:"Target format to export to (e.g., 'a2a', 'ghcopilot') (required)"` + OutputPath string `json:"output_path" jsonschema:"Where to save the exported data: file path (e.g., output.json) or empty for stdout"` +} + +// ExportRecord implements the export_record prompt. +// It guides users through the complete workflow of validating and exporting a record. 
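The jsonschema tags on ExportRecordInput mark record_path and target_format as required, but the handler below substitutes empty strings when they are missing, so the rendered prompt can contain blank slots. If runtime enforcement were wanted, a guard along these lines could gate it; requireArgs is a hypothetical helper, not part of this patch:

package main

import "fmt"

// requireArgs rejects requests that omit arguments the schema declares required.
func requireArgs(args map[string]string, keys ...string) error {
	for _, k := range keys {
		if args[k] == "" {
			return fmt.Errorf("missing required prompt argument %q", k)
		}
	}

	return nil
}

func main() {
	args := map[string]string{"record_path": "record.json"} // target_format omitted
	if err := requireArgs(args, "record_path", "target_format"); err != nil {
		fmt.Println(err) // missing required prompt argument "target_format"
	}
}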
+func ExportRecord(_ context.Context, req *mcp.GetPromptRequest) ( + *mcp.GetPromptResult, + error, +) { + // Parse arguments from the request + args := req.Params.Arguments + + recordPath := args["record_path"] + if recordPath == "" { + recordPath = defaultRecordPath + } + + targetFormat := args["target_format"] + if targetFormat == "" { + targetFormat = "" + } + + outputPath := args["output_path"] + + outputAction := "Display the exported data (stdout)" + if outputPath != "" { + outputAction = "Save the exported data to: " + outputPath + } + + promptText := fmt.Sprintf(strings.TrimSpace(` +I'll export an OASF agent record to %s format with validation. + +Record source: %s +Target format: %s + +Here's the complete workflow: + +1. **Get Record**: + - If the record is in a file, read it directly + - If you have a CID, use the pull_record prompt or agntcy_dir_pull_record tool to retrieve it from the Directory + - The record contains its schema_version, which will be used for validation + +2. **Validate Record**: Use agntcy_oasf_validate_record to ensure the record is valid OASF + - Check for any validation errors + - Verify all required fields are present + - Confirm domains and skills are valid according to the schema + - If validation fails, display errors and stop (fix the record before exporting) + +3. **Check Schema Compatibility**: Use agntcy_oasf_get_schema with the record's schema_version + - Retrieve the schema to understand the record structure + - Check if any format-specific requirements need to be met for the target format + +4. **Export Record**: Use agntcy_oasf_export_record tool to convert the OASF record to the target format + - This performs the translation using the OASF SDK translator + - The translator will map OASF fields to the target format's structure + - The output is the faithful translation from the OASF SDK (no modifications) + +5. **Summarize Export**: Review and summarize the translation: + - Identify key OASF fields that were successfully mapped to the target format + - Note any information that may have been lost or not preserved (if any) + - This is informational only - the exported data is not modified + +6. **Output**: %s + +**Supported Export Formats**: +- **a2a**: Agent-to-Agent (A2A) format for inter-agent communication +- **ghcopilot**: GitHub Copilot MCP configuration format + +**Note**: Some OASF record data may not have direct equivalents in the target format. +The export process will preserve as much information as possible based on the target format's capabilities. + +Let me start by obtaining and validating the OASF record. 
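A small design note on the Sprintf call that follows: targetFormat is passed twice because the template references it in two places. Go's indexed verbs (%[1]s) would let each operand be supplied once; a runnable illustration of the equivalent form, not a change to the patch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	target, record := "a2a", "record.json"

	// %[1]s re-reads the first operand, so target appears once in the argument list.
	tmpl := strings.TrimSpace(`
I'll export an OASF agent record to %[1]s format with validation.

Record source: %[2]s
Target format: %[1]s
`)
	fmt.Println(fmt.Sprintf(tmpl, target, record))
}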
+ `), targetFormat, recordPath, targetFormat, outputAction) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} diff --git a/mcp/prompts/export_record_test.go b/mcp/prompts/export_record_test.go index a61275a44..b1aaff5e2 100644 --- a/mcp/prompts/export_record_test.go +++ b/mcp/prompts/export_record_test.go @@ -1,132 +1,132 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestExportRecord(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - t.Run("generates prompt with all parameters", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "record_path": "record.json", - "target_format": "a2a", - "output_path": "output.json", - }, - }, - } - - result, err := ExportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "record.json") - assert.Contains(t, promptText, "a2a") - assert.Contains(t, promptText, "output.json") - assert.Contains(t, promptText, "agntcy_oasf_validate_record") - assert.Contains(t, promptText, "agntcy_oasf_get_schema") - assert.Contains(t, promptText, "agntcy_oasf_export_record") - assert.Contains(t, promptText, "pull_record prompt") - assert.Contains(t, promptText, "agntcy_dir_pull_record") - }) - - t.Run("uses default values when parameters are missing", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{}, - }, - } - - result, err := ExportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "") - assert.Contains(t, promptText, "") - assert.Contains(t, promptText, "stdout") - }) - - t.Run("handles stdout output", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "record_path": "record.json", - "target_format": "ghcopilot", - }, - }, - } - - result, err := ExportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "Display the exported data (stdout)") - assert.Contains(t, promptText, "ghcopilot") - }) - - t.Run("includes format documentation", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "record_path": "record.json", - "target_format": "a2a", - }, - }, - } - - result, err := ExportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - 
require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "Supported Export Formats") - assert.Contains(t, promptText, "a2a") - assert.Contains(t, promptText, "ghcopilot") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExportRecord(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + t.Run("generates prompt with all parameters", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "record_path": "record.json", + "target_format": "a2a", + "output_path": "output.json", + }, + }, + } + + result, err := ExportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "record.json") + assert.Contains(t, promptText, "a2a") + assert.Contains(t, promptText, "output.json") + assert.Contains(t, promptText, "agntcy_oasf_validate_record") + assert.Contains(t, promptText, "agntcy_oasf_get_schema") + assert.Contains(t, promptText, "agntcy_oasf_export_record") + assert.Contains(t, promptText, "pull_record prompt") + assert.Contains(t, promptText, "agntcy_dir_pull_record") + }) + + t.Run("uses default values when parameters are missing", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{}, + }, + } + + result, err := ExportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "") + assert.Contains(t, promptText, "") + assert.Contains(t, promptText, "stdout") + }) + + t.Run("handles stdout output", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "record_path": "record.json", + "target_format": "ghcopilot", + }, + }, + } + + result, err := ExportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "Display the exported data (stdout)") + assert.Contains(t, promptText, "ghcopilot") + }) + + t.Run("includes format documentation", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "record_path": "record.json", + "target_format": "a2a", + }, + }, + } + + result, err := ExportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "Supported Export Formats") + assert.Contains(t, promptText, "a2a") + 
assert.Contains(t, promptText, "ghcopilot") + }) +} diff --git a/mcp/prompts/import_record.go b/mcp/prompts/import_record.go index 9159b1bfe..dbee33e5d 100644 --- a/mcp/prompts/import_record.go +++ b/mcp/prompts/import_record.go @@ -1,119 +1,119 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "fmt" - "strings" - - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// ImportRecordInput defines the input parameters for the import_record prompt. -type ImportRecordInput struct { - SourceDataPath string `json:"source_data_path" jsonschema:"Path to the source data file to import (required)"` - SourceFormat string `json:"source_format" jsonschema:"Source format to import from (e.g., 'mcp', 'a2a') (required)"` - OutputPath string `json:"output_path" jsonschema:"Where to save the imported OASF record: file path (e.g., record.json) or empty for stdout"` - SchemaVersion string `json:"schema_version" jsonschema:"OASF schema version to use for validation (e.g., 0.7.0, 0.8.0). Defaults to 0.8.0"` -} - -// ImportRecord implements the import_record prompt. -// It guides users through the complete workflow of importing, enriching, and validating a record. -func ImportRecord(_ context.Context, req *mcp.GetPromptRequest) ( - *mcp.GetPromptResult, - error, -) { - // Parse arguments from the request - args := req.Params.Arguments - - sourceDataPath := args["source_data_path"] - if sourceDataPath == "" { - sourceDataPath = "" - } - - sourceFormat := args["source_format"] - if sourceFormat == "" { - sourceFormat = "" - } - - outputPath := args["output_path"] - - outputAction := "Display the imported record (stdout)" - if outputPath != "" { - outputAction = "Save the imported record to: " + outputPath - } - - schemaVersion := args["schema_version"] - if schemaVersion == "" { - schemaVersion = "0.8.0" - } - - promptText := fmt.Sprintf(strings.TrimSpace(` -I'll import data from %s format to an OASF agent record with complete enrichment and validation. - -Source file: %s -Source format: %s -Schema version: %s - -Here's the complete workflow: - -1. **Check Compatibility**: - - Use agntcy_oasf_list_versions to verify the target schema version (%s) is supported - - Verify the source format (%s) is supported for import (currently: mcp, a2a) - -2. **Read Source Data**: Load the source data from the file - -3. **Import to OASF**: Use agntcy_oasf_import_record tool to convert the source data to OASF format - - This performs the initial translation using the OASF SDK translator for %s format - -4. **Get Schema Details**: Use agntcy_oasf_get_schema to retrieve the complete OASF %s schema - - This provides context for enrichment - -5. **Analyze Content**: Examine the imported record and source data to understand: - - What the agent does (capabilities, functions, purpose) - - What domains it operates in (e.g., artificial_intelligence, software_development) - - What skills it has (e.g., natural_language_processing, code_generation) - - **Note**: Ignore/drop any skills and domains from the translation - they will be replaced - -6. 
**Enrich Domains**: - - Remove any existing domains field from the translated record - - Use agntcy_oasf_get_schema_domains (without parent_domain) to get top-level domains - - Use agntcy_oasf_get_schema_domains (with parent_domain) to explore sub-domains if needed - - Select the most relevant domains based on the agent's purpose - - Add the chosen domains to the record with proper domain names and IDs - -7. **Enrich Skills**: - - Remove any existing skills field from the translated record - - Use agntcy_oasf_get_schema_skills (without parent_skill) to get top-level skill categories - - Use agntcy_oasf_get_schema_skills (with parent_skill) to explore sub-skills if needed - - Select the most relevant skills based on the agent's capabilities - - Add the chosen skills to the record with proper skill names and IDs - -8. **Validate Record**: Use agntcy_oasf_validate_record to ensure the enriched record is valid OASF - - Fix any validation errors if they occur - -9. **Output**: %s - -**Supported Source Formats**: -- **mcp**: Model Context Protocol format -- **a2a**: Agent-to-Agent (A2A) format - -**Note**: The domains and skills from the initial import may be incomplete or generic. -The enrichment steps (6-7) are crucial for creating an accurate and discoverable OASF record. - -Let me start by checking compatibility and reading the source data. - `), sourceFormat, sourceDataPath, sourceFormat, schemaVersion, schemaVersion, sourceFormat, sourceFormat, schemaVersion, outputAction) - - return &mcp.GetPromptResult{ - Messages: []*mcp.PromptMessage{ - { - Role: "user", - Content: &mcp.TextContent{ - Text: promptText, - }, - }, - }, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "fmt" + "strings" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// ImportRecordInput defines the input parameters for the import_record prompt. +type ImportRecordInput struct { + SourceDataPath string `json:"source_data_path" jsonschema:"Path to the source data file to import (required)"` + SourceFormat string `json:"source_format" jsonschema:"Source format to import from (e.g., 'mcp', 'a2a') (required)"` + OutputPath string `json:"output_path" jsonschema:"Where to save the imported OASF record: file path (e.g., record.json) or empty for stdout"` + SchemaVersion string `json:"schema_version" jsonschema:"OASF schema version to use for validation (e.g., 0.7.0, 0.8.0). Defaults to 0.8.0"` +} + +// ImportRecord implements the import_record prompt. +// It guides users through the complete workflow of importing, enriching, and validating a record. +func ImportRecord(_ context.Context, req *mcp.GetPromptRequest) ( + *mcp.GetPromptResult, + error, +) { + // Parse arguments from the request + args := req.Params.Arguments + + sourceDataPath := args["source_data_path"] + if sourceDataPath == "" { + sourceDataPath = "" + } + + sourceFormat := args["source_format"] + if sourceFormat == "" { + sourceFormat = "" + } + + outputPath := args["output_path"] + + outputAction := "Display the imported record (stdout)" + if outputPath != "" { + outputAction = "Save the imported record to: " + outputPath + } + + schemaVersion := args["schema_version"] + if schemaVersion == "" { + schemaVersion = "0.8.0" + } + + promptText := fmt.Sprintf(strings.TrimSpace(` +I'll import data from %s format to an OASF agent record with complete enrichment and validation. 
+ +Source file: %s +Source format: %s +Schema version: %s + +Here's the complete workflow: + +1. **Check Compatibility**: + - Use agntcy_oasf_list_versions to verify the target schema version (%s) is supported + - Verify the source format (%s) is supported for import (currently: mcp, a2a) + +2. **Read Source Data**: Load the source data from the file + +3. **Import to OASF**: Use agntcy_oasf_import_record tool to convert the source data to OASF format + - This performs the initial translation using the OASF SDK translator for %s format + +4. **Get Schema Details**: Use agntcy_oasf_get_schema to retrieve the complete OASF %s schema + - This provides context for enrichment + +5. **Analyze Content**: Examine the imported record and source data to understand: + - What the agent does (capabilities, functions, purpose) + - What domains it operates in (e.g., artificial_intelligence, software_development) + - What skills it has (e.g., natural_language_processing, code_generation) + - **Note**: Ignore/drop any skills and domains from the translation - they will be replaced + +6. **Enrich Domains**: + - Remove any existing domains field from the translated record + - Use agntcy_oasf_get_schema_domains (without parent_domain) to get top-level domains + - Use agntcy_oasf_get_schema_domains (with parent_domain) to explore sub-domains if needed + - Select the most relevant domains based on the agent's purpose + - Add the chosen domains to the record with proper domain names and IDs + +7. **Enrich Skills**: + - Remove any existing skills field from the translated record + - Use agntcy_oasf_get_schema_skills (without parent_skill) to get top-level skill categories + - Use agntcy_oasf_get_schema_skills (with parent_skill) to explore sub-skills if needed + - Select the most relevant skills based on the agent's capabilities + - Add the chosen skills to the record with proper skill names and IDs + +8. **Validate Record**: Use agntcy_oasf_validate_record to ensure the enriched record is valid OASF + - Fix any validation errors if they occur + +9. **Output**: %s + +**Supported Source Formats**: +- **mcp**: Model Context Protocol format +- **a2a**: Agent-to-Agent (A2A) format + +**Note**: The domains and skills from the initial import may be incomplete or generic. +The enrichment steps (6-7) are crucial for creating an accurate and discoverable OASF record. + +Let me start by checking compatibility and reading the source data. 
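Steps 6 and 7 end with the chosen domains and skills written back onto the record as name/ID pairs. A hedged sketch of that shape: the "name"/"id" field names are assumptions inferred from the skill_names/skill_ids and domain_names/domain_ids search parameters elsewhere in this package, and the ID values below simply reuse the sample values from the search prompt; the agntcy_oasf_get_schema tools remain the authority on the real structure:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// entry is an assumed name/ID pair; consult the OASF schema for the real shape.
type entry struct {
	Name string `json:"name"`
	ID   int    `json:"id"`
}

func main() {
	enriched := map[string][]entry{
		"skills":  {{Name: "natural_language_processing", ID: 10201}},
		"domains": {{Name: "artificial_intelligence", ID: 604}},
	}

	b, err := json.MarshalIndent(enriched, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(b))
}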
+ `), sourceFormat, sourceDataPath, sourceFormat, schemaVersion, schemaVersion, sourceFormat, sourceFormat, schemaVersion, outputAction) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} diff --git a/mcp/prompts/import_record_test.go b/mcp/prompts/import_record_test.go index 0ebeca00e..77db78a00 100644 --- a/mcp/prompts/import_record_test.go +++ b/mcp/prompts/import_record_test.go @@ -1,109 +1,109 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestImportRecord(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - t.Run("generates prompt with all parameters", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "source_data_path": "server.json", - "source_format": "mcp", - "output_path": "record.json", - "schema_version": "0.7.0", - }, - }, - } - - result, err := ImportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "server.json") - assert.Contains(t, promptText, "mcp") - assert.Contains(t, promptText, "record.json") - assert.Contains(t, promptText, "0.7.0") - assert.Contains(t, promptText, "agntcy_oasf_list_versions") - assert.Contains(t, promptText, "agntcy_oasf_import_record") - assert.Contains(t, promptText, "agntcy_oasf_get_schema_domains") - assert.Contains(t, promptText, "agntcy_oasf_get_schema_skills") - assert.Contains(t, promptText, "agntcy_oasf_validate_record") - assert.Contains(t, promptText, "Supported Source Formats") - assert.Contains(t, promptText, "source format (mcp) is supported") - }) - - t.Run("uses default values when parameters are missing", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{}, - }, - } - - result, err := ImportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "") - assert.Contains(t, promptText, "") - assert.Contains(t, promptText, "0.8.0") - assert.Contains(t, promptText, "stdout") - }) - - t.Run("handles stdout output", func(t *testing.T) { - t.Parallel() - - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "source_data_path": "data.json", - "source_format": "a2a", - }, - }, - } - - result, err := ImportRecord(ctx, req) - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - promptText := textContent.Text - - assert.Contains(t, promptText, "Display the imported record (stdout)") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + 
+import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestImportRecord(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + t.Run("generates prompt with all parameters", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "source_data_path": "server.json", + "source_format": "mcp", + "output_path": "record.json", + "schema_version": "0.7.0", + }, + }, + } + + result, err := ImportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "server.json") + assert.Contains(t, promptText, "mcp") + assert.Contains(t, promptText, "record.json") + assert.Contains(t, promptText, "0.7.0") + assert.Contains(t, promptText, "agntcy_oasf_list_versions") + assert.Contains(t, promptText, "agntcy_oasf_import_record") + assert.Contains(t, promptText, "agntcy_oasf_get_schema_domains") + assert.Contains(t, promptText, "agntcy_oasf_get_schema_skills") + assert.Contains(t, promptText, "agntcy_oasf_validate_record") + assert.Contains(t, promptText, "Supported Source Formats") + assert.Contains(t, promptText, "source format (mcp) is supported") + }) + + t.Run("uses default values when parameters are missing", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{}, + }, + } + + result, err := ImportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "") + assert.Contains(t, promptText, "") + assert.Contains(t, promptText, "0.8.0") + assert.Contains(t, promptText, "stdout") + }) + + t.Run("handles stdout output", func(t *testing.T) { + t.Parallel() + + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "source_data_path": "data.json", + "source_format": "a2a", + }, + }, + } + + result, err := ImportRecord(ctx, req) + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + promptText := textContent.Text + + assert.Contains(t, promptText, "Display the imported record (stdout)") + }) +} diff --git a/mcp/prompts/pull_record.go b/mcp/prompts/pull_record.go index 27511073f..04f40d79e 100644 --- a/mcp/prompts/pull_record.go +++ b/mcp/prompts/pull_record.go @@ -1,76 +1,76 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// PullRecordInput defines the input for the pull_record prompt. 
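PullRecordInput (declared next) pairs with the MarshalPullRecordInput helper at the bottom of this file; a quick round-trip, mirroring TestMarshalPullRecordInput in the test file below:

package main

import (
	"fmt"
	"log"

	"github.com/agntcy/dir/mcp/prompts"
)

func main() {
	s, err := prompts.MarshalPullRecordInput(prompts.PullRecordInput{
		CID:        "bafkreiabcd1234567890",
		OutputPath: "record.json",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Prints: {"cid":"bafkreiabcd1234567890","output_path":"record.json"}
	fmt.Println(s)
}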
-type PullRecordInput struct { - CID string `json:"cid" jsonschema:"Content Identifier (CID) of the record to pull (required)"` - OutputPath string `json:"output_path,omitempty" jsonschema:"Where to save the pulled record: file path (e.g., record.json) or empty/stdout to display only (default: stdout)"` -} - -// PullRecord generates a prompt for pulling a record from Directory. -func PullRecord(_ context.Context, req *mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { - // Parse arguments from the request - args := req.Params.Arguments - - cid := args["cid"] - if cid == "" { - cid = "[User will provide CID]" - } - - outputPath := args["output_path"] - if outputPath == "" { - outputPath = stdoutOutput - } - - // Determine output action - outputAction := "Display the record (do not save to file)" - if outputPath != stdoutOutput && outputPath != "-" && outputPath != "" { - outputAction = "Save the record to: " + outputPath - } - - // Build prompt text - promptText := fmt.Sprintf(`Pull an OASF agent record from the local Directory node by its CID. - -CID: %s -Output: %s - -WORKFLOW: - -1. Validate: Ensure the CID format is valid -2. Pull: Call 'agntcy_dir_pull_record' tool with cid: "%s" -3. Display: Show the record data -4. Parse: If the record is valid JSON, parse and display it formatted -5. Save: %s - -NOTES: -- The pulled record is content-addressable and can be validated against its hash -- Use 'agntcy_oasf_validate_record' tool to validate the record against OASF schema`, cid, outputPath, cid, outputAction) - - return &mcp.GetPromptResult{ - Messages: []*mcp.PromptMessage{ - { - Role: "user", - Content: &mcp.TextContent{ - Text: promptText, - }, - }, - }, - }, nil -} - -// MarshalPullRecordInput marshals input to JSON for testing/debugging. -func MarshalPullRecordInput(input PullRecordInput) (string, error) { - b, err := json.Marshal(input) - - return string(b), err -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// PullRecordInput defines the input for the pull_record prompt. +type PullRecordInput struct { + CID string `json:"cid" jsonschema:"Content Identifier (CID) of the record to pull (required)"` + OutputPath string `json:"output_path,omitempty" jsonschema:"Where to save the pulled record: file path (e.g., record.json) or empty/stdout to display only (default: stdout)"` +} + +// PullRecord generates a prompt for pulling a record from Directory. +func PullRecord(_ context.Context, req *mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { + // Parse arguments from the request + args := req.Params.Arguments + + cid := args["cid"] + if cid == "" { + cid = "[User will provide CID]" + } + + outputPath := args["output_path"] + if outputPath == "" { + outputPath = stdoutOutput + } + + // Determine output action + outputAction := "Display the record (do not save to file)" + if outputPath != stdoutOutput && outputPath != "-" && outputPath != "" { + outputAction = "Save the record to: " + outputPath + } + + // Build prompt text + promptText := fmt.Sprintf(`Pull an OASF agent record from the local Directory node by its CID. + +CID: %s +Output: %s + +WORKFLOW: + +1. Validate: Ensure the CID format is valid +2. Pull: Call 'agntcy_dir_pull_record' tool with cid: "%s" +3. Display: Show the record data +4. Parse: If the record is valid JSON, parse and display it formatted +5. 
Save: %s + +NOTES: +- The pulled record is content-addressable and can be validated against its hash +- Use 'agntcy_oasf_validate_record' tool to validate the record against OASF schema`, cid, outputPath, cid, outputAction) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} + +// MarshalPullRecordInput marshals input to JSON for testing/debugging. +func MarshalPullRecordInput(input PullRecordInput) (string, error) { + b, err := json.Marshal(input) + + return string(b), err +} diff --git a/mcp/prompts/pull_record_test.go b/mcp/prompts/pull_record_test.go index a6012a4e7..49f849721 100644 --- a/mcp/prompts/pull_record_test.go +++ b/mcp/prompts/pull_record_test.go @@ -1,120 +1,120 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "fmt" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func convertToStringMap(m map[string]interface{}) map[string]string { - result := make(map[string]string) - for k, v := range m { - result[k] = fmt.Sprintf("%v", v) - } - - return result -} - -func TestPullRecord(t *testing.T) { - tests := []struct { - name string - arguments map[string]interface{} - expectError bool - expectedInText []string - }{ - { - name: "basic pull", - arguments: map[string]interface{}{ - "cid": "bafkreiabcd1234567890", - }, - expectError: false, - expectedInText: []string{ - "CID: bafkreiabcd1234567890", - "agntcy_dir_pull_record", - }, - }, - { - name: "missing CID uses placeholder", - arguments: map[string]interface{}{}, - expectError: false, - expectedInText: []string{ - "[User will provide CID]", - "agntcy_dir_pull_record", - }, - }, - { - name: "with output path", - arguments: map[string]interface{}{ - "cid": "bafkreiabcd1234567890", - "output_path": "my-record.json", - }, - expectError: false, - expectedInText: []string{ - "Output: my-record.json", - "Save the record to: my-record.json", - }, - }, - { - name: "with stdout output", - arguments: map[string]interface{}{ - "cid": "bafkreiabcd1234567890", - "output_path": "stdout", - }, - expectError: false, - expectedInText: []string{ - "Output: stdout", - "Display the record (do not save to file)", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "pull_record", - Arguments: convertToStringMap(tt.arguments), - }, - } - - result, err := PullRecord(context.Background(), req) - - if tt.expectError { - require.Error(t, err) - - return - } - - require.NoError(t, err) - require.NotNil(t, result) - require.Len(t, result.Messages, 1) - - // Extract text content - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Expected TextContent type") - - for _, expected := range tt.expectedInText { - assert.Contains(t, textContent.Text, expected) - } - }) - } -} - -func TestMarshalPullRecordInput(t *testing.T) { - input := PullRecordInput{ - CID: "bafkreiabcd1234567890", - OutputPath: "record.json", - } - - jsonStr, err := MarshalPullRecordInput(input) - require.NoError(t, err) - assert.Contains(t, jsonStr, "bafkreiabcd1234567890") - assert.Contains(t, jsonStr, "output_path") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + 
"context" + "fmt" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func convertToStringMap(m map[string]interface{}) map[string]string { + result := make(map[string]string) + for k, v := range m { + result[k] = fmt.Sprintf("%v", v) + } + + return result +} + +func TestPullRecord(t *testing.T) { + tests := []struct { + name string + arguments map[string]interface{} + expectError bool + expectedInText []string + }{ + { + name: "basic pull", + arguments: map[string]interface{}{ + "cid": "bafkreiabcd1234567890", + }, + expectError: false, + expectedInText: []string{ + "CID: bafkreiabcd1234567890", + "agntcy_dir_pull_record", + }, + }, + { + name: "missing CID uses placeholder", + arguments: map[string]interface{}{}, + expectError: false, + expectedInText: []string{ + "[User will provide CID]", + "agntcy_dir_pull_record", + }, + }, + { + name: "with output path", + arguments: map[string]interface{}{ + "cid": "bafkreiabcd1234567890", + "output_path": "my-record.json", + }, + expectError: false, + expectedInText: []string{ + "Output: my-record.json", + "Save the record to: my-record.json", + }, + }, + { + name: "with stdout output", + arguments: map[string]interface{}{ + "cid": "bafkreiabcd1234567890", + "output_path": "stdout", + }, + expectError: false, + expectedInText: []string{ + "Output: stdout", + "Display the record (do not save to file)", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Name: "pull_record", + Arguments: convertToStringMap(tt.arguments), + }, + } + + result, err := PullRecord(context.Background(), req) + + if tt.expectError { + require.Error(t, err) + + return + } + + require.NoError(t, err) + require.NotNil(t, result) + require.Len(t, result.Messages, 1) + + // Extract text content + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Expected TextContent type") + + for _, expected := range tt.expectedInText { + assert.Contains(t, textContent.Text, expected) + } + }) + } +} + +func TestMarshalPullRecordInput(t *testing.T) { + input := PullRecordInput{ + CID: "bafkreiabcd1234567890", + OutputPath: "record.json", + } + + jsonStr, err := MarshalPullRecordInput(input) + require.NoError(t, err) + assert.Contains(t, jsonStr, "bafkreiabcd1234567890") + assert.Contains(t, jsonStr, "output_path") +} diff --git a/mcp/prompts/push_record.go b/mcp/prompts/push_record.go index c3bf72e56..ba3002fe4 100644 --- a/mcp/prompts/push_record.go +++ b/mcp/prompts/push_record.go @@ -1,61 +1,61 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "fmt" - "strings" - - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// PushRecordInput defines the input parameters for the push_record prompt. -type PushRecordInput struct { - RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to validate and push"` -} - -// PushRecord implements the push_record prompt. -// It guides users through the complete workflow of validating and pushing a record. 
-func PushRecord(_ context.Context, req *mcp.GetPromptRequest) ( - *mcp.GetPromptResult, - error, -) { - // Parse arguments from the request - args := req.Params.Arguments - - recordPath := args["record_path"] - if recordPath == "" { - recordPath = "" - } - - promptText := fmt.Sprintf(strings.TrimSpace(` -I'll validate and push the OASF agent record to the Directory server. - -Record file: %s - -Here's the complete workflow: - -1. **Read Record**: Load the record from the file -2. **Validate Schema**: Use the agntcy_oasf_validate_record tool to verify the record is valid OASF (0.3.1 or 0.7.0) -3. **Check Server**: Confirm Directory server is configured (DIRECTORY_CLIENT_SERVER_ADDRESS environment variable) -4. **Push Record**: Use the agntcy_dir_push_record tool to upload the validated record to the Directory server -5. **Return CID**: Display the Content Identifier (CID) and server address for the stored record - -**Note**: The DIRECTORY_CLIENT_SERVER_ADDRESS environment variable must be set. - -Let me start by reading and validating the record using the agntcy_oasf_validate_record tool. - `), recordPath) - - return &mcp.GetPromptResult{ - Messages: []*mcp.PromptMessage{ - { - Role: "user", - Content: &mcp.TextContent{ - Text: promptText, - }, - }, - }, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "fmt" + "strings" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// PushRecordInput defines the input parameters for the push_record prompt. +type PushRecordInput struct { + RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to validate and push"` +} + +// PushRecord implements the push_record prompt. +// It guides users through the complete workflow of validating and pushing a record. +func PushRecord(_ context.Context, req *mcp.GetPromptRequest) ( + *mcp.GetPromptResult, + error, +) { + // Parse arguments from the request + args := req.Params.Arguments + + recordPath := args["record_path"] + if recordPath == "" { + recordPath = "" + } + + promptText := fmt.Sprintf(strings.TrimSpace(` +I'll validate and push the OASF agent record to the Directory server. + +Record file: %s + +Here's the complete workflow: + +1. **Read Record**: Load the record from the file +2. **Validate Schema**: Use the agntcy_oasf_validate_record tool to verify the record is valid OASF (0.3.1 or 0.7.0) +3. **Check Server**: Confirm Directory server is configured (DIRECTORY_CLIENT_SERVER_ADDRESS environment variable) +4. **Push Record**: Use the agntcy_dir_push_record tool to upload the validated record to the Directory server +5. **Return CID**: Display the Content Identifier (CID) and server address for the stored record + +**Note**: The DIRECTORY_CLIENT_SERVER_ADDRESS environment variable must be set. + +Let me start by reading and validating the record using the agntcy_oasf_validate_record tool. 
+ `), recordPath) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} diff --git a/mcp/prompts/push_record_test.go b/mcp/prompts/push_record_test.go index db546d19f..4a975f12c 100644 --- a/mcp/prompts/push_record_test.go +++ b/mcp/prompts/push_record_test.go @@ -1,61 +1,61 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPushRecord(t *testing.T) { - t.Run("should return prompt with record path", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "record_path": "agent.json", - }, - }, - } - - result, err := PushRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - assert.NotEmpty(t, result.Messages) - assert.Len(t, result.Messages, 1) - assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) - - // Check that prompt contains important elements - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "agent.json") - assert.Contains(t, content, "agntcy_oasf_validate_record") - assert.Contains(t, content, "agntcy_dir_push_record") - }) - - t.Run("should handle missing record_path", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{}, - }, - } - - result, err := PushRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "push") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPushRecord(t *testing.T) { + t.Run("should return prompt with record path", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "record_path": "agent.json", + }, + }, + } + + result, err := PushRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + assert.NotEmpty(t, result.Messages) + assert.Len(t, result.Messages, 1) + assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) + + // Check that prompt contains important elements + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "agent.json") + assert.Contains(t, content, "agntcy_oasf_validate_record") + assert.Contains(t, content, "agntcy_dir_push_record") + }) + + t.Run("should handle missing record_path", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{}, + }, + } + + result, err := PushRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + + textContent, ok := 
result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "push") + }) +} diff --git a/mcp/prompts/search_records.go b/mcp/prompts/search_records.go index 483a29983..f7cda33f6 100644 --- a/mcp/prompts/search_records.go +++ b/mcp/prompts/search_records.go @@ -1,82 +1,82 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "encoding/json" - - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// SearchRecordsInput defines the input for the search_records prompt. -type SearchRecordsInput struct { - Query string `json:"query" jsonschema:"Free-text search query (e.g. 'find agents that can process images' or 'agents for text translation')"` -} - -// SearchRecords provides a guided workflow for free-text search queries. -func SearchRecords(_ context.Context, req *mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { - // Parse query from arguments - args := req.Params.Arguments - query := args["query"] - - if query == "" { - query = "[User will provide their search query]" - } - - // Build the prompt text - promptText := `Search for AI agent records in the Directory using OASF (Open Agent Schema Format). - -USER QUERY: "` + query + `" - -WORKFLOW: - -1. Get schema: Call 'agntcy_oasf_get_schema' to see available skills/domains -2. Translate query to search parameters (names, versions, skill_ids, skill_names, locators, module_names, domain_ids, domain_names, created_ats, authors, schema_versions, module_ids) -3. Execute: Call 'agntcy_dir_search_local' with parameters -4. Display: Extract ALL CIDs from the 'record_cids' array in the response and list them clearly with the count - -PARAMETERS: -- names: Agent patterns (e.g., "*gpt*") -- versions: Version patterns (e.g., "v1.*") -- skill_ids: Exact IDs (e.g., "10201") -- skill_names: Skill patterns (e.g., "*python*") -- locators: Locator patterns (e.g., "docker-image:*") -- module_names: Module name patterns (e.g., "integration/mcp") -- domain_ids: Exact domain IDs (e.g., "604") -- domain_names: Domain patterns (e.g., "*education*", "healthcare/*") -- created_ats: Created_at timestamp patterns (e.g., "2024-*", ">=2025-01-01") -- authors: Author name patterns (e.g., "john*") -- schema_versions: Schema version patterns (e.g., "0.8.*") -- module_ids: Exact module IDs (e.g., "201") - -WILDCARDS: * (zero+), ? (one), [abc] (char class) - -EXAMPLES: -"find Python agents" → { "skill_names": ["*python*"] } -"image processing v2" → { "skill_names": ["*image*"], "versions": ["v2.*"] } -"docker translation" → { "skill_names": ["*translation*"], "locators": ["docker-image:*"] } -"education agents with Python" → { "domain_names": ["*education*"] } -"agents by author john" → { "authors": ["john*"] } -"MCP servers" → { "module_ids": ["202"] }` - - return &mcp.GetPromptResult{ - Description: "Guided workflow for searching agent records using free-text queries", - Messages: []*mcp.PromptMessage{ - { - Role: "user", - Content: &mcp.TextContent{ - Text: promptText, - }, - }, - }, - }, nil -} - -// MarshalSearchRecordsInput marshals input to JSON for testing/debugging. 
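MarshalSearchRecordsInput (declared next) mirrors the pull-side helper; for example, again assuming the github.com/agntcy/dir/mcp/prompts module path:

package main

import (
	"fmt"
	"log"

	"github.com/agntcy/dir/mcp/prompts"
)

func main() {
	s, err := prompts.MarshalSearchRecordsInput(prompts.SearchRecordsInput{
		Query: "find agents that can process images",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Prints: {"query":"find agents that can process images"}
	fmt.Println(s)
}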
-func MarshalSearchRecordsInput(input SearchRecordsInput) (string, error) { - b, err := json.Marshal(input) - - return string(b), err -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "encoding/json" + + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// SearchRecordsInput defines the input for the search_records prompt. +type SearchRecordsInput struct { + Query string `json:"query" jsonschema:"Free-text search query (e.g. 'find agents that can process images' or 'agents for text translation')"` +} + +// SearchRecords provides a guided workflow for free-text search queries. +func SearchRecords(_ context.Context, req *mcp.GetPromptRequest) (*mcp.GetPromptResult, error) { + // Parse query from arguments + args := req.Params.Arguments + query := args["query"] + + if query == "" { + query = "[User will provide their search query]" + } + + // Build the prompt text + promptText := `Search for AI agent records in the Directory using OASF (Open Agent Schema Format). + +USER QUERY: "` + query + `" + +WORKFLOW: + +1. Get schema: Call 'agntcy_oasf_get_schema' to see available skills/domains +2. Translate query to search parameters (names, versions, skill_ids, skill_names, locators, module_names, domain_ids, domain_names, created_ats, authors, schema_versions, module_ids) +3. Execute: Call 'agntcy_dir_search_local' with parameters +4. Display: Extract ALL CIDs from the 'record_cids' array in the response and list them clearly with the count + +PARAMETERS: +- names: Agent patterns (e.g., "*gpt*") +- versions: Version patterns (e.g., "v1.*") +- skill_ids: Exact IDs (e.g., "10201") +- skill_names: Skill patterns (e.g., "*python*") +- locators: Locator patterns (e.g., "docker-image:*") +- module_names: Module name patterns (e.g., "integration/mcp") +- domain_ids: Exact domain IDs (e.g., "604") +- domain_names: Domain patterns (e.g., "*education*", "healthcare/*") +- created_ats: Created_at timestamp patterns (e.g., "2024-*", ">=2025-01-01") +- authors: Author name patterns (e.g., "john*") +- schema_versions: Schema version patterns (e.g., "0.8.*") +- module_ids: Exact module IDs (e.g., "201") + +WILDCARDS: * (zero+), ? (one), [abc] (char class) + +EXAMPLES: +"find Python agents" → { "skill_names": ["*python*"] } +"image processing v2" → { "skill_names": ["*image*"], "versions": ["v2.*"] } +"docker translation" → { "skill_names": ["*translation*"], "locators": ["docker-image:*"] } +"education agents with Python" → { "domain_names": ["*education*"] } +"agents by author john" → { "authors": ["john*"] } +"MCP servers" → { "module_ids": ["202"] }` + + return &mcp.GetPromptResult{ + Description: "Guided workflow for searching agent records using free-text queries", + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} + +// MarshalSearchRecordsInput marshals input to JSON for testing/debugging. 
+func MarshalSearchRecordsInput(input SearchRecordsInput) (string, error) { + b, err := json.Marshal(input) + + return string(b), err +} diff --git a/mcp/prompts/search_records_test.go b/mcp/prompts/search_records_test.go index 353f91127..daf672196 100644 --- a/mcp/prompts/search_records_test.go +++ b/mcp/prompts/search_records_test.go @@ -1,205 +1,205 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSearchRecords(t *testing.T) { - tests := []struct { - name string - query string - expectInText []string - expectDefault bool - }{ - { - name: "with query provided", - query: "find Python agents", - expectInText: []string{ - "find Python agents", - "agntcy_oasf_get_schema", - "agntcy_dir_search_local", - "WORKFLOW", - }, - expectDefault: false, - }, - { - name: "with complex query", - query: "docker-based translation services version 2", - expectInText: []string{ - "docker-based translation services version 2", - "Translate query to search parameters", - "skill_names", - "locators", - }, - expectDefault: false, - }, - { - name: "with domain search query", - query: "education agents with Python", - expectInText: []string{ - "education agents with Python", - "domain_ids", - "domain_names", - "skill_names", - }, - expectDefault: false, - }, - { - name: "empty query defaults to placeholder", - query: "", - expectInText: []string{ - "[User will provide their search query]", - "WORKFLOW", - }, - expectDefault: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "search_records", - Arguments: map[string]string{ - "query": tt.query, - }, - }, - } - - result, err := SearchRecords(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, result) - - // Check description - assert.Contains(t, result.Description, "free-text") - - // Check messages - require.Len(t, result.Messages, 1) - assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) - - // Get text content - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Expected TextContent type") - - // Check expected strings in prompt - for _, expected := range tt.expectInText { - assert.Contains(t, textContent.Text, expected) - } - }) - } -} - -func TestSearchRecordsWithNoArguments(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "search_records", - Arguments: nil, - }, - } - - result, err := SearchRecords(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok) - - // Should use default placeholder - assert.Contains(t, textContent.Text, "[User will provide their search query]") -} - -func TestSearchRecordsWithNonStringQuery(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "search_records", - Arguments: map[string]string{ - "query": "", // Empty string instead of wrong type since Arguments is map[string]string - }, - }, - } - - result, err := SearchRecords(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok) - - // Should use default placeholder 
when type assertion fails - assert.Contains(t, textContent.Text, "[User will provide their search query]") -} - -func TestMarshalSearchRecordsInput(t *testing.T) { - input := SearchRecordsInput{ - Query: "find Python agents", - } - - jsonStr, err := MarshalSearchRecordsInput(input) - require.NoError(t, err) - assert.Contains(t, jsonStr, "find Python agents") - assert.Contains(t, jsonStr, "query") -} - -func TestSearchRecordsPromptContainsDomainParameters(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "search_records", - Arguments: map[string]string{ - "query": "test", - }, - }, - } - - result, err := SearchRecords(context.Background(), req) - require.NoError(t, err) - require.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok) - - // Verify domain parameters are documented - assert.Contains(t, textContent.Text, "domain_ids") - assert.Contains(t, textContent.Text, "domain_names") - - // Verify domain example exists - assert.Contains(t, textContent.Text, "education agents with Python") -} - -func TestSearchRecordsPromptParameterDocumentation(t *testing.T) { - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Name: "search_records", - Arguments: map[string]string{}, - }, - } - - result, err := SearchRecords(context.Background(), req) - require.NoError(t, err) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok) - - // Verify all parameters are documented - expectedParams := []string{ - "names", - "versions", - "skill_ids", - "skill_names", - "locators", - "module_names", - "domain_ids", - "domain_names", - } - - for _, param := range expectedParams { - assert.Contains(t, textContent.Text, param, "Parameter %s should be documented", param) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSearchRecords(t *testing.T) { + tests := []struct { + name string + query string + expectInText []string + expectDefault bool + }{ + { + name: "with query provided", + query: "find Python agents", + expectInText: []string{ + "find Python agents", + "agntcy_oasf_get_schema", + "agntcy_dir_search_local", + "WORKFLOW", + }, + expectDefault: false, + }, + { + name: "with complex query", + query: "docker-based translation services version 2", + expectInText: []string{ + "docker-based translation services version 2", + "Translate query to search parameters", + "skill_names", + "locators", + }, + expectDefault: false, + }, + { + name: "with domain search query", + query: "education agents with Python", + expectInText: []string{ + "education agents with Python", + "domain_ids", + "domain_names", + "skill_names", + }, + expectDefault: false, + }, + { + name: "empty query defaults to placeholder", + query: "", + expectInText: []string{ + "[User will provide their search query]", + "WORKFLOW", + }, + expectDefault: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Name: "search_records", + Arguments: map[string]string{ + "query": tt.query, + }, + }, + } + + result, err := SearchRecords(context.Background(), req) + require.NoError(t, err) + require.NotNil(t, result) + + // Check description + assert.Contains(t, 
result.Description, "free-text")
+
+			// Check messages
+			require.Len(t, result.Messages, 1)
+			assert.Equal(t, mcp.Role("user"), result.Messages[0].Role)
+
+			// Get text content
+			textContent, ok := result.Messages[0].Content.(*mcp.TextContent)
+			require.True(t, ok, "Expected TextContent type")
+
+			// Check expected strings in prompt
+			for _, expected := range tt.expectInText {
+				assert.Contains(t, textContent.Text, expected)
+			}
+		})
+	}
+}
+
+func TestSearchRecordsWithNoArguments(t *testing.T) {
+	req := &mcp.GetPromptRequest{
+		Params: &mcp.GetPromptParams{
+			Name:      "search_records",
+			Arguments: nil,
+		},
+	}
+
+	result, err := SearchRecords(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, result)
+
+	textContent, ok := result.Messages[0].Content.(*mcp.TextContent)
+	require.True(t, ok)
+
+	// Should use default placeholder
+	assert.Contains(t, textContent.Text, "[User will provide their search query]")
+}
+
+func TestSearchRecordsWithNonStringQuery(t *testing.T) {
+	req := &mcp.GetPromptRequest{
+		Params: &mcp.GetPromptParams{
+			Name: "search_records",
+			Arguments: map[string]string{
+				"query": "", // Empty string instead of wrong type since Arguments is map[string]string
+			},
+		},
+	}
+
+	result, err := SearchRecords(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, result)
+
+	textContent, ok := result.Messages[0].Content.(*mcp.TextContent)
+	require.True(t, ok)
+
+	// Should use default placeholder for the empty query value
+	assert.Contains(t, textContent.Text, "[User will provide their search query]")
+}
+
+func TestMarshalSearchRecordsInput(t *testing.T) {
+	input := SearchRecordsInput{
+		Query: "find Python agents",
+	}
+
+	jsonStr, err := MarshalSearchRecordsInput(input)
+	require.NoError(t, err)
+	assert.Contains(t, jsonStr, "find Python agents")
+	assert.Contains(t, jsonStr, "query")
+}
+
+func TestSearchRecordsPromptContainsDomainParameters(t *testing.T) {
+	req := &mcp.GetPromptRequest{
+		Params: &mcp.GetPromptParams{
+			Name: "search_records",
+			Arguments: map[string]string{
+				"query": "test",
+			},
+		},
+	}
+
+	result, err := SearchRecords(context.Background(), req)
+	require.NoError(t, err)
+	require.NotNil(t, result)
+
+	textContent, ok := result.Messages[0].Content.(*mcp.TextContent)
+	require.True(t, ok)
+
+	// Verify domain parameters are documented
+	assert.Contains(t, textContent.Text, "domain_ids")
+	assert.Contains(t, textContent.Text, "domain_names")
+
+	// Verify domain example exists
+	assert.Contains(t, textContent.Text, "education agents with Python")
+}
+
+func TestSearchRecordsPromptParameterDocumentation(t *testing.T) {
+	req := &mcp.GetPromptRequest{
+		Params: &mcp.GetPromptParams{
+			Name:      "search_records",
+			Arguments: map[string]string{},
+		},
+	}
+
+	result, err := SearchRecords(context.Background(), req)
+	require.NoError(t, err)
+
+	textContent, ok := result.Messages[0].Content.(*mcp.TextContent)
+	require.True(t, ok)
+
+	// Verify all parameters are documented
+	expectedParams := []string{
+		"names",
+		"versions",
+		"skill_ids",
+		"skill_names",
+		"locators",
+		"module_names",
+		"domain_ids",
+		"domain_names",
+	}
+
+	for _, param := range expectedParams {
+		assert.Contains(t, textContent.Text, param, "Parameter %s should be documented", param)
+	}
+}
diff --git a/mcp/prompts/validate_record.go b/mcp/prompts/validate_record.go
index a0cfe1da6..2e213fc5b 100644
--- a/mcp/prompts/validate_record.go
+++ b/mcp/prompts/validate_record.go
@@ -1,56 +1,56 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package prompts
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/modelcontextprotocol/go-sdk/mcp"
-)
-
-// ValidateRecordInput defines the input parameters for the validate_record prompt.
-type ValidateRecordInput struct {
-	RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to validate"`
-}
-
-// ValidateRecord implements the validate_record prompt.
-// It guides users through validating an OASF agent record.
-func ValidateRecord(_ context.Context, req *mcp.GetPromptRequest) (
-	*mcp.GetPromptResult,
-	error,
-) {
-	// Parse arguments from the request
-	args := req.Params.Arguments
-
-	recordPath := args["record_path"]
-	if recordPath == "" {
-		recordPath = ""
-	}
-
-	promptText := fmt.Sprintf(strings.TrimSpace(`
-I'll validate the OASF agent record at: %s
-
-Here's the workflow I'll follow:
-
-1. **Read File**: Load the record from the specified path
-2. **Parse JSON**: Verify the JSON is well-formed
-3. **Validate Schema**: Use the agntcy_oasf_validate_record tool to check the record against the OASF schema
-4. **Report Results**: Show you any validation errors or confirm the record is valid
-
-Let me start by reading and validating the record using the agntcy_oasf_validate_record tool.
-	`), recordPath)
-
-	return &mcp.GetPromptResult{
-		Messages: []*mcp.PromptMessage{
-			{
-				Role: "user",
-				Content: &mcp.TextContent{
-					Text: promptText,
-				},
-			},
-		},
-	}, nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package prompts
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/modelcontextprotocol/go-sdk/mcp"
+)
+
+// ValidateRecordInput defines the input parameters for the validate_record prompt.
+type ValidateRecordInput struct {
+	RecordPath string `json:"record_path" jsonschema:"Path to the OASF record JSON file to validate"`
+}
+
+// ValidateRecord implements the validate_record prompt.
+// It guides users through validating an OASF agent record.
+func ValidateRecord(_ context.Context, req *mcp.GetPromptRequest) (
+	*mcp.GetPromptResult,
+	error,
+) {
+	// Parse arguments from the request
+	args := req.Params.Arguments
+
+	recordPath := args["record_path"]
+	if recordPath == "" {
+		recordPath = "[User will provide the record path]"
+	}
+
+	promptText := fmt.Sprintf(strings.TrimSpace(`
+I'll validate the OASF agent record at: %s
+
+Here's the workflow I'll follow:
+
+1. **Read File**: Load the record from the specified path
+2. **Parse JSON**: Verify the JSON is well-formed
+3. **Validate Schema**: Use the agntcy_oasf_validate_record tool to check the record against the OASF schema
+4. **Report Results**: Show you any validation errors or confirm the record is valid
+
+Let me start by reading and validating the record using the agntcy_oasf_validate_record tool.
+ `), recordPath) + + return &mcp.GetPromptResult{ + Messages: []*mcp.PromptMessage{ + { + Role: "user", + Content: &mcp.TextContent{ + Text: promptText, + }, + }, + }, + }, nil +} diff --git a/mcp/prompts/validate_record_test.go b/mcp/prompts/validate_record_test.go index 530122171..fccefb3af 100644 --- a/mcp/prompts/validate_record_test.go +++ b/mcp/prompts/validate_record_test.go @@ -1,60 +1,60 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package prompts - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestValidateRecord(t *testing.T) { - t.Run("should return prompt with record path", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{ - "record_path": "agent.json", - }, - }, - } - - result, err := ValidateRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - assert.NotEmpty(t, result.Messages) - assert.Len(t, result.Messages, 1) - assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) - - // Check that prompt contains important elements - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "agent.json") - assert.Contains(t, content, "agntcy_oasf_validate_record") - }) - - t.Run("should handle missing record_path", func(t *testing.T) { - ctx := context.Background() - req := &mcp.GetPromptRequest{ - Params: &mcp.GetPromptParams{ - Arguments: map[string]string{}, - }, - } - - result, err := ValidateRecord(ctx, req) - require.NoError(t, err) - assert.NotNil(t, result) - - textContent, ok := result.Messages[0].Content.(*mcp.TextContent) - require.True(t, ok, "Content should be TextContent") - - content := textContent.Text - assert.Contains(t, content, "validate") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package prompts + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateRecord(t *testing.T) { + t.Run("should return prompt with record path", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{ + "record_path": "agent.json", + }, + }, + } + + result, err := ValidateRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + assert.NotEmpty(t, result.Messages) + assert.Len(t, result.Messages, 1) + assert.Equal(t, mcp.Role("user"), result.Messages[0].Role) + + // Check that prompt contains important elements + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, "Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "agent.json") + assert.Contains(t, content, "agntcy_oasf_validate_record") + }) + + t.Run("should handle missing record_path", func(t *testing.T) { + ctx := context.Background() + req := &mcp.GetPromptRequest{ + Params: &mcp.GetPromptParams{ + Arguments: map[string]string{}, + }, + } + + result, err := ValidateRecord(ctx, req) + require.NoError(t, err) + assert.NotNil(t, result) + + textContent, ok := result.Messages[0].Content.(*mcp.TextContent) + require.True(t, ok, 
"Content should be TextContent") + + content := textContent.Text + assert.Contains(t, content, "validate") + }) +} diff --git a/mcp/server/server.go b/mcp/server/server.go index 2a0b9be53..92de78bac 100644 --- a/mcp/server/server.go +++ b/mcp/server/server.go @@ -1,420 +1,420 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package server - -import ( - "context" - "fmt" - "os" - "strings" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/mcp/prompts" - "github.com/agntcy/dir/mcp/tools" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// Serve creates and runs the MCP server with all configured tools and prompts. -// It accepts a context and runs the server over stdin/stdout using the stdio transport. -// -//nolint:maintidx // Function registers all MCP tools and prompts, complexity is acceptable -func Serve(ctx context.Context) error { - // Configure OASF validation - // Note: Logging to stderr is intentional - MCP servers communicate over stdin/stdout, - // so stderr is used for logging/debugging messages that don't interfere with the protocol. - disableAPIValidation := os.Getenv("OASF_API_VALIDATION_DISABLE") == "true" - if disableAPIValidation { - corev1.SetDisableAPIValidation(true) - fmt.Fprintf(os.Stderr, "[MCP Server] OASF API validation disabled, using embedded schemas\n") - } else { - // Read schema URL from environment variable (default to public OASF server) - schemaURL := os.Getenv("OASF_API_VALIDATION_SCHEMA_URL") - if schemaURL == "" { - schemaURL = corev1.DefaultSchemaURL - } - - // Read strict validation setting (default to strict for safety) - strictValidation := os.Getenv("OASF_API_VALIDATION_STRICT_MODE") != "false" - - corev1.SetSchemaURL(schemaURL) - corev1.SetDisableAPIValidation(false) - corev1.SetStrictValidation(strictValidation) - - fmt.Fprintf(os.Stderr, "[MCP Server] OASF API validator configured with schema_url=%s, strict mode=%t\n", - schemaURL, strictValidation) - } - - // Create MCP server for Directory operations - server := mcp.NewServer(&mcp.Implementation{ - Name: "dir-mcp-server", - Version: "v0.1.0", - }, nil) - - // Add tool for listing available OASF schema versions - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_list_versions", - Description: strings.TrimSpace(` -Lists all available OASF schema versions supported by the server. -This tool provides a simple way to discover what schema versions are available -without having to make requests with specific version numbers. - -Use this tool to see what OASF schema versions you can work with. - `), - }, tools.ListVersions) - - // Add tool for getting OASF schema content - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_get_schema", - Description: strings.TrimSpace(` -Retrieves the complete OASF schema JSON content for the specified version. -This tool provides direct access to the full schema definition including: -- All domain definitions and their IDs -- All skill definitions and their IDs -- Complete validation rules and constraints -- Schema structure and required fields - -Use this tool to get the complete schema for reference when creating or validating agent records. - `), - }, tools.GetSchema) - - // Add tool for getting OASF schema skills - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_get_schema_skills", - Description: strings.TrimSpace(` -Retrieves skills from the OASF schema for the specified version. 
-This tool supports hierarchical skill navigation: -- Without parent_skill: Returns all top-level skill categories (e.g., "analytical_skills", "natural_language_processing") -- With parent_skill: Returns sub-skills under that parent (e.g., parent="retrieval_augmented_generation" returns its children) - -Each skill includes: -- name: The skill identifier used in OASF records -- caption: Human-readable display name -- id: Numeric skill identifier - -Use this tool to discover valid skills when creating or enriching agent records. -Essential for LLM-based enrichment to ensure skills match the schema taxonomy. - `), - }, tools.GetSchemaSkills) - - // Add tool for getting OASF schema domains - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_get_schema_domains", - Description: strings.TrimSpace(` -Retrieves domains from the OASF schema for the specified version. -This tool supports hierarchical domain navigation: -- Without parent_domain: Returns all top-level domain categories (e.g., "artificial_intelligence", "software_development") -- With parent_domain: Returns sub-domains under that parent (e.g., parent="artificial_intelligence" returns its children) - -Each domain includes: -- name: The domain identifier used in OASF records -- caption: Human-readable display name -- id: Numeric domain identifier - -Use this tool to discover valid domains when creating or enriching agent records. -Essential for LLM-based enrichment to ensure domains match the schema taxonomy. - `), - }, tools.GetSchemaDomains) - - // Add tool for validating OASF agent records - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_validate_record", - Description: strings.TrimSpace(` -Validates an AGNTCY OASF agent record against the OASF schema. -This tool performs comprehensive validation including: -- Required fields check -- Field type validation -- Schema-specific constraints -- Domain and skill taxonomy validation - -Returns detailed validation errors to help fix issues. -Use this tool to ensure a record meets all OASF requirements before pushing. - `), - }, tools.ValidateRecord) - - // Add tool for pushing records to Directory server - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_dir_push_record", - Description: strings.TrimSpace(` -Pushes an OASF agent record to a Directory server. -This tool validates and uploads the record to the configured Directory server, returning: -- Content Identifier (CID) for the pushed record -- Server address where the record was stored - -The record must be a valid OASF agent record. -Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). - -Use this tool after validating your record to store it in the Directory. - `), - }, tools.PushRecord) - - // Add tool for searching local records - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_dir_search_local", - Description: strings.TrimSpace(` -Searches for agent records on the local directory node using structured query filters. -This tool supports flexible wildcard patterns for matching records based on: -- Agent names (e.g., "gpt*", "agent-?", "web-[0-9]") -- Versions (e.g., "v1.*", "*-beta", "v?.0.?") -- Skill IDs (exact match only, e.g., "10201") -- Skill names (e.g., "*python*", "Image*", "[A-M]*") -- Locators (e.g., "docker-image:*", "http*") -- Modules (e.g., "*-plugin", "core*") - -Multiple filters are combined with OR logic (matches any filter). -Results are streamed and paginated for efficient handling of large result sets. 
- -Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). - -Use this tool for direct, structured searches when you know the exact filters to apply. - `), - }, tools.SearchLocal) - - // Add tool for pulling records from Directory - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_dir_pull_record", - Description: strings.TrimSpace(` -Pulls an OASF agent record from the local Directory node by its CID (Content Identifier). -The pulled record is content-addressable and can be validated against its hash. - -Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). - -Use this tool to retrieve agent records by their CID for inspection or validation. - `), - }, tools.PullRecord) - - // Add tool for exporting OASF records to other formats - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_export_record", - Description: strings.TrimSpace(` -Exports an OASF agent record to a different format using the OASF SDK translator. -This tool takes an OASF record in JSON format and converts it to the specified target format. - -Currently supported target formats: -- "a2a": Agent-to-Agent (A2A) format -- "ghcopilot": GitHub Copilot MCP configuration format - -**Input Format**: -Provide the OASF record as a standard JSON object (no wrapper needed). - -**Output Format**: -The output structure depends on the target format: -- For "a2a": Returns the A2A card directly as a JSON object -- For "ghcopilot": Returns the GitHub Copilot MCP configuration as a JSON object - -Use this tool when you need to convert OASF records to other format specifications. - `), - }, tools.ExportRecord) - - // Add tool for importing records from other formats to OASF - mcp.AddTool(server, &mcp.Tool{ - Name: "agntcy_oasf_import_record", - Description: strings.TrimSpace(` -Imports data from a different format to an OASF agent record using the OASF SDK translator. -This tool takes data in a source format and converts it to OASF record format. - -Currently supported source formats: -- "mcp": Model Context Protocol format -- "a2a": Agent-to-Agent (A2A) format - -**CRITICAL - Input Format Requirements**: -The source_data MUST be wrapped in a format-specific object: - -For "mcp" format, wrap the MCP server data in a "server" object: -{ - "server": { - "name": "example-server", - "version": "1.0.0", - ... (rest of MCP server data) - } -} - -For "a2a" format, wrap the A2A card data in an "a2aCard" object: -{ - "a2aCard": { - "name": "example-agent", - "version": "1.0.0", - "description": "...", - ... (rest of A2A card data) - } -} - -**Important - Enrichment Required**: The domains and skills in the resulting OASF record -from the oasf-sdk translator are incomplete and MUST be enriched. Follow these steps: - -1. Remove any existing domains and skills fields from the imported record -2. Use agntcy_oasf_get_schema_domains to discover valid domain options: - - First get top-level domains (without parent_domain parameter) - - Then explore sub-domains using the parent_domain parameter if needed -3. Use agntcy_oasf_get_schema_skills to discover valid skill options: - - First get top-level skill categories (without parent_skill parameter) - - Then explore sub-skills using the parent_skill parameter if needed -4. Analyze the source content to select the most relevant domains and skills -5. Add the selected domains and skills to the record with proper names and IDs - -Use this tool when you need to convert records from other format specifications to OASF. 
-For a complete guided workflow including enrichment and validation, use the import_record prompt. - `), - }, tools.ImportRecord) - - // Add prompt for creating agent records - server.AddPrompt(&mcp.Prompt{ - Name: "create_record", - Description: strings.TrimSpace(` -Analyzes the current directory codebase and automatically creates a complete OASF agent record. - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "output_path", - Description: "Where to output the record: file path (e.g., agent.json) to save to file, or empty for default (stdout)", - Required: false, - }, - { - Name: "schema_version", - Description: "OASF schema version to use (e.g., 0.7.0, 0.3.1). Defaults to 0.7.0", - Required: false, - }, - }, - }, prompts.CreateRecord) - - // Add prompt for validating records - server.AddPrompt(&mcp.Prompt{ - Name: "validate_record", - Description: strings.TrimSpace(` -Validates an existing OASF agent record against the schema. - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "record_path", - Description: "Path to the OASF record JSON file to validate", - Required: true, - }, - }, - }, prompts.ValidateRecord) - - // Add prompt for pushing records - server.AddPrompt(&mcp.Prompt{ - Name: "push_record", - Description: strings.TrimSpace(` -Complete workflow for validating and pushing an OASF record to the Directory server. - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "record_path", - Description: "Path to the OASF record JSON file to validate and push", - Required: true, - }, - }, - }, prompts.PushRecord) - - // Add prompt for searching records with free-text - server.AddPrompt(&mcp.Prompt{ - Name: "search_records", - Description: strings.TrimSpace(` -Guided workflow for searching agent records using free-text queries. -Automatically translates natural language queries into structured search parameters -using OASF schema knowledge. Examples: "find Python agents", "agents that can process images". - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "query", - Description: "Free-text search query describing what agents you're looking for", - Required: true, - }, - }, - }, prompts.SearchRecords) - - // Add prompt for pulling records - server.AddPrompt(&mcp.Prompt{ - Name: "pull_record", - Description: strings.TrimSpace(` -Guided workflow for pulling an OASF agent record from Directory by its CID. -Optionally saves the result to a file. - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "cid", - Description: "Content Identifier (CID) of the record to pull", - Required: true, - }, - { - Name: "output_path", - Description: "Where to save the pulled record: file path (e.g., record.json) or empty for default (stdout)", - Required: false, - }, - }, - }, prompts.PullRecord) - - // Add prompt for importing records from other formats - server.AddPrompt(&mcp.Prompt{ - Name: "import_record", - Description: strings.TrimSpace(` -Complete workflow for importing data from other formats to OASF with enrichment and validation. 
-This guided workflow includes: -- Format conversion using the OASF SDK translator -- Domain and skill enrichment using OASF schema -- Comprehensive validation -- Optional output to file - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "source_data_path", - Description: "Path to the source data file to import", - Required: true, - }, - { - Name: "source_format", - Description: "Source format to import from (e.g., 'mcp', 'a2a')", - Required: true, - }, - { - Name: "output_path", - Description: "Where to save the imported OASF record: file path (e.g., record.json) or empty for stdout", - Required: false, - }, - { - Name: "schema_version", - Description: "OASF schema version to use for validation (e.g., 0.7.0, 0.8.0). Defaults to 0.8.0", - Required: false, - }, - }, - }, prompts.ImportRecord) - - // Add prompt for exporting records to other formats - server.AddPrompt(&mcp.Prompt{ - Name: "export_record", - Description: strings.TrimSpace(` -Complete workflow for validating and exporting an OASF record to other formats. -This guided workflow includes: -- OASF record validation -- Schema compatibility check -- Format conversion using the OASF SDK translator -- Export verification -- Optional output to file - `), - Arguments: []*mcp.PromptArgument{ - { - Name: "record_path", - Description: "Path to the OASF record JSON file to export", - Required: true, - }, - { - Name: "target_format", - Description: "Target format to export to (e.g., 'a2a', 'ghcopilot')", - Required: true, - }, - { - Name: "output_path", - Description: "Where to save the exported data: file path (e.g., output.json) or empty for stdout", - Required: false, - }, - }, - }, prompts.ExportRecord) - - // Run the server over stdin/stdout - if err := server.Run(ctx, &mcp.StdioTransport{}); err != nil { - return fmt.Errorf("failed to run MCP server: %w", err) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package server + +import ( + "context" + "fmt" + "os" + "strings" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/mcp/prompts" + "github.com/agntcy/dir/mcp/tools" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// Serve creates and runs the MCP server with all configured tools and prompts. +// It accepts a context and runs the server over stdin/stdout using the stdio transport. +// +//nolint:maintidx // Function registers all MCP tools and prompts, complexity is acceptable +func Serve(ctx context.Context) error { + // Configure OASF validation + // Note: Logging to stderr is intentional - MCP servers communicate over stdin/stdout, + // so stderr is used for logging/debugging messages that don't interfere with the protocol. 
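The configuration block that follows is driven entirely by environment variables. A minimal launcher sketch, assuming only what the code reads below (the schema URL is the placeholder value used by the package tests, not a real endpoint):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/agntcy/dir/mcp/server"
)

func main() {
	// Point validation at a custom schema server and relax strict mode,
	// then run the MCP server over stdio until the context is cancelled.
	os.Setenv("OASF_API_VALIDATION_SCHEMA_URL", "https://custom.schema.url")
	os.Setenv("OASF_API_VALIDATION_STRICT_MODE", "false")

	if err := server.Serve(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```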
+ disableAPIValidation := os.Getenv("OASF_API_VALIDATION_DISABLE") == "true" + if disableAPIValidation { + corev1.SetDisableAPIValidation(true) + fmt.Fprintf(os.Stderr, "[MCP Server] OASF API validation disabled, using embedded schemas\n") + } else { + // Read schema URL from environment variable (default to public OASF server) + schemaURL := os.Getenv("OASF_API_VALIDATION_SCHEMA_URL") + if schemaURL == "" { + schemaURL = corev1.DefaultSchemaURL + } + + // Read strict validation setting (default to strict for safety) + strictValidation := os.Getenv("OASF_API_VALIDATION_STRICT_MODE") != "false" + + corev1.SetSchemaURL(schemaURL) + corev1.SetDisableAPIValidation(false) + corev1.SetStrictValidation(strictValidation) + + fmt.Fprintf(os.Stderr, "[MCP Server] OASF API validator configured with schema_url=%s, strict mode=%t\n", + schemaURL, strictValidation) + } + + // Create MCP server for Directory operations + server := mcp.NewServer(&mcp.Implementation{ + Name: "dir-mcp-server", + Version: "v0.1.0", + }, nil) + + // Add tool for listing available OASF schema versions + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_list_versions", + Description: strings.TrimSpace(` +Lists all available OASF schema versions supported by the server. +This tool provides a simple way to discover what schema versions are available +without having to make requests with specific version numbers. + +Use this tool to see what OASF schema versions you can work with. + `), + }, tools.ListVersions) + + // Add tool for getting OASF schema content + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_get_schema", + Description: strings.TrimSpace(` +Retrieves the complete OASF schema JSON content for the specified version. +This tool provides direct access to the full schema definition including: +- All domain definitions and their IDs +- All skill definitions and their IDs +- Complete validation rules and constraints +- Schema structure and required fields + +Use this tool to get the complete schema for reference when creating or validating agent records. + `), + }, tools.GetSchema) + + // Add tool for getting OASF schema skills + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_get_schema_skills", + Description: strings.TrimSpace(` +Retrieves skills from the OASF schema for the specified version. +This tool supports hierarchical skill navigation: +- Without parent_skill: Returns all top-level skill categories (e.g., "analytical_skills", "natural_language_processing") +- With parent_skill: Returns sub-skills under that parent (e.g., parent="retrieval_augmented_generation" returns its children) + +Each skill includes: +- name: The skill identifier used in OASF records +- caption: Human-readable display name +- id: Numeric skill identifier + +Use this tool to discover valid skills when creating or enriching agent records. +Essential for LLM-based enrichment to ensure skills match the schema taxonomy. + `), + }, tools.GetSchemaSkills) + + // Add tool for getting OASF schema domains + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_get_schema_domains", + Description: strings.TrimSpace(` +Retrieves domains from the OASF schema for the specified version. 
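The skills tool above and the domains tool described here share a two-step navigation pattern. Sketched as the argument payloads a client would send on each call (the parent_skill key follows the tool's parameter docs; the concrete Go input struct lives in the tools package and is not shown in this diff):

```go
package main

import "fmt"

func main() {
	// Step 1: no parent argument, which returns top-level categories
	// such as "analytical_skills" or "natural_language_processing".
	first := map[string]string{}

	// Step 2: pass a parent to list its children, per the description above.
	second := map[string]string{"parent_skill": "retrieval_augmented_generation"}

	fmt.Println(first, second)
}
```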
+This tool supports hierarchical domain navigation: +- Without parent_domain: Returns all top-level domain categories (e.g., "artificial_intelligence", "software_development") +- With parent_domain: Returns sub-domains under that parent (e.g., parent="artificial_intelligence" returns its children) + +Each domain includes: +- name: The domain identifier used in OASF records +- caption: Human-readable display name +- id: Numeric domain identifier + +Use this tool to discover valid domains when creating or enriching agent records. +Essential for LLM-based enrichment to ensure domains match the schema taxonomy. + `), + }, tools.GetSchemaDomains) + + // Add tool for validating OASF agent records + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_validate_record", + Description: strings.TrimSpace(` +Validates an AGNTCY OASF agent record against the OASF schema. +This tool performs comprehensive validation including: +- Required fields check +- Field type validation +- Schema-specific constraints +- Domain and skill taxonomy validation + +Returns detailed validation errors to help fix issues. +Use this tool to ensure a record meets all OASF requirements before pushing. + `), + }, tools.ValidateRecord) + + // Add tool for pushing records to Directory server + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_dir_push_record", + Description: strings.TrimSpace(` +Pushes an OASF agent record to a Directory server. +This tool validates and uploads the record to the configured Directory server, returning: +- Content Identifier (CID) for the pushed record +- Server address where the record was stored + +The record must be a valid OASF agent record. +Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). + +Use this tool after validating your record to store it in the Directory. + `), + }, tools.PushRecord) + + // Add tool for searching local records + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_dir_search_local", + Description: strings.TrimSpace(` +Searches for agent records on the local directory node using structured query filters. +This tool supports flexible wildcard patterns for matching records based on: +- Agent names (e.g., "gpt*", "agent-?", "web-[0-9]") +- Versions (e.g., "v1.*", "*-beta", "v?.0.?") +- Skill IDs (exact match only, e.g., "10201") +- Skill names (e.g., "*python*", "Image*", "[A-M]*") +- Locators (e.g., "docker-image:*", "http*") +- Modules (e.g., "*-plugin", "core*") + +Multiple filters are combined with OR logic (matches any filter). +Results are streamed and paginated for efficient handling of large result sets. + +Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). + +Use this tool for direct, structured searches when you know the exact filters to apply. + `), + }, tools.SearchLocal) + + // Add tool for pulling records from Directory + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_dir_pull_record", + Description: strings.TrimSpace(` +Pulls an OASF agent record from the local Directory node by its CID (Content Identifier). +The pulled record is content-addressable and can be validated against its hash. + +Server configuration is set via environment variables (DIRECTORY_CLIENT_SERVER_ADDRESS). + +Use this tool to retrieve agent records by their CID for inspection or validation. 
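The wildcard grammar quoted in the search tool description (* for zero or more characters, ? for exactly one, [abc] for a character class) is the same grammar Go's path.Match accepts, which allows a quick local illustration; whether the server matches identically is an assumption, since the matcher is not part of this diff:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Try the example patterns from the search tool description against
	// a sample record name.
	for _, pattern := range []string{"gpt*", "agent-?", "web-[0-9]", "v1.*"} {
		matched, _ := path.Match(pattern, "web-3")
		fmt.Printf("%-10s matches %q: %v\n", pattern, "web-3", matched)
	}
}
```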
+ `), + }, tools.PullRecord) + + // Add tool for exporting OASF records to other formats + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_export_record", + Description: strings.TrimSpace(` +Exports an OASF agent record to a different format using the OASF SDK translator. +This tool takes an OASF record in JSON format and converts it to the specified target format. + +Currently supported target formats: +- "a2a": Agent-to-Agent (A2A) format +- "ghcopilot": GitHub Copilot MCP configuration format + +**Input Format**: +Provide the OASF record as a standard JSON object (no wrapper needed). + +**Output Format**: +The output structure depends on the target format: +- For "a2a": Returns the A2A card directly as a JSON object +- For "ghcopilot": Returns the GitHub Copilot MCP configuration as a JSON object + +Use this tool when you need to convert OASF records to other format specifications. + `), + }, tools.ExportRecord) + + // Add tool for importing records from other formats to OASF + mcp.AddTool(server, &mcp.Tool{ + Name: "agntcy_oasf_import_record", + Description: strings.TrimSpace(` +Imports data from a different format to an OASF agent record using the OASF SDK translator. +This tool takes data in a source format and converts it to OASF record format. + +Currently supported source formats: +- "mcp": Model Context Protocol format +- "a2a": Agent-to-Agent (A2A) format + +**CRITICAL - Input Format Requirements**: +The source_data MUST be wrapped in a format-specific object: + +For "mcp" format, wrap the MCP server data in a "server" object: +{ + "server": { + "name": "example-server", + "version": "1.0.0", + ... (rest of MCP server data) + } +} + +For "a2a" format, wrap the A2A card data in an "a2aCard" object: +{ + "a2aCard": { + "name": "example-agent", + "version": "1.0.0", + "description": "...", + ... (rest of A2A card data) + } +} + +**Important - Enrichment Required**: The domains and skills in the resulting OASF record +from the oasf-sdk translator are incomplete and MUST be enriched. Follow these steps: + +1. Remove any existing domains and skills fields from the imported record +2. Use agntcy_oasf_get_schema_domains to discover valid domain options: + - First get top-level domains (without parent_domain parameter) + - Then explore sub-domains using the parent_domain parameter if needed +3. Use agntcy_oasf_get_schema_skills to discover valid skill options: + - First get top-level skill categories (without parent_skill parameter) + - Then explore sub-skills using the parent_skill parameter if needed +4. Analyze the source content to select the most relevant domains and skills +5. Add the selected domains and skills to the record with proper names and IDs + +Use this tool when you need to convert records from other format specifications to OASF. +For a complete guided workflow including enrichment and validation, use the import_record prompt. + `), + }, tools.ImportRecord) + + // Add prompt for creating agent records + server.AddPrompt(&mcp.Prompt{ + Name: "create_record", + Description: strings.TrimSpace(` +Analyzes the current directory codebase and automatically creates a complete OASF agent record. + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "output_path", + Description: "Where to output the record: file path (e.g., agent.json) to save to file, or empty for default (stdout)", + Required: false, + }, + { + Name: "schema_version", + Description: "OASF schema version to use (e.g., 0.7.0, 0.3.1). 
Defaults to 0.7.0", + Required: false, + }, + }, + }, prompts.CreateRecord) + + // Add prompt for validating records + server.AddPrompt(&mcp.Prompt{ + Name: "validate_record", + Description: strings.TrimSpace(` +Validates an existing OASF agent record against the schema. + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "record_path", + Description: "Path to the OASF record JSON file to validate", + Required: true, + }, + }, + }, prompts.ValidateRecord) + + // Add prompt for pushing records + server.AddPrompt(&mcp.Prompt{ + Name: "push_record", + Description: strings.TrimSpace(` +Complete workflow for validating and pushing an OASF record to the Directory server. + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "record_path", + Description: "Path to the OASF record JSON file to validate and push", + Required: true, + }, + }, + }, prompts.PushRecord) + + // Add prompt for searching records with free-text + server.AddPrompt(&mcp.Prompt{ + Name: "search_records", + Description: strings.TrimSpace(` +Guided workflow for searching agent records using free-text queries. +Automatically translates natural language queries into structured search parameters +using OASF schema knowledge. Examples: "find Python agents", "agents that can process images". + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "query", + Description: "Free-text search query describing what agents you're looking for", + Required: true, + }, + }, + }, prompts.SearchRecords) + + // Add prompt for pulling records + server.AddPrompt(&mcp.Prompt{ + Name: "pull_record", + Description: strings.TrimSpace(` +Guided workflow for pulling an OASF agent record from Directory by its CID. +Optionally saves the result to a file. + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "cid", + Description: "Content Identifier (CID) of the record to pull", + Required: true, + }, + { + Name: "output_path", + Description: "Where to save the pulled record: file path (e.g., record.json) or empty for default (stdout)", + Required: false, + }, + }, + }, prompts.PullRecord) + + // Add prompt for importing records from other formats + server.AddPrompt(&mcp.Prompt{ + Name: "import_record", + Description: strings.TrimSpace(` +Complete workflow for importing data from other formats to OASF with enrichment and validation. +This guided workflow includes: +- Format conversion using the OASF SDK translator +- Domain and skill enrichment using OASF schema +- Comprehensive validation +- Optional output to file + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "source_data_path", + Description: "Path to the source data file to import", + Required: true, + }, + { + Name: "source_format", + Description: "Source format to import from (e.g., 'mcp', 'a2a')", + Required: true, + }, + { + Name: "output_path", + Description: "Where to save the imported OASF record: file path (e.g., record.json) or empty for stdout", + Required: false, + }, + { + Name: "schema_version", + Description: "OASF schema version to use for validation (e.g., 0.7.0, 0.8.0). Defaults to 0.8.0", + Required: false, + }, + }, + }, prompts.ImportRecord) + + // Add prompt for exporting records to other formats + server.AddPrompt(&mcp.Prompt{ + Name: "export_record", + Description: strings.TrimSpace(` +Complete workflow for validating and exporting an OASF record to other formats. 
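Behind the export prompt registered here sits the tools.ExportRecord entry point, whose diff appears further down. A direct-call sketch mirroring the unit tests in export_record_test.go (export can still fail if the record lacks the module data the translator needs):

```go
package main

import (
	"context"
	"fmt"

	"github.com/agntcy/dir/mcp/tools"
)

func main() {
	// Call the export tool directly with a minimal OASF record, as the
	// unit tests do. The request parameter may be nil in this usage.
	input := tools.ExportRecordInput{
		RecordJSON:   `{"schema_version": "0.8.0", "name": "test-agent", "version": "1.0.0"}`,
		TargetFormat: "a2a",
	}

	_, out, err := tools.ExportRecord(context.Background(), nil, input)
	fmt.Println(out.ExportedData, out.ErrorMessage, err)
}
```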
+This guided workflow includes: +- OASF record validation +- Schema compatibility check +- Format conversion using the OASF SDK translator +- Export verification +- Optional output to file + `), + Arguments: []*mcp.PromptArgument{ + { + Name: "record_path", + Description: "Path to the OASF record JSON file to export", + Required: true, + }, + { + Name: "target_format", + Description: "Target format to export to (e.g., 'a2a', 'ghcopilot')", + Required: true, + }, + { + Name: "output_path", + Description: "Where to save the exported data: file path (e.g., output.json) or empty for stdout", + Required: false, + }, + }, + }, prompts.ExportRecord) + + // Run the server over stdin/stdout + if err := server.Run(ctx, &mcp.StdioTransport{}); err != nil { + return fmt.Errorf("failed to run MCP server: %w", err) + } + + return nil +} diff --git a/mcp/server/server_test.go b/mcp/server/server_test.go index add7f34f9..835ca51f6 100644 --- a/mcp/server/server_test.go +++ b/mcp/server/server_test.go @@ -1,102 +1,102 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package server - -import ( - "context" - "testing" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/stretchr/testify/assert" -) - -func TestServe_ValidationConfiguration(t *testing.T) { - tests := []struct { - name string - disableAPIValidationEnv string - oasfSchemaURLEnv string - strictValidationEnv string - wantDisableAPI bool - wantSchemaURL string - wantStrict bool - }{ - { - name: "disable API validation", - disableAPIValidationEnv: "true", - wantDisableAPI: true, - }, - { - name: "enable API validation with default schema URL", - disableAPIValidationEnv: "false", - wantDisableAPI: false, - wantSchemaURL: corev1.DefaultSchemaURL, - wantStrict: true, - }, - { - name: "enable API validation with custom schema URL", - disableAPIValidationEnv: "false", - oasfSchemaURLEnv: "https://custom.schema.url", - wantDisableAPI: false, - wantSchemaURL: "https://custom.schema.url", - wantStrict: true, - }, - { - name: "enable API validation with strict=false", - disableAPIValidationEnv: "false", - strictValidationEnv: "false", - wantDisableAPI: false, - wantSchemaURL: corev1.DefaultSchemaURL, - wantStrict: false, - }, - { - name: "enable API validation with strict=true (default)", - disableAPIValidationEnv: "false", - strictValidationEnv: "true", - wantDisableAPI: false, - wantSchemaURL: corev1.DefaultSchemaURL, - wantStrict: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Reset package-level config after test - defer func() { - corev1.SetDisableAPIValidation(false) - corev1.SetSchemaURL(corev1.DefaultSchemaURL) - corev1.SetStrictValidation(true) - }() - - // Set test env vars - if tt.disableAPIValidationEnv != "" { - t.Setenv("OASF_API_VALIDATION_DISABLE", tt.disableAPIValidationEnv) - } - - if tt.oasfSchemaURLEnv != "" { - t.Setenv("OASF_API_VALIDATION_SCHEMA_URL", tt.oasfSchemaURLEnv) - } - - if tt.strictValidationEnv != "" { - t.Setenv("OASF_API_VALIDATION_STRICT_MODE", tt.strictValidationEnv) - } - - // Create a context that will be cancelled immediately to stop Serve early - ctx, cancel := context.WithCancel(context.Background()) - cancel() // Cancel immediately so Serve returns quickly - - // Call Serve - it will configure validation and then return due to cancelled context - err := Serve(ctx) - - // Verify that validation was configured correctly - // We can't directly check the internal state, but we can verify - // that the 
configuration functions were called by checking if - // validation still works with the expected settings - assert.Error(t, err) // Should error due to cancelled context - - // Note: We can't easily verify the exact configuration without - // exposing internal state, but the fact that Serve runs without - // panicking and configures the validators is sufficient coverage - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package server + +import ( + "context" + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/stretchr/testify/assert" +) + +func TestServe_ValidationConfiguration(t *testing.T) { + tests := []struct { + name string + disableAPIValidationEnv string + oasfSchemaURLEnv string + strictValidationEnv string + wantDisableAPI bool + wantSchemaURL string + wantStrict bool + }{ + { + name: "disable API validation", + disableAPIValidationEnv: "true", + wantDisableAPI: true, + }, + { + name: "enable API validation with default schema URL", + disableAPIValidationEnv: "false", + wantDisableAPI: false, + wantSchemaURL: corev1.DefaultSchemaURL, + wantStrict: true, + }, + { + name: "enable API validation with custom schema URL", + disableAPIValidationEnv: "false", + oasfSchemaURLEnv: "https://custom.schema.url", + wantDisableAPI: false, + wantSchemaURL: "https://custom.schema.url", + wantStrict: true, + }, + { + name: "enable API validation with strict=false", + disableAPIValidationEnv: "false", + strictValidationEnv: "false", + wantDisableAPI: false, + wantSchemaURL: corev1.DefaultSchemaURL, + wantStrict: false, + }, + { + name: "enable API validation with strict=true (default)", + disableAPIValidationEnv: "false", + strictValidationEnv: "true", + wantDisableAPI: false, + wantSchemaURL: corev1.DefaultSchemaURL, + wantStrict: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Reset package-level config after test + defer func() { + corev1.SetDisableAPIValidation(false) + corev1.SetSchemaURL(corev1.DefaultSchemaURL) + corev1.SetStrictValidation(true) + }() + + // Set test env vars + if tt.disableAPIValidationEnv != "" { + t.Setenv("OASF_API_VALIDATION_DISABLE", tt.disableAPIValidationEnv) + } + + if tt.oasfSchemaURLEnv != "" { + t.Setenv("OASF_API_VALIDATION_SCHEMA_URL", tt.oasfSchemaURLEnv) + } + + if tt.strictValidationEnv != "" { + t.Setenv("OASF_API_VALIDATION_STRICT_MODE", tt.strictValidationEnv) + } + + // Create a context that will be cancelled immediately to stop Serve early + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately so Serve returns quickly + + // Call Serve - it will configure validation and then return due to cancelled context + err := Serve(ctx) + + // Verify that validation was configured correctly + // We can't directly check the internal state, but we can verify + // that the configuration functions were called by checking if + // validation still works with the expected settings + assert.Error(t, err) // Should error due to cancelled context + + // Note: We can't easily verify the exact configuration without + // exposing internal state, but the fact that Serve runs without + // panicking and configures the validators is sufficient coverage + }) + } +} diff --git a/mcp/tools/export_record.go b/mcp/tools/export_record.go index 84ce8708e..efd95c0ee 100644 --- a/mcp/tools/export_record.go +++ b/mcp/tools/export_record.go @@ -1,106 +1,106 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// 
SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/agntcy/oasf-sdk/pkg/translator" - "github.com/modelcontextprotocol/go-sdk/mcp" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/structpb" -) - -// ExportRecordInput defines the input parameters for exporting a record. -type ExportRecordInput struct { - RecordJSON string `json:"record_json" jsonschema:"JSON string of the OASF agent record to export (required)"` - TargetFormat string `json:"target_format" jsonschema:"Target format to export to (e.g., 'mcp') (required)"` -} - -// ExportRecordOutput defines the output of exporting a record. -type ExportRecordOutput struct { - ExportedData string `json:"exported_data,omitempty" jsonschema:"The exported data in the target format (JSON string)"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if export failed"` -} - -// ExportRecord exports an OASF agent record to a different format using the OASF SDK translator. -// Currently supported formats: -// - "a2a": Agent-to-Agent (A2A) format. -// - "ghcopilot": GitHub Copilot MCP configuration format. -func ExportRecord(ctx context.Context, _ *mcp.CallToolRequest, input ExportRecordInput) ( - *mcp.CallToolResult, - ExportRecordOutput, - error, -) { - // Validate input - if input.RecordJSON == "" { - return nil, ExportRecordOutput{ - ErrorMessage: "record_json is required", - }, nil - } - - if input.TargetFormat == "" { - return nil, ExportRecordOutput{ - ErrorMessage: "target_format is required", - }, nil - } - - // Parse the record JSON into a structpb.Struct - var recordStruct structpb.Struct - if err := protojson.Unmarshal([]byte(input.RecordJSON), &recordStruct); err != nil { - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v", err), - }, nil - } - - // Normalize the target format to lowercase for comparison - targetFormat := strings.ToLower(strings.TrimSpace(input.TargetFormat)) - - // Export based on target format - var exportedJSON []byte - - switch targetFormat { - case "a2a": - a2aCard, err := translator.RecordToA2A(&recordStruct) - if err != nil { - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to export to A2A format: %v", err), - }, nil - } - // Use regular JSON marshaling since A2ACard is not a protobuf message - exportedJSON, err = json.MarshalIndent(a2aCard, "", " ") - if err != nil { - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to marshal A2A data to JSON: %v", err), - }, nil - } - - case "ghcopilot": - ghCopilotConfig, err := translator.RecordToGHCopilot(&recordStruct) - if err != nil { - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to export to GitHub Copilot format: %v", err), - }, nil - } - // Use regular JSON marshaling since GHCopilotMCPConfig is not a protobuf message - exportedJSON, err = json.MarshalIndent(ghCopilotConfig, "", " ") - if err != nil { - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to marshal GitHub Copilot data to JSON: %v", err), - }, nil - } - - default: - return nil, ExportRecordOutput{ - ErrorMessage: fmt.Sprintf("Unsupported target format: %s. 
Supported formats: a2a, ghcopilot", input.TargetFormat), - }, nil - } - - return nil, ExportRecordOutput{ - ExportedData: string(exportedJSON), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/agntcy/oasf-sdk/pkg/translator" + "github.com/modelcontextprotocol/go-sdk/mcp" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +// ExportRecordInput defines the input parameters for exporting a record. +type ExportRecordInput struct { + RecordJSON string `json:"record_json" jsonschema:"JSON string of the OASF agent record to export (required)"` + TargetFormat string `json:"target_format" jsonschema:"Target format to export to (e.g., 'mcp') (required)"` +} + +// ExportRecordOutput defines the output of exporting a record. +type ExportRecordOutput struct { + ExportedData string `json:"exported_data,omitempty" jsonschema:"The exported data in the target format (JSON string)"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if export failed"` +} + +// ExportRecord exports an OASF agent record to a different format using the OASF SDK translator. +// Currently supported formats: +// - "a2a": Agent-to-Agent (A2A) format. +// - "ghcopilot": GitHub Copilot MCP configuration format. +func ExportRecord(ctx context.Context, _ *mcp.CallToolRequest, input ExportRecordInput) ( + *mcp.CallToolResult, + ExportRecordOutput, + error, +) { + // Validate input + if input.RecordJSON == "" { + return nil, ExportRecordOutput{ + ErrorMessage: "record_json is required", + }, nil + } + + if input.TargetFormat == "" { + return nil, ExportRecordOutput{ + ErrorMessage: "target_format is required", + }, nil + } + + // Parse the record JSON into a structpb.Struct + var recordStruct structpb.Struct + if err := protojson.Unmarshal([]byte(input.RecordJSON), &recordStruct); err != nil { + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v", err), + }, nil + } + + // Normalize the target format to lowercase for comparison + targetFormat := strings.ToLower(strings.TrimSpace(input.TargetFormat)) + + // Export based on target format + var exportedJSON []byte + + switch targetFormat { + case "a2a": + a2aCard, err := translator.RecordToA2A(&recordStruct) + if err != nil { + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to export to A2A format: %v", err), + }, nil + } + // Use regular JSON marshaling since A2ACard is not a protobuf message + exportedJSON, err = json.MarshalIndent(a2aCard, "", " ") + if err != nil { + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to marshal A2A data to JSON: %v", err), + }, nil + } + + case "ghcopilot": + ghCopilotConfig, err := translator.RecordToGHCopilot(&recordStruct) + if err != nil { + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to export to GitHub Copilot format: %v", err), + }, nil + } + // Use regular JSON marshaling since GHCopilotMCPConfig is not a protobuf message + exportedJSON, err = json.MarshalIndent(ghCopilotConfig, "", " ") + if err != nil { + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to marshal GitHub Copilot data to JSON: %v", err), + }, nil + } + + default: + return nil, ExportRecordOutput{ + ErrorMessage: fmt.Sprintf("Unsupported target format: %s. 
Supported formats: a2a, ghcopilot", input.TargetFormat), + }, nil + } + + return nil, ExportRecordOutput{ + ExportedData: string(exportedJSON), + }, nil +} diff --git a/mcp/tools/export_record_test.go b/mcp/tools/export_record_test.go index cf398aab3..c09570c4f 100644 --- a/mcp/tools/export_record_test.go +++ b/mcp/tools/export_record_test.go @@ -1,138 +1,138 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Test structure is similar to import_record_test but tests different functionality -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestExportRecord(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - t.Run("exports record to A2A format", func(t *testing.T) { - t.Parallel() - - // Note: This test verifies that the A2A export path is invoked. - // Actual translation success depends on the record having the required A2A module data, - // which is beyond the scope of this unit test. - - // Sample OASF record JSON - recordJSON := `{ - "schema_version": "0.8.0", - "name": "test-agent", - "version": "1.0.0", - "description": "A test agent" - }` - - input := ExportRecordInput{ - RecordJSON: recordJSON, - TargetFormat: "a2a", - } - - _, output, err := ExportRecord(ctx, nil, input) - - require.NoError(t, err) - // The export may fail if the record doesn't have the required A2A module data, - // which is expected. The important part is that it attempts the export. - if output.ErrorMessage != "" { - assert.Contains(t, output.ErrorMessage, "Failed to export to A2A format") - } - }) - - t.Run("fails when record_json is empty", func(t *testing.T) { - t.Parallel() - - input := ExportRecordInput{ - RecordJSON: "", - TargetFormat: "a2a", - } - - _, output, err := ExportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "record_json is required") - assert.Empty(t, output.ExportedData) - }) - - t.Run("fails when target_format is empty", func(t *testing.T) { - t.Parallel() - - input := ExportRecordInput{ - RecordJSON: `{"name": "test"}`, - TargetFormat: "", - } - - _, output, err := ExportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "target_format is required") - assert.Empty(t, output.ExportedData) - }) - - t.Run("fails with unsupported target format", func(t *testing.T) { - t.Parallel() - - recordJSON := `{ - "schema_version": "0.8.0", - "name": "test-agent" - }` - - input := ExportRecordInput{ - RecordJSON: recordJSON, - TargetFormat: "unsupported-format", - } - - _, output, err := ExportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "Unsupported target format") - assert.Contains(t, output.ErrorMessage, "unsupported-format") - assert.Empty(t, output.ExportedData) - }) - - t.Run("fails with invalid JSON", func(t *testing.T) { - t.Parallel() - - input := ExportRecordInput{ - RecordJSON: `{invalid json}`, - TargetFormat: "a2a", - } - - _, output, err := ExportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "Failed to parse record JSON") - assert.Empty(t, output.ExportedData) - }) - - t.Run("handles case-insensitive target format", func(t *testing.T) { - t.Parallel() - - recordJSON := `{ - "schema_version": "0.8.0", - "name": "test-agent" - }` - - input := ExportRecordInput{ - RecordJSON: recordJSON, - TargetFormat: "A2A", - } - - _, output, err 
:= ExportRecord(ctx, nil, input) - - require.NoError(t, err) - // The test verifies that case-insensitive format is handled. - // Actual translation may fail if record lacks required data. - if output.ErrorMessage != "" { - assert.Contains(t, output.ErrorMessage, "Failed to export to A2A format") - } - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupl // Test structure is similar to import_record_test but tests different functionality +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExportRecord(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + t.Run("exports record to A2A format", func(t *testing.T) { + t.Parallel() + + // Note: This test verifies that the A2A export path is invoked. + // Actual translation success depends on the record having the required A2A module data, + // which is beyond the scope of this unit test. + + // Sample OASF record JSON + recordJSON := `{ + "schema_version": "0.8.0", + "name": "test-agent", + "version": "1.0.0", + "description": "A test agent" + }` + + input := ExportRecordInput{ + RecordJSON: recordJSON, + TargetFormat: "a2a", + } + + _, output, err := ExportRecord(ctx, nil, input) + + require.NoError(t, err) + // The export may fail if the record doesn't have the required A2A module data, + // which is expected. The important part is that it attempts the export. + if output.ErrorMessage != "" { + assert.Contains(t, output.ErrorMessage, "Failed to export to A2A format") + } + }) + + t.Run("fails when record_json is empty", func(t *testing.T) { + t.Parallel() + + input := ExportRecordInput{ + RecordJSON: "", + TargetFormat: "a2a", + } + + _, output, err := ExportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "record_json is required") + assert.Empty(t, output.ExportedData) + }) + + t.Run("fails when target_format is empty", func(t *testing.T) { + t.Parallel() + + input := ExportRecordInput{ + RecordJSON: `{"name": "test"}`, + TargetFormat: "", + } + + _, output, err := ExportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "target_format is required") + assert.Empty(t, output.ExportedData) + }) + + t.Run("fails with unsupported target format", func(t *testing.T) { + t.Parallel() + + recordJSON := `{ + "schema_version": "0.8.0", + "name": "test-agent" + }` + + input := ExportRecordInput{ + RecordJSON: recordJSON, + TargetFormat: "unsupported-format", + } + + _, output, err := ExportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "Unsupported target format") + assert.Contains(t, output.ErrorMessage, "unsupported-format") + assert.Empty(t, output.ExportedData) + }) + + t.Run("fails with invalid JSON", func(t *testing.T) { + t.Parallel() + + input := ExportRecordInput{ + RecordJSON: `{invalid json}`, + TargetFormat: "a2a", + } + + _, output, err := ExportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "Failed to parse record JSON") + assert.Empty(t, output.ExportedData) + }) + + t.Run("handles case-insensitive target format", func(t *testing.T) { + t.Parallel() + + recordJSON := `{ + "schema_version": "0.8.0", + "name": "test-agent" + }` + + input := ExportRecordInput{ + RecordJSON: recordJSON, + TargetFormat: "A2A", + } + + _, output, err := ExportRecord(ctx, nil, input) + + 
require.NoError(t, err) + // The test verifies that case-insensitive format is handled. + // Actual translation may fail if record lacks required data. + if output.ErrorMessage != "" { + assert.Contains(t, output.ErrorMessage, "Failed to export to A2A format") + } + }) +} diff --git a/mcp/tools/get_schema.go b/mcp/tools/get_schema.go index 95d567b3f..5f9047574 100644 --- a/mcp/tools/get_schema.go +++ b/mcp/tools/get_schema.go @@ -1,85 +1,85 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - "strings" - - "github.com/agntcy/oasf-sdk/pkg/validator" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// GetSchemaInput represents the input for getting OASF schema content. -type GetSchemaInput struct { - Version string `json:"version" jsonschema:"OASF schema version to retrieve (e.g., 0.3.1, 0.7.0)"` -} - -// GetSchemaOutput represents the output after getting OASF schema content. -type GetSchemaOutput struct { - Version string `json:"version" jsonschema:"The requested OASF schema version"` - Schema string `json:"schema" jsonschema:"The complete OASF schema JSON content"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if schema retrieval failed"` - AvailableVersions []string `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"` -} - -// GetSchema retrieves the OASF schema content for the specified version. -// This tool provides direct access to the complete OASF schema JSON. -func GetSchema(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaInput) ( - *mcp.CallToolResult, - GetSchemaOutput, - error, -) { - // Get available schema versions from the OASF SDK - availableVersions, err := validator.GetAvailableSchemaVersions() - if err != nil { - return nil, GetSchemaOutput{ - ErrorMessage: fmt.Sprintf("Failed to get available schema versions: %v", err), - }, nil - } - - // Validate the version parameter - if input.Version == "" { - return nil, GetSchemaOutput{ - ErrorMessage: "Version parameter is required. Available versions: " + strings.Join(availableVersions, ", "), - AvailableVersions: availableVersions, - }, nil - } - - // Check if the requested version is available - versionValid := false - - for _, version := range availableVersions { - if input.Version == version { - versionValid = true - - break - } - } - - if !versionValid { - return nil, GetSchemaOutput{ - ErrorMessage: fmt.Sprintf("Invalid version '%s'. Available versions: %s", input.Version, strings.Join(availableVersions, ", ")), - AvailableVersions: availableVersions, - }, nil - } - - // Get schema content using the OASF SDK - schemaContent, err := validator.GetSchemaContent(input.Version) - if err != nil { - return nil, GetSchemaOutput{ - Version: input.Version, - ErrorMessage: fmt.Sprintf("Failed to get OASF %s schema: %v", input.Version, err), - AvailableVersions: availableVersions, - }, nil - } - - // Return the schema content - return nil, GetSchemaOutput{ - Version: input.Version, - Schema: string(schemaContent), - AvailableVersions: availableVersions, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + "strings" + + "github.com/agntcy/oasf-sdk/pkg/validator" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// GetSchemaInput represents the input for getting OASF schema content. 
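Aside: the version-matching loop in the old-side GetSchema above is a hand-rolled linear scan. On Go 1.21+ the standard library expresses the same check in one call with slices.Contains; a minimal, self-contained sketch (the version list here is hypothetical; GetSchema obtains the real one from validator.GetAvailableSchemaVersions):

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Hypothetical version list for illustration only.
	availableVersions := []string{"0.3.1", "0.7.0", "0.8.0"}

	fmt.Println(slices.Contains(availableVersions, "0.7.0"))   // true
	fmt.Println(slices.Contains(availableVersions, "99.99.99")) // false
}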
+type GetSchemaInput struct { + Version string `json:"version" jsonschema:"OASF schema version to retrieve (e.g., 0.3.1, 0.7.0)"` +} + +// GetSchemaOutput represents the output after getting OASF schema content. +type GetSchemaOutput struct { + Version string `json:"version" jsonschema:"The requested OASF schema version"` + Schema string `json:"schema" jsonschema:"The complete OASF schema JSON content"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if schema retrieval failed"` + AvailableVersions []string `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"` +} + +// GetSchema retrieves the OASF schema content for the specified version. +// This tool provides direct access to the complete OASF schema JSON. +func GetSchema(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaInput) ( + *mcp.CallToolResult, + GetSchemaOutput, + error, +) { + // Get available schema versions from the OASF SDK + availableVersions, err := validator.GetAvailableSchemaVersions() + if err != nil { + return nil, GetSchemaOutput{ + ErrorMessage: fmt.Sprintf("Failed to get available schema versions: %v", err), + }, nil + } + + // Validate the version parameter + if input.Version == "" { + return nil, GetSchemaOutput{ + ErrorMessage: "Version parameter is required. Available versions: " + strings.Join(availableVersions, ", "), + AvailableVersions: availableVersions, + }, nil + } + + // Check if the requested version is available + versionValid := false + + for _, version := range availableVersions { + if input.Version == version { + versionValid = true + + break + } + } + + if !versionValid { + return nil, GetSchemaOutput{ + ErrorMessage: fmt.Sprintf("Invalid version '%s'. Available versions: %s", input.Version, strings.Join(availableVersions, ", ")), + AvailableVersions: availableVersions, + }, nil + } + + // Get schema content using the OASF SDK + schemaContent, err := validator.GetSchemaContent(input.Version) + if err != nil { + return nil, GetSchemaOutput{ + Version: input.Version, + ErrorMessage: fmt.Sprintf("Failed to get OASF %s schema: %v", input.Version, err), + AvailableVersions: availableVersions, + }, nil + } + + // Return the schema content + return nil, GetSchemaOutput{ + Version: input.Version, + Schema: string(schemaContent), + AvailableVersions: availableVersions, + }, nil +} diff --git a/mcp/tools/get_schema_domains.go b/mcp/tools/get_schema_domains.go index aad4bf8a7..0fcf44784 100644 --- a/mcp/tools/get_schema_domains.go +++ b/mcp/tools/get_schema_domains.go @@ -1,111 +1,111 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Intentional duplication with skills file for separate domain/skill handling -package tools - -import ( - "context" - "fmt" - - "github.com/agntcy/oasf-sdk/pkg/validator" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// GetSchemaDomainsInput represents the input for getting OASF schema domains. -type GetSchemaDomainsInput struct { - Version string `json:"version" jsonschema:"OASF schema version to retrieve domains from (e.g., 0.7.0, 0.8.0)"` - ParentDomain string `json:"parent_domain,omitempty" jsonschema:"Optional parent domain name to filter sub-domains (e.g., 'artificial_intelligence')"` -} - -// DomainItem represents a domain in the OASF schema. 
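The GetSchema handler can also be driven directly, outside the MCP runtime, which is how the tests in this patch exercise it. A minimal usage sketch follows; the tools import path is an assumption, so adjust it to wherever this package actually lives:

package main

import (
	"context"
	"fmt"

	"github.com/agntcy/dir/mcp/tools" // hypothetical import path for this package
)

func main() {
	_, out, err := tools.GetSchema(context.Background(), nil, tools.GetSchemaInput{Version: "0.7.0"})
	if err != nil {
		panic(err)
	}

	if out.ErrorMessage != "" {
		fmt.Println("error:", out.ErrorMessage)
		fmt.Println("available versions:", out.AvailableVersions)

		return
	}

	fmt.Printf("fetched %d bytes of OASF %s schema JSON\n", len(out.Schema), out.Version)
}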
-type DomainItem struct { - Name string `json:"name"` - Caption string `json:"caption,omitempty"` - ID int `json:"id,omitempty"` -} - -// GetSchemaDomainsOutput represents the output after getting OASF schema domains. -type GetSchemaDomainsOutput struct { - Version string `json:"version" jsonschema:"The requested OASF schema version"` - Domains []DomainItem `json:"domains" jsonschema:"List of domains (top-level or filtered by parent)"` - ParentDomain string `json:"parent_domain,omitempty" jsonschema:"The parent domain filter if specified"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if domain retrieval failed"` - AvailableVersions []string `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"` -} - -// GetSchemaDomains retrieves domains from the OASF schema for the specified version. -// If parent_domain is provided, returns only sub-domains under that parent. -// Otherwise, returns all top-level domains. -func GetSchemaDomains(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaDomainsInput) ( - *mcp.CallToolResult, - GetSchemaDomainsOutput, - error, -) { - availableVersions, err := validateVersion(input.Version) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaDomainsOutput{ - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - domainsJSON, err := validator.GetSchemaDomains(input.Version) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaDomainsOutput{ - Version: input.Version, - ErrorMessage: fmt.Sprintf("Failed to get domains from OASF %s schema: %v", input.Version, err), - AvailableVersions: availableVersions, - }, nil - } - - allDomains, err := parseSchemaData(domainsJSON, parseItemFromSchema) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaDomainsOutput{ - Version: input.Version, - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - resultDomains, err := filterDomains(allDomains, input.ParentDomain) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaDomainsOutput{ - Version: input.Version, - ParentDomain: input.ParentDomain, - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - return nil, GetSchemaDomainsOutput{ - Version: input.Version, - Domains: convertToDomainItems(resultDomains), - ParentDomain: input.ParentDomain, - AvailableVersions: availableVersions, - }, nil -} - -// filterDomains filters domains based on parent parameter. -func filterDomains(allDomains []schemaClass, parent string) ([]schemaClass, error) { - if parent != "" { - return filterChildItems(allDomains, parent) - } - - return extractTopLevelCategories(allDomains), nil -} - -// convertToDomainItems converts generic schema items to DomainItem type. 
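The conversion helper below relies on a plain Go struct conversion: DomainItem(item) compiles because DomainItem and the package-internal schemaClass evidently share identical field names and types, and Go ignores differing struct tags when checking convertibility (since Go 1.8). A minimal illustration with hypothetical stand-in types:

package main

import "fmt"

// source and target have identical field names and types, so a direct
// conversion is legal even though only target carries JSON tags.
type source struct {
	Name    string
	Caption string
	ID      int
}

type target struct {
	Name    string `json:"name"`
	Caption string `json:"caption,omitempty"`
	ID      int    `json:"id,omitempty"`
}

func main() {
	s := source{Name: "natural_language_processing", Caption: "NLP", ID: 1}
	fmt.Printf("%+v\n", target(s)) // {Name:natural_language_processing Caption:NLP ID:1}
}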
-func convertToDomainItems(items []schemaClass) []DomainItem {
-	domains := make([]DomainItem, len(items))
-
-	for i, item := range items {
-		domains[i] = DomainItem(item)
-	}
-
-	return domains
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+//nolint:dupl // Intentional duplication with skills file for separate domain/skill handling
+package tools
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/agntcy/oasf-sdk/pkg/validator"
+	"github.com/modelcontextprotocol/go-sdk/mcp"
+)
+
+// GetSchemaDomainsInput represents the input for getting OASF schema domains.
+type GetSchemaDomainsInput struct {
+	Version      string `json:"version" jsonschema:"OASF schema version to retrieve domains from (e.g., 0.7.0, 0.8.0)"`
+	ParentDomain string `json:"parent_domain,omitempty" jsonschema:"Optional parent domain name to filter sub-domains (e.g., 'artificial_intelligence')"`
+}
+
+// DomainItem represents a domain in the OASF schema.
+type DomainItem struct {
+	Name    string `json:"name"`
+	Caption string `json:"caption,omitempty"`
+	ID      int    `json:"id,omitempty"`
+}
+
+// GetSchemaDomainsOutput represents the output after getting OASF schema domains.
+type GetSchemaDomainsOutput struct {
+	Version           string       `json:"version" jsonschema:"The requested OASF schema version"`
+	Domains           []DomainItem `json:"domains" jsonschema:"List of domains (top-level or filtered by parent)"`
+	ParentDomain      string       `json:"parent_domain,omitempty" jsonschema:"The parent domain filter if specified"`
+	ErrorMessage      string       `json:"error_message,omitempty" jsonschema:"Error message if domain retrieval failed"`
+	AvailableVersions []string     `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"`
+}
+
+// GetSchemaDomains retrieves domains from the OASF schema for the specified version.
+// If parent_domain is provided, returns only sub-domains under that parent.
+// Otherwise, returns all top-level domains.
+func GetSchemaDomains(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaDomainsInput) (
+	*mcp.CallToolResult,
+	GetSchemaDomainsOutput,
+	error,
+) {
+	availableVersions, err := validateVersion(input.Version)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaDomainsOutput{
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	domainsJSON, err := validator.GetSchemaDomains(input.Version)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaDomainsOutput{
+			Version:           input.Version,
+			ErrorMessage:      fmt.Sprintf("Failed to get domains from OASF %s schema: %v", input.Version, err),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	allDomains, err := parseSchemaData(domainsJSON, parseItemFromSchema)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaDomainsOutput{
+			Version:           input.Version,
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	resultDomains, err := filterDomains(allDomains, input.ParentDomain)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaDomainsOutput{
+			Version:           input.Version,
+			ParentDomain:      input.ParentDomain,
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	return nil, GetSchemaDomainsOutput{
+		Version:           input.Version,
+		Domains:           convertToDomainItems(resultDomains),
+		ParentDomain:      input.ParentDomain,
+		AvailableVersions: availableVersions,
+	}, nil
+}
+
+// filterDomains filters domains based on parent parameter.
+func filterDomains(allDomains []schemaClass, parent string) ([]schemaClass, error) {
+	if parent != "" {
+		return filterChildItems(allDomains, parent)
+	}
+
+	return extractTopLevelCategories(allDomains), nil
+}
+
+// convertToDomainItems converts generic schema items to DomainItem type.
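filterDomains above either drills into a parent or keeps only top-level entries. filterChildItems and extractTopLevelCategories are defined elsewhere in this package and do not appear in this patch, so the sketch below is only a guess at the semantics, assuming hierarchical slash-separated domain names; the real helpers may use different rules:

package main

import (
	"fmt"
	"strings"
)

// children is a hypothetical stand-in for filterChildItems, assuming names
// nest with '/' separators (e.g. "technology/networking").
func children(all []string, parent string) []string {
	var out []string

	for _, name := range all {
		if strings.HasPrefix(name, parent+"/") {
			out = append(out, name)
		}
	}

	return out
}

func main() {
	all := []string{"technology", "technology/networking", "finance"}
	fmt.Println(children(all, "technology")) // [technology/networking]
}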
+func convertToDomainItems(items []schemaClass) []DomainItem { + domains := make([]DomainItem, len(items)) + + for i, item := range items { + domains[i] = DomainItem(item) + } + + return domains +} diff --git a/mcp/tools/get_schema_domains_test.go b/mcp/tools/get_schema_domains_test.go index 28956878e..d7f5b5b09 100644 --- a/mcp/tools/get_schema_domains_test.go +++ b/mcp/tools/get_schema_domains_test.go @@ -1,194 +1,194 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Intentional duplication with skills test for separate domain/skill testing -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetSchemaDomains(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - tests := []struct { - name string - input GetSchemaDomainsInput - expectError bool - expectDomains bool - checkCallback func(t *testing.T, output GetSchemaDomainsOutput) - }{ - { - name: "Get top-level domains for version 0.7.0", - input: GetSchemaDomainsInput{ - Version: "0.7.0", - }, - expectError: false, - expectDomains: true, - checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { - t.Helper() - - assert.Equal(t, "0.7.0", output.Version) - assert.Empty(t, output.ErrorMessage) - assert.NotEmpty(t, output.Domains) - - // Check that top-level domains have expected fields - for _, domain := range output.Domains { - assert.NotEmpty(t, domain.Name, "Each domain should have a name") - } - }, - }, - { - name: "Get sub-domains for a parent domain", - input: GetSchemaDomainsInput{ - Version: "0.7.0", - ParentDomain: "technology", - }, - expectError: false, - expectDomains: true, - checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { - t.Helper() - - assert.Equal(t, "0.7.0", output.Version) - assert.Equal(t, "technology", output.ParentDomain) - assert.Empty(t, output.ErrorMessage) - assert.NotEmpty(t, output.Domains) - - // All returned domains should be sub-domains - for _, domain := range output.Domains { - assert.NotEmpty(t, domain.Name, "Each sub-domain should have a name") - } - }, - }, - { - name: "Invalid version", - input: GetSchemaDomainsInput{ - Version: "99.99.99", - }, - expectError: false, - expectDomains: false, - checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "invalid version") - assert.NotEmpty(t, output.AvailableVersions) - }, - }, - { - name: "Missing version parameter", - input: GetSchemaDomainsInput{ - Version: "", - }, - expectError: false, - expectDomains: false, - checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "version parameter is required") - assert.NotEmpty(t, output.AvailableVersions) - }, - }, - { - name: "Non-existent parent domain", - input: GetSchemaDomainsInput{ - Version: "0.7.0", - ParentDomain: "non_existent_domain", - }, - expectError: false, - expectDomains: false, - checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "not found") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result, output, err := GetSchemaDomains(ctx, nil, tt.input) - - if tt.expectError { - require.Error(t, err) - - return - 
} - - require.NoError(t, err) - assert.Nil(t, result) // Tool handlers typically return nil for result - - if tt.expectDomains { - assert.NotEmpty(t, output.Domains, "Expected to receive domains") - } - - if tt.checkCallback != nil { - tt.checkCallback(t, output) - } - }) - } -} - -func TestParseItemFromSchemaForDomains(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - defMap map[string]interface{} - expected schemaClass - }{ - { - name: "Parse domain with name, caption (title), and ID", - defMap: map[string]interface{}{ - "title": "Test Domain Caption", - "properties": map[string]interface{}{ - "name": map[string]interface{}{ - "const": "test_domain", - }, - "id": map[string]interface{}{ - "const": float64(123), - }, - }, - }, - expected: schemaClass{ - Name: "test_domain", - Caption: "Test Domain Caption", - ID: 123, - }, - }, - { - name: "Parse domain with missing fields", - defMap: map[string]interface{}{ - "properties": map[string]interface{}{ - "name": map[string]interface{}{ - "const": "minimal_domain", - }, - }, - }, - expected: schemaClass{ - Name: "minimal_domain", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := parseItemFromSchema(tt.defMap) - assert.Equal(t, tt.expected.Name, result.Name) - assert.Equal(t, tt.expected.Caption, result.Caption) - assert.Equal(t, tt.expected.ID, result.ID) - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupl // Intentional duplication with skills test for separate domain/skill testing +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSchemaDomains(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := []struct { + name string + input GetSchemaDomainsInput + expectError bool + expectDomains bool + checkCallback func(t *testing.T, output GetSchemaDomainsOutput) + }{ + { + name: "Get top-level domains for version 0.7.0", + input: GetSchemaDomainsInput{ + Version: "0.7.0", + }, + expectError: false, + expectDomains: true, + checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { + t.Helper() + + assert.Equal(t, "0.7.0", output.Version) + assert.Empty(t, output.ErrorMessage) + assert.NotEmpty(t, output.Domains) + + // Check that top-level domains have expected fields + for _, domain := range output.Domains { + assert.NotEmpty(t, domain.Name, "Each domain should have a name") + } + }, + }, + { + name: "Get sub-domains for a parent domain", + input: GetSchemaDomainsInput{ + Version: "0.7.0", + ParentDomain: "technology", + }, + expectError: false, + expectDomains: true, + checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { + t.Helper() + + assert.Equal(t, "0.7.0", output.Version) + assert.Equal(t, "technology", output.ParentDomain) + assert.Empty(t, output.ErrorMessage) + assert.NotEmpty(t, output.Domains) + + // All returned domains should be sub-domains + for _, domain := range output.Domains { + assert.NotEmpty(t, domain.Name, "Each sub-domain should have a name") + } + }, + }, + { + name: "Invalid version", + input: GetSchemaDomainsInput{ + Version: "99.99.99", + }, + expectError: false, + expectDomains: false, + checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "invalid version") + assert.NotEmpty(t, output.AvailableVersions) + }, 
+ }, + { + name: "Missing version parameter", + input: GetSchemaDomainsInput{ + Version: "", + }, + expectError: false, + expectDomains: false, + checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "version parameter is required") + assert.NotEmpty(t, output.AvailableVersions) + }, + }, + { + name: "Non-existent parent domain", + input: GetSchemaDomainsInput{ + Version: "0.7.0", + ParentDomain: "non_existent_domain", + }, + expectError: false, + expectDomains: false, + checkCallback: func(t *testing.T, output GetSchemaDomainsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "not found") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result, output, err := GetSchemaDomains(ctx, nil, tt.input) + + if tt.expectError { + require.Error(t, err) + + return + } + + require.NoError(t, err) + assert.Nil(t, result) // Tool handlers typically return nil for result + + if tt.expectDomains { + assert.NotEmpty(t, output.Domains, "Expected to receive domains") + } + + if tt.checkCallback != nil { + tt.checkCallback(t, output) + } + }) + } +} + +func TestParseItemFromSchemaForDomains(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + defMap map[string]interface{} + expected schemaClass + }{ + { + name: "Parse domain with name, caption (title), and ID", + defMap: map[string]interface{}{ + "title": "Test Domain Caption", + "properties": map[string]interface{}{ + "name": map[string]interface{}{ + "const": "test_domain", + }, + "id": map[string]interface{}{ + "const": float64(123), + }, + }, + }, + expected: schemaClass{ + Name: "test_domain", + Caption: "Test Domain Caption", + ID: 123, + }, + }, + { + name: "Parse domain with missing fields", + defMap: map[string]interface{}{ + "properties": map[string]interface{}{ + "name": map[string]interface{}{ + "const": "minimal_domain", + }, + }, + }, + expected: schemaClass{ + Name: "minimal_domain", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := parseItemFromSchema(tt.defMap) + assert.Equal(t, tt.expected.Name, result.Name) + assert.Equal(t, tt.expected.Caption, result.Caption) + assert.Equal(t, tt.expected.ID, result.ID) + }) + } +} diff --git a/mcp/tools/get_schema_skills.go b/mcp/tools/get_schema_skills.go index 5d62074e5..c8998aac3 100644 --- a/mcp/tools/get_schema_skills.go +++ b/mcp/tools/get_schema_skills.go @@ -1,111 +1,111 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Intentional duplication with domains file for separate domain/skill handling -package tools - -import ( - "context" - "fmt" - - "github.com/agntcy/oasf-sdk/pkg/validator" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// GetSchemaSkillsInput represents the input for getting OASF schema skills. -type GetSchemaSkillsInput struct { - Version string `json:"version" jsonschema:"OASF schema version to retrieve skills from (e.g., 0.7.0, 0.8.0)"` - ParentSkill string `json:"parent_skill,omitempty" jsonschema:"Optional parent skill name to filter sub-skills (e.g., 'retrieval_augmented_generation')"` -} - -// SkillItem represents a skill in the OASF schema. 
-type SkillItem struct { - Name string `json:"name"` - Caption string `json:"caption,omitempty"` - ID int `json:"id,omitempty"` -} - -// GetSchemaSkillsOutput represents the output after getting OASF schema skills. -type GetSchemaSkillsOutput struct { - Version string `json:"version" jsonschema:"The requested OASF schema version"` - Skills []SkillItem `json:"skills" jsonschema:"List of skills (top-level or filtered by parent)"` - ParentSkill string `json:"parent_skill,omitempty" jsonschema:"The parent skill filter if specified"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if skill retrieval failed"` - AvailableVersions []string `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"` -} - -// GetSchemaSkills retrieves skills from the OASF schema for the specified version. -// If parent_skill is provided, returns only sub-skills under that parent. -// Otherwise, returns all top-level skills. -func GetSchemaSkills(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaSkillsInput) ( - *mcp.CallToolResult, - GetSchemaSkillsOutput, - error, -) { - availableVersions, err := validateVersion(input.Version) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaSkillsOutput{ - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - skillsJSON, err := validator.GetSchemaSkills(input.Version) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaSkillsOutput{ - Version: input.Version, - ErrorMessage: fmt.Sprintf("Failed to get skills from OASF %s schema: %v", input.Version, err), - AvailableVersions: availableVersions, - }, nil - } - - allSkills, err := parseSchemaData(skillsJSON, parseItemFromSchema) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaSkillsOutput{ - Version: input.Version, - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - resultSkills, err := filterSkills(allSkills, input.ParentSkill) - if err != nil { - //nolint:nilerr // MCP tools communicate errors through output, not error return - return nil, GetSchemaSkillsOutput{ - Version: input.Version, - ParentSkill: input.ParentSkill, - ErrorMessage: err.Error(), - AvailableVersions: availableVersions, - }, nil - } - - return nil, GetSchemaSkillsOutput{ - Version: input.Version, - Skills: convertToSkillItems(resultSkills), - ParentSkill: input.ParentSkill, - AvailableVersions: availableVersions, - }, nil -} - -// filterSkills filters skills based on parent parameter. -func filterSkills(allSkills []schemaClass, parent string) ([]schemaClass, error) { - if parent != "" { - return filterChildItems(allSkills, parent) - } - - return extractTopLevelCategories(allSkills), nil -} - -// convertToSkillItems converts generic schema items to SkillItem type. 
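Note the //nolint:nilerr markers in the handler above: expected failures are returned inside the output struct with a nil Go error, so the MCP framework reports them to the client as tool results rather than transport failures. A stripped-down illustration of the pattern, with hypothetical names:

package main

import "fmt"

// toyOutput mirrors the shape of the outputs above: failures ride in the
// struct, not in the Go error.
type toyOutput struct {
	Value        string
	ErrorMessage string
}

// toyTool returns a nil error for expected failures, so the caller treats
// them as results to surface rather than as a broken call.
func toyTool(version string) (toyOutput, error) {
	if version == "" {
		return toyOutput{ErrorMessage: "version parameter is required"}, nil
	}

	return toyOutput{Value: "ok"}, nil
}

func main() {
	out, err := toyTool("")
	fmt.Printf("out=%+v err=%v\n", out, err) // err is <nil>; the failure is in out.ErrorMessage
}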
-func convertToSkillItems(items []schemaClass) []SkillItem {
-	skills := make([]SkillItem, len(items))
-
-	for i, item := range items {
-		skills[i] = SkillItem(item)
-	}
-
-	return skills
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+//nolint:dupl // Intentional duplication with domains file for separate domain/skill handling
+package tools
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/agntcy/oasf-sdk/pkg/validator"
+	"github.com/modelcontextprotocol/go-sdk/mcp"
+)
+
+// GetSchemaSkillsInput represents the input for getting OASF schema skills.
+type GetSchemaSkillsInput struct {
+	Version     string `json:"version" jsonschema:"OASF schema version to retrieve skills from (e.g., 0.7.0, 0.8.0)"`
+	ParentSkill string `json:"parent_skill,omitempty" jsonschema:"Optional parent skill name to filter sub-skills (e.g., 'retrieval_augmented_generation')"`
+}
+
+// SkillItem represents a skill in the OASF schema.
+type SkillItem struct {
+	Name    string `json:"name"`
+	Caption string `json:"caption,omitempty"`
+	ID      int    `json:"id,omitempty"`
+}
+
+// GetSchemaSkillsOutput represents the output after getting OASF schema skills.
+type GetSchemaSkillsOutput struct {
+	Version           string      `json:"version" jsonschema:"The requested OASF schema version"`
+	Skills            []SkillItem `json:"skills" jsonschema:"List of skills (top-level or filtered by parent)"`
+	ParentSkill       string      `json:"parent_skill,omitempty" jsonschema:"The parent skill filter if specified"`
+	ErrorMessage      string      `json:"error_message,omitempty" jsonschema:"Error message if skill retrieval failed"`
+	AvailableVersions []string    `json:"available_versions,omitempty" jsonschema:"List of available OASF schema versions"`
+}
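The GetSchemaSkills handler defined next can be invoked directly, mirroring the domains tool; a minimal usage sketch (the tools import path is an assumption):

package main

import (
	"context"
	"fmt"

	"github.com/agntcy/dir/mcp/tools" // hypothetical import path for this package
)

func main() {
	in := tools.GetSchemaSkillsInput{
		Version:     "0.7.0",
		ParentSkill: "retrieval_augmented_generation", // omit to list top-level skills
	}

	_, out, err := tools.GetSchemaSkills(context.Background(), nil, in)
	if err != nil {
		panic(err)
	}

	if out.ErrorMessage != "" {
		fmt.Println("error:", out.ErrorMessage)

		return
	}

	for _, s := range out.Skills {
		fmt.Println(s.Name)
	}
}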
+
+// GetSchemaSkills retrieves skills from the OASF schema for the specified version.
+// If parent_skill is provided, returns only sub-skills under that parent.
+// Otherwise, returns all top-level skills.
+func GetSchemaSkills(_ context.Context, _ *mcp.CallToolRequest, input GetSchemaSkillsInput) (
+	*mcp.CallToolResult,
+	GetSchemaSkillsOutput,
+	error,
+) {
+	availableVersions, err := validateVersion(input.Version)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaSkillsOutput{
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	skillsJSON, err := validator.GetSchemaSkills(input.Version)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaSkillsOutput{
+			Version:           input.Version,
+			ErrorMessage:      fmt.Sprintf("Failed to get skills from OASF %s schema: %v", input.Version, err),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	allSkills, err := parseSchemaData(skillsJSON, parseItemFromSchema)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaSkillsOutput{
+			Version:           input.Version,
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	resultSkills, err := filterSkills(allSkills, input.ParentSkill)
+	if err != nil {
+		//nolint:nilerr // MCP tools communicate errors through output, not error return
+		return nil, GetSchemaSkillsOutput{
+			Version:           input.Version,
+			ParentSkill:       input.ParentSkill,
+			ErrorMessage:      err.Error(),
+			AvailableVersions: availableVersions,
+		}, nil
+	}
+
+	return nil, GetSchemaSkillsOutput{
+		Version:           input.Version,
+		Skills:            convertToSkillItems(resultSkills),
+		ParentSkill:       input.ParentSkill,
+		AvailableVersions: availableVersions,
+	}, nil
+}
+
+// filterSkills filters skills based on parent parameter.
+func filterSkills(allSkills []schemaClass, parent string) ([]schemaClass, error) {
+	if parent != "" {
+		return filterChildItems(allSkills, parent)
+	}
+
+	return extractTopLevelCategories(allSkills), nil
+}
+
+// convertToSkillItems converts generic schema items to SkillItem type.
+func convertToSkillItems(items []schemaClass) []SkillItem { + skills := make([]SkillItem, len(items)) + + for i, item := range items { + skills[i] = SkillItem(item) + } + + return skills +} diff --git a/mcp/tools/get_schema_skills_test.go b/mcp/tools/get_schema_skills_test.go index 54ad20952..52b301c5d 100644 --- a/mcp/tools/get_schema_skills_test.go +++ b/mcp/tools/get_schema_skills_test.go @@ -1,194 +1,194 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Intentional duplication with domains test for separate domain/skill testing -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetSchemaSkills(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - tests := []struct { - name string - input GetSchemaSkillsInput - expectError bool - expectSkills bool - checkCallback func(t *testing.T, output GetSchemaSkillsOutput) - }{ - { - name: "Get top-level skills for version 0.7.0", - input: GetSchemaSkillsInput{ - Version: "0.7.0", - }, - expectError: false, - expectSkills: true, - checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { - t.Helper() - - assert.Equal(t, "0.7.0", output.Version) - assert.Empty(t, output.ErrorMessage) - assert.NotEmpty(t, output.Skills) - - // Check that top-level skills have expected fields - for _, skill := range output.Skills { - assert.NotEmpty(t, skill.Name, "Each skill should have a name") - } - }, - }, - { - name: "Get sub-skills for a parent skill", - input: GetSchemaSkillsInput{ - Version: "0.7.0", - ParentSkill: "retrieval_augmented_generation", - }, - expectError: false, - expectSkills: true, - checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { - t.Helper() - - assert.Equal(t, "0.7.0", output.Version) - assert.Equal(t, "retrieval_augmented_generation", output.ParentSkill) - assert.Empty(t, output.ErrorMessage) - assert.NotEmpty(t, output.Skills) - - // All returned skills should be sub-skills - for _, skill := range output.Skills { - assert.NotEmpty(t, skill.Name, "Each sub-skill should have a name") - } - }, - }, - { - name: "Invalid version", - input: GetSchemaSkillsInput{ - Version: "99.99.99", - }, - expectError: false, - expectSkills: false, - checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "invalid version") - assert.NotEmpty(t, output.AvailableVersions) - }, - }, - { - name: "Missing version parameter", - input: GetSchemaSkillsInput{ - Version: "", - }, - expectError: false, - expectSkills: false, - checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "version parameter is required") - assert.NotEmpty(t, output.AvailableVersions) - }, - }, - { - name: "Non-existent parent skill", - input: GetSchemaSkillsInput{ - Version: "0.7.0", - ParentSkill: "non_existent_skill", - }, - expectError: false, - expectSkills: false, - checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { - t.Helper() - - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "not found") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result, output, err := GetSchemaSkills(ctx, nil, tt.input) - - if tt.expectError { - require.Error(t, err) - - return - } - - 
require.NoError(t, err) - assert.Nil(t, result) // Tool handlers typically return nil for result - - if tt.expectSkills { - assert.NotEmpty(t, output.Skills, "Expected to receive skills") - } - - if tt.checkCallback != nil { - tt.checkCallback(t, output) - } - }) - } -} - -func TestParseItemFromSchemaForSkills(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - defMap map[string]interface{} - expected schemaClass - }{ - { - name: "Parse skill with name, caption (title), and ID", - defMap: map[string]interface{}{ - "title": "Test Skill Caption", - "properties": map[string]interface{}{ - "name": map[string]interface{}{ - "const": "test_skill", - }, - "id": map[string]interface{}{ - "const": float64(123), - }, - }, - }, - expected: schemaClass{ - Name: "test_skill", - Caption: "Test Skill Caption", - ID: 123, - }, - }, - { - name: "Parse skill with missing fields", - defMap: map[string]interface{}{ - "properties": map[string]interface{}{ - "name": map[string]interface{}{ - "const": "minimal_skill", - }, - }, - }, - expected: schemaClass{ - Name: "minimal_skill", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := parseItemFromSchema(tt.defMap) - assert.Equal(t, tt.expected.Name, result.Name) - assert.Equal(t, tt.expected.Caption, result.Caption) - assert.Equal(t, tt.expected.ID, result.ID) - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupl // Intentional duplication with domains test for separate domain/skill testing +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSchemaSkills(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + tests := []struct { + name string + input GetSchemaSkillsInput + expectError bool + expectSkills bool + checkCallback func(t *testing.T, output GetSchemaSkillsOutput) + }{ + { + name: "Get top-level skills for version 0.7.0", + input: GetSchemaSkillsInput{ + Version: "0.7.0", + }, + expectError: false, + expectSkills: true, + checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { + t.Helper() + + assert.Equal(t, "0.7.0", output.Version) + assert.Empty(t, output.ErrorMessage) + assert.NotEmpty(t, output.Skills) + + // Check that top-level skills have expected fields + for _, skill := range output.Skills { + assert.NotEmpty(t, skill.Name, "Each skill should have a name") + } + }, + }, + { + name: "Get sub-skills for a parent skill", + input: GetSchemaSkillsInput{ + Version: "0.7.0", + ParentSkill: "retrieval_augmented_generation", + }, + expectError: false, + expectSkills: true, + checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { + t.Helper() + + assert.Equal(t, "0.7.0", output.Version) + assert.Equal(t, "retrieval_augmented_generation", output.ParentSkill) + assert.Empty(t, output.ErrorMessage) + assert.NotEmpty(t, output.Skills) + + // All returned skills should be sub-skills + for _, skill := range output.Skills { + assert.NotEmpty(t, skill.Name, "Each sub-skill should have a name") + } + }, + }, + { + name: "Invalid version", + input: GetSchemaSkillsInput{ + Version: "99.99.99", + }, + expectError: false, + expectSkills: false, + checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "invalid version") + assert.NotEmpty(t, output.AvailableVersions) + }, + }, + { 
+ name: "Missing version parameter", + input: GetSchemaSkillsInput{ + Version: "", + }, + expectError: false, + expectSkills: false, + checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "version parameter is required") + assert.NotEmpty(t, output.AvailableVersions) + }, + }, + { + name: "Non-existent parent skill", + input: GetSchemaSkillsInput{ + Version: "0.7.0", + ParentSkill: "non_existent_skill", + }, + expectError: false, + expectSkills: false, + checkCallback: func(t *testing.T, output GetSchemaSkillsOutput) { + t.Helper() + + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "not found") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result, output, err := GetSchemaSkills(ctx, nil, tt.input) + + if tt.expectError { + require.Error(t, err) + + return + } + + require.NoError(t, err) + assert.Nil(t, result) // Tool handlers typically return nil for result + + if tt.expectSkills { + assert.NotEmpty(t, output.Skills, "Expected to receive skills") + } + + if tt.checkCallback != nil { + tt.checkCallback(t, output) + } + }) + } +} + +func TestParseItemFromSchemaForSkills(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + defMap map[string]interface{} + expected schemaClass + }{ + { + name: "Parse skill with name, caption (title), and ID", + defMap: map[string]interface{}{ + "title": "Test Skill Caption", + "properties": map[string]interface{}{ + "name": map[string]interface{}{ + "const": "test_skill", + }, + "id": map[string]interface{}{ + "const": float64(123), + }, + }, + }, + expected: schemaClass{ + Name: "test_skill", + Caption: "Test Skill Caption", + ID: 123, + }, + }, + { + name: "Parse skill with missing fields", + defMap: map[string]interface{}{ + "properties": map[string]interface{}{ + "name": map[string]interface{}{ + "const": "minimal_skill", + }, + }, + }, + expected: schemaClass{ + Name: "minimal_skill", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := parseItemFromSchema(tt.defMap) + assert.Equal(t, tt.expected.Name, result.Name) + assert.Equal(t, tt.expected.Caption, result.Caption) + assert.Equal(t, tt.expected.ID, result.ID) + }) + } +} diff --git a/mcp/tools/get_schema_test.go b/mcp/tools/get_schema_test.go index 78d0ea4b6..9edb0b997 100644 --- a/mcp/tools/get_schema_test.go +++ b/mcp/tools/get_schema_test.go @@ -1,51 +1,51 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetSchema(t *testing.T) { - t.Run("should return schema for valid version", func(t *testing.T) { - ctx := context.Background() - input := GetSchemaInput{Version: "0.7.0"} - - _, output, err := GetSchema(ctx, nil, input) - - require.NoError(t, err) - assert.Empty(t, output.ErrorMessage) - assert.Equal(t, "0.7.0", output.Version) - assert.NotEmpty(t, output.Schema) - assert.Greater(t, len(output.Schema), 100, "Schema should be a reasonable size") - assert.NotEmpty(t, output.AvailableVersions) - }) - - t.Run("should return error for invalid version", func(t *testing.T) { - ctx := context.Background() - input := GetSchemaInput{Version: "99.99.99"} - - _, output, err := GetSchema(ctx, nil, input) - - require.NoError(t, err) - 
assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "Invalid version") - assert.Empty(t, output.Schema) - }) - - t.Run("should return error for empty version", func(t *testing.T) { - ctx := context.Background() - input := GetSchemaInput{Version: ""} - - _, output, err := GetSchema(ctx, nil, input) - - require.NoError(t, err) - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "required") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSchema(t *testing.T) { + t.Run("should return schema for valid version", func(t *testing.T) { + ctx := context.Background() + input := GetSchemaInput{Version: "0.7.0"} + + _, output, err := GetSchema(ctx, nil, input) + + require.NoError(t, err) + assert.Empty(t, output.ErrorMessage) + assert.Equal(t, "0.7.0", output.Version) + assert.NotEmpty(t, output.Schema) + assert.Greater(t, len(output.Schema), 100, "Schema should be a reasonable size") + assert.NotEmpty(t, output.AvailableVersions) + }) + + t.Run("should return error for invalid version", func(t *testing.T) { + ctx := context.Background() + input := GetSchemaInput{Version: "99.99.99"} + + _, output, err := GetSchema(ctx, nil, input) + + require.NoError(t, err) + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "Invalid version") + assert.Empty(t, output.Schema) + }) + + t.Run("should return error for empty version", func(t *testing.T) { + ctx := context.Background() + input := GetSchemaInput{Version: ""} + + _, output, err := GetSchema(ctx, nil, input) + + require.NoError(t, err) + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "required") + }) +} diff --git a/mcp/tools/import_record.go b/mcp/tools/import_record.go index d654131f8..1c7636edf 100644 --- a/mcp/tools/import_record.go +++ b/mcp/tools/import_record.go @@ -1,103 +1,103 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - "strings" - - "github.com/agntcy/oasf-sdk/pkg/translator" - "github.com/modelcontextprotocol/go-sdk/mcp" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/structpb" -) - -// ImportRecordInput defines the input parameters for importing a record. -type ImportRecordInput struct { - SourceData string `json:"source_data" jsonschema:"JSON string of the source data to import (required)"` - SourceFormat string `json:"source_format" jsonschema:"Source format to import from (e.g., 'mcp') (required)"` -} - -// ImportRecordOutput defines the output of importing a record. -type ImportRecordOutput struct { - RecordJSON string `json:"record_json,omitempty" jsonschema:"The imported OASF record (JSON string)"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if import failed"` -} - -// ImportRecord imports data from a different format to an OASF agent record using the OASF SDK translator. -// Currently supported formats: -// - "mcp": Model Context Protocol format. -// - "a2a": Agent-to-Agent (A2A) format. 
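A usage sketch for the ImportRecord handler whose implementation follows; both the import path and the sample payload fields are assumptions for illustration, not a confirmed MCP schema (the fields a real import needs are defined by the oasf-sdk translator):

package main

import (
	"context"
	"fmt"

	"github.com/agntcy/dir/mcp/tools" // hypothetical import path for this package
)

func main() {
	// Illustrative payload only.
	src := `{"name": "file-server", "description": "Serves files over MCP"}`

	_, out, err := tools.ImportRecord(context.Background(), nil, tools.ImportRecordInput{
		SourceData:   src,
		SourceFormat: "MCP", // format matching is case-insensitive, so "MCP" == "mcp"
	})
	if err != nil {
		panic(err)
	}

	if out.ErrorMessage != "" {
		fmt.Println("import failed:", out.ErrorMessage)

		return
	}

	fmt.Println(out.RecordJSON)
}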
-func ImportRecord(ctx context.Context, _ *mcp.CallToolRequest, input ImportRecordInput) ( - *mcp.CallToolResult, - ImportRecordOutput, - error, -) { - // Validate input - if input.SourceData == "" { - return nil, ImportRecordOutput{ - ErrorMessage: "source_data is required", - }, nil - } - - if input.SourceFormat == "" { - return nil, ImportRecordOutput{ - ErrorMessage: "source_format is required", - }, nil - } - - // Parse the source data into a structpb.Struct - var sourceStruct structpb.Struct - if err := protojson.Unmarshal([]byte(input.SourceData), &sourceStruct); err != nil { - return nil, ImportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to parse source data JSON: %v", err), - }, nil - } - - // Normalize the source format to lowercase for comparison - sourceFormat := strings.ToLower(strings.TrimSpace(input.SourceFormat)) - - // Import based on source format - var recordStruct *structpb.Struct - - var err error - - switch sourceFormat { - case "mcp": - recordStruct, err = translator.MCPToRecord(&sourceStruct) - if err != nil { - return nil, ImportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to import from MCP format: %v", err), - }, nil - } - - case "a2a": - recordStruct, err = translator.A2AToRecord(&sourceStruct) - if err != nil { - return nil, ImportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to import from A2A format: %v", err), - }, nil - } - - default: - return nil, ImportRecordOutput{ - ErrorMessage: fmt.Sprintf("Unsupported source format: %s. Supported formats: mcp, a2a", input.SourceFormat), - }, nil - } - - // Convert the record struct to JSON - recordJSON, err := protojson.MarshalOptions{ - Indent: " ", - }.Marshal(recordStruct) - if err != nil { - return nil, ImportRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to marshal record to JSON: %v", err), - }, nil - } - - return nil, ImportRecordOutput{ - RecordJSON: string(recordJSON), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + "strings" + + "github.com/agntcy/oasf-sdk/pkg/translator" + "github.com/modelcontextprotocol/go-sdk/mcp" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +// ImportRecordInput defines the input parameters for importing a record. +type ImportRecordInput struct { + SourceData string `json:"source_data" jsonschema:"JSON string of the source data to import (required)"` + SourceFormat string `json:"source_format" jsonschema:"Source format to import from (e.g., 'mcp') (required)"` +} + +// ImportRecordOutput defines the output of importing a record. +type ImportRecordOutput struct { + RecordJSON string `json:"record_json,omitempty" jsonschema:"The imported OASF record (JSON string)"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if import failed"` +} + +// ImportRecord imports data from a different format to an OASF agent record using the OASF SDK translator. +// Currently supported formats: +// - "mcp": Model Context Protocol format. +// - "a2a": Agent-to-Agent (A2A) format. 
+func ImportRecord(ctx context.Context, _ *mcp.CallToolRequest, input ImportRecordInput) ( + *mcp.CallToolResult, + ImportRecordOutput, + error, +) { + // Validate input + if input.SourceData == "" { + return nil, ImportRecordOutput{ + ErrorMessage: "source_data is required", + }, nil + } + + if input.SourceFormat == "" { + return nil, ImportRecordOutput{ + ErrorMessage: "source_format is required", + }, nil + } + + // Parse the source data into a structpb.Struct + var sourceStruct structpb.Struct + if err := protojson.Unmarshal([]byte(input.SourceData), &sourceStruct); err != nil { + return nil, ImportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to parse source data JSON: %v", err), + }, nil + } + + // Normalize the source format to lowercase for comparison + sourceFormat := strings.ToLower(strings.TrimSpace(input.SourceFormat)) + + // Import based on source format + var recordStruct *structpb.Struct + + var err error + + switch sourceFormat { + case "mcp": + recordStruct, err = translator.MCPToRecord(&sourceStruct) + if err != nil { + return nil, ImportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to import from MCP format: %v", err), + }, nil + } + + case "a2a": + recordStruct, err = translator.A2AToRecord(&sourceStruct) + if err != nil { + return nil, ImportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to import from A2A format: %v", err), + }, nil + } + + default: + return nil, ImportRecordOutput{ + ErrorMessage: fmt.Sprintf("Unsupported source format: %s. Supported formats: mcp, a2a", input.SourceFormat), + }, nil + } + + // Convert the record struct to JSON + recordJSON, err := protojson.MarshalOptions{ + Indent: " ", + }.Marshal(recordStruct) + if err != nil { + return nil, ImportRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to marshal record to JSON: %v", err), + }, nil + } + + return nil, ImportRecordOutput{ + RecordJSON: string(recordJSON), + }, nil +} diff --git a/mcp/tools/import_record_test.go b/mcp/tools/import_record_test.go index d8356fb8e..030e7b68d 100644 --- a/mcp/tools/import_record_test.go +++ b/mcp/tools/import_record_test.go @@ -1,136 +1,136 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:dupl // Test structure is similar to export_record_test but tests different functionality -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestImportRecord(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - t.Run("imports A2A format to OASF record", func(t *testing.T) { - t.Parallel() - - // Note: This test verifies that the A2A import path is invoked. - // Actual translation success depends on the source data having the required A2A structure, - // which is beyond the scope of this unit test. - - // Sample A2A card JSON - sourceData := `{ - "name": "test-agent", - "version": "1.0.0", - "description": "A test agent" - }` - - input := ImportRecordInput{ - SourceData: sourceData, - SourceFormat: "a2a", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - // The import may fail if the source data doesn't have the required A2A structure, - // which is expected. The important part is that it attempts the import. 
- if output.ErrorMessage != "" { - assert.Contains(t, output.ErrorMessage, "Failed to import from A2A format") - } - }) - - t.Run("fails when source_data is empty", func(t *testing.T) { - t.Parallel() - - input := ImportRecordInput{ - SourceData: "", - SourceFormat: "a2a", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "source_data is required") - assert.Empty(t, output.RecordJSON) - }) - - t.Run("fails when source_format is empty", func(t *testing.T) { - t.Parallel() - - input := ImportRecordInput{ - SourceData: `{"test": "data"}`, - SourceFormat: "", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "source_format is required") - assert.Empty(t, output.RecordJSON) - }) - - t.Run("fails with unsupported source format", func(t *testing.T) { - t.Parallel() - - sourceData := `{ - "name": "test-data" - }` - - input := ImportRecordInput{ - SourceData: sourceData, - SourceFormat: "unsupported-format", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "Unsupported source format") - assert.Contains(t, output.ErrorMessage, "unsupported-format") - assert.Empty(t, output.RecordJSON) - }) - - t.Run("fails with invalid JSON", func(t *testing.T) { - t.Parallel() - - input := ImportRecordInput{ - SourceData: `{invalid json}`, - SourceFormat: "a2a", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.ErrorMessage, "Failed to parse source data JSON") - assert.Empty(t, output.RecordJSON) - }) - - t.Run("handles case-insensitive source format", func(t *testing.T) { - t.Parallel() - - sourceData := `{ - "name": "test-agent", - "version": "1.0.0" - }` - - input := ImportRecordInput{ - SourceData: sourceData, - SourceFormat: "A2A", - } - - _, output, err := ImportRecord(ctx, nil, input) - - require.NoError(t, err) - // The test verifies that case-insensitive format is handled. - // Actual translation may fail if source data lacks required structure. - if output.ErrorMessage != "" { - assert.Contains(t, output.ErrorMessage, "Failed to import from A2A format") - } - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:dupl // Test structure is similar to export_record_test but tests different functionality +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestImportRecord(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + t.Run("imports A2A format to OASF record", func(t *testing.T) { + t.Parallel() + + // Note: This test verifies that the A2A import path is invoked. + // Actual translation success depends on the source data having the required A2A structure, + // which is beyond the scope of this unit test. + + // Sample A2A card JSON + sourceData := `{ + "name": "test-agent", + "version": "1.0.0", + "description": "A test agent" + }` + + input := ImportRecordInput{ + SourceData: sourceData, + SourceFormat: "a2a", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + // The import may fail if the source data doesn't have the required A2A structure, + // which is expected. The important part is that it attempts the import. 
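+		// Either outcome is acceptable for this minimal card: an empty
+		// ErrorMessage means the translator accepted the input, while a
+		// non-empty one must point at the A2A import path, as asserted below.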
+ if output.ErrorMessage != "" { + assert.Contains(t, output.ErrorMessage, "Failed to import from A2A format") + } + }) + + t.Run("fails when source_data is empty", func(t *testing.T) { + t.Parallel() + + input := ImportRecordInput{ + SourceData: "", + SourceFormat: "a2a", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "source_data is required") + assert.Empty(t, output.RecordJSON) + }) + + t.Run("fails when source_format is empty", func(t *testing.T) { + t.Parallel() + + input := ImportRecordInput{ + SourceData: `{"test": "data"}`, + SourceFormat: "", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "source_format is required") + assert.Empty(t, output.RecordJSON) + }) + + t.Run("fails with unsupported source format", func(t *testing.T) { + t.Parallel() + + sourceData := `{ + "name": "test-data" + }` + + input := ImportRecordInput{ + SourceData: sourceData, + SourceFormat: "unsupported-format", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "Unsupported source format") + assert.Contains(t, output.ErrorMessage, "unsupported-format") + assert.Empty(t, output.RecordJSON) + }) + + t.Run("fails with invalid JSON", func(t *testing.T) { + t.Parallel() + + input := ImportRecordInput{ + SourceData: `{invalid json}`, + SourceFormat: "a2a", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.ErrorMessage, "Failed to parse source data JSON") + assert.Empty(t, output.RecordJSON) + }) + + t.Run("handles case-insensitive source format", func(t *testing.T) { + t.Parallel() + + sourceData := `{ + "name": "test-agent", + "version": "1.0.0" + }` + + input := ImportRecordInput{ + SourceData: sourceData, + SourceFormat: "A2A", + } + + _, output, err := ImportRecord(ctx, nil, input) + + require.NoError(t, err) + // The test verifies that case-insensitive format is handled. + // Actual translation may fail if source data lacks required structure. + if output.ErrorMessage != "" { + assert.Contains(t, output.ErrorMessage, "Failed to import from A2A format") + } + }) +} diff --git a/mcp/tools/list_versions.go b/mcp/tools/list_versions.go index 562ba6b82..c6e5a5cf2 100644 --- a/mcp/tools/list_versions.go +++ b/mcp/tools/list_versions.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - - "github.com/agntcy/oasf-sdk/pkg/validator" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// ListVersionsInput represents the input for listing available OASF schema versions. -type ListVersionsInput struct { - // No input parameters needed -} - -// ListVersionsOutput represents the output after listing available OASF schema versions. -type ListVersionsOutput struct { - AvailableVersions []string `json:"available_versions" jsonschema:"List of available OASF schema versions"` - Count int `json:"count" jsonschema:"Number of available schema versions"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if version listing failed"` -} - -// ListVersions retrieves the list of available OASF schema versions. -// This tool provides a simple way to discover what schema versions are supported. 
-func ListVersions(_ context.Context, _ *mcp.CallToolRequest, _ ListVersionsInput) ( - *mcp.CallToolResult, - ListVersionsOutput, - error, -) { - // Get available schema versions from the OASF SDK - availableVersions, err := validator.GetAvailableSchemaVersions() - if err != nil { - return nil, ListVersionsOutput{ - ErrorMessage: fmt.Sprintf("Failed to get available schema versions: %v", err), - }, nil - } - - // Return the available versions - return nil, ListVersionsOutput{ - AvailableVersions: availableVersions, - Count: len(availableVersions), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + + "github.com/agntcy/oasf-sdk/pkg/validator" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// ListVersionsInput represents the input for listing available OASF schema versions. +type ListVersionsInput struct { + // No input parameters needed +} + +// ListVersionsOutput represents the output after listing available OASF schema versions. +type ListVersionsOutput struct { + AvailableVersions []string `json:"available_versions" jsonschema:"List of available OASF schema versions"` + Count int `json:"count" jsonschema:"Number of available schema versions"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if version listing failed"` +} + +// ListVersions retrieves the list of available OASF schema versions. +// This tool provides a simple way to discover what schema versions are supported. +func ListVersions(_ context.Context, _ *mcp.CallToolRequest, _ ListVersionsInput) ( + *mcp.CallToolResult, + ListVersionsOutput, + error, +) { + // Get available schema versions from the OASF SDK + availableVersions, err := validator.GetAvailableSchemaVersions() + if err != nil { + return nil, ListVersionsOutput{ + ErrorMessage: fmt.Sprintf("Failed to get available schema versions: %v", err), + }, nil + } + + // Return the available versions + return nil, ListVersionsOutput{ + AvailableVersions: availableVersions, + Count: len(availableVersions), + }, nil +} diff --git a/mcp/tools/list_versions_test.go b/mcp/tools/list_versions_test.go index b85ebc07a..1d185754d 100644 --- a/mcp/tools/list_versions_test.go +++ b/mcp/tools/list_versions_test.go @@ -1,38 +1,38 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestListVersions(t *testing.T) { - t.Run("should return available versions", func(t *testing.T) { - ctx := context.Background() - input := ListVersionsInput{} - - _, output, err := ListVersions(ctx, nil, input) - - require.NoError(t, err) - assert.Empty(t, output.ErrorMessage) - assert.NotEmpty(t, output.AvailableVersions) - assert.Positive(t, output.Count) - assert.Len(t, output.AvailableVersions, output.Count) - }) - - t.Run("should include known versions", func(t *testing.T) { - ctx := context.Background() - input := ListVersionsInput{} - - _, output, err := ListVersions(ctx, nil, input) - - require.NoError(t, err) - assert.Contains(t, output.AvailableVersions, "0.7.0") - assert.Contains(t, output.AvailableVersions, "0.3.1") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
+) + +func TestListVersions(t *testing.T) { + t.Run("should return available versions", func(t *testing.T) { + ctx := context.Background() + input := ListVersionsInput{} + + _, output, err := ListVersions(ctx, nil, input) + + require.NoError(t, err) + assert.Empty(t, output.ErrorMessage) + assert.NotEmpty(t, output.AvailableVersions) + assert.Positive(t, output.Count) + assert.Len(t, output.AvailableVersions, output.Count) + }) + + t.Run("should include known versions", func(t *testing.T) { + ctx := context.Background() + input := ListVersionsInput{} + + _, output, err := ListVersions(ctx, nil, input) + + require.NoError(t, err) + assert.Contains(t, output.AvailableVersions, "0.7.0") + assert.Contains(t, output.AvailableVersions, "0.3.1") + }) +} diff --git a/mcp/tools/pull.go b/mcp/tools/pull.go index 3070ecc5d..add0e4dfd 100644 --- a/mcp/tools/pull.go +++ b/mcp/tools/pull.go @@ -1,79 +1,79 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/client" - "github.com/modelcontextprotocol/go-sdk/mcp" - "google.golang.org/protobuf/encoding/protojson" -) - -// PullRecordInput defines the input parameters for pulling a record. -type PullRecordInput struct { - CID string `json:"cid" jsonschema:"Content Identifier (CID) of the record to pull (required)"` -} - -// PullRecordOutput defines the output of pulling a record. -type PullRecordOutput struct { - RecordData string `json:"record_data,omitempty" jsonschema:"The record data (JSON string)"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if pull failed"` -} - -// PullRecord pulls a record from the Directory by its CID. -func PullRecord(ctx context.Context, _ *mcp.CallToolRequest, input PullRecordInput) ( - *mcp.CallToolResult, - PullRecordOutput, - error, -) { - // Validate input - if input.CID == "" { - return nil, PullRecordOutput{ - ErrorMessage: "CID is required", - }, nil - } - - // Load client configuration - config, err := client.LoadConfig() - if err != nil { - return nil, PullRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), - }, nil - } - - // Create Directory client - c, err := client.New(ctx, client.WithConfig(config)) - if err != nil { - return nil, PullRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), - }, nil - } - defer c.Close() - - // Pull the record - record, err := c.Pull(ctx, &corev1.RecordRef{ - Cid: input.CID, - }) - if err != nil { - return nil, PullRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to pull record: %v", err), - }, nil - } - - // Marshal record data to JSON - recordData, err := protojson.Marshal(record.GetData()) - if err != nil { - return nil, PullRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to marshal record data: %v", err), - }, nil - } - - // Return output - return nil, PullRecordOutput{ - RecordData: string(recordData), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/client" + "github.com/modelcontextprotocol/go-sdk/mcp" + "google.golang.org/protobuf/encoding/protojson" +) + +// PullRecordInput defines the input parameters for pulling a record. 
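+//
+// Callers supply only the record CID, for example {"cid": "bafkrei..."}
+// (the value shown is illustrative, not a real record).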
+type PullRecordInput struct { + CID string `json:"cid" jsonschema:"Content Identifier (CID) of the record to pull (required)"` +} + +// PullRecordOutput defines the output of pulling a record. +type PullRecordOutput struct { + RecordData string `json:"record_data,omitempty" jsonschema:"The record data (JSON string)"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if pull failed"` +} + +// PullRecord pulls a record from the Directory by its CID. +func PullRecord(ctx context.Context, _ *mcp.CallToolRequest, input PullRecordInput) ( + *mcp.CallToolResult, + PullRecordOutput, + error, +) { + // Validate input + if input.CID == "" { + return nil, PullRecordOutput{ + ErrorMessage: "CID is required", + }, nil + } + + // Load client configuration + config, err := client.LoadConfig() + if err != nil { + return nil, PullRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), + }, nil + } + + // Create Directory client + c, err := client.New(ctx, client.WithConfig(config)) + if err != nil { + return nil, PullRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), + }, nil + } + defer c.Close() + + // Pull the record + record, err := c.Pull(ctx, &corev1.RecordRef{ + Cid: input.CID, + }) + if err != nil { + return nil, PullRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to pull record: %v", err), + }, nil + } + + // Marshal record data to JSON + recordData, err := protojson.Marshal(record.GetData()) + if err != nil { + return nil, PullRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to marshal record data: %v", err), + }, nil + } + + // Return output + return nil, PullRecordOutput{ + RecordData: string(recordData), + }, nil +} diff --git a/mcp/tools/pull_test.go b/mcp/tools/pull_test.go index e91c08a4d..2cb3618f3 100644 --- a/mcp/tools/pull_test.go +++ b/mcp/tools/pull_test.go @@ -1,50 +1,50 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPullRecordInputValidation(t *testing.T) { - tests := []struct { - name string - input PullRecordInput - expectError bool - }{ - { - name: "valid input", - input: PullRecordInput{ - CID: "bafkreiabcd1234567890", - }, - expectError: false, - }, - { - name: "missing CID", - input: PullRecordInput{ - CID: "", - }, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Just test that marshaling works - _, err := json.Marshal(tt.input) - require.NoError(t, err) - - // Validate CID requirement - if tt.expectError { - assert.Empty(t, tt.input.CID) - } else { - assert.NotEmpty(t, tt.input.CID) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPullRecordInputValidation(t *testing.T) { + tests := []struct { + name string + input PullRecordInput + expectError bool + }{ + { + name: "valid input", + input: PullRecordInput{ + CID: "bafkreiabcd1234567890", + }, + expectError: false, + }, + { + name: "missing CID", + input: PullRecordInput{ + CID: "", + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Just test that marshaling works + _, err := 
json.Marshal(tt.input) + require.NoError(t, err) + + // Validate CID requirement + if tt.expectError { + assert.Empty(t, tt.input.CID) + } else { + assert.NotEmpty(t, tt.input.CID) + } + }) + } +} diff --git a/mcp/tools/push_record.go b/mcp/tools/push_record.go index fbb64e0f5..2f3635709 100644 --- a/mcp/tools/push_record.go +++ b/mcp/tools/push_record.go @@ -1,86 +1,86 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/client" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// PushRecordInput defines the input parameters for the push_record tool. -type PushRecordInput struct { - RecordJSON string `json:"record_json" jsonschema:"JSON string of the OASF agent record to push to Directory server"` -} - -// PushRecordOutput defines the output structure for the push_record tool. -type PushRecordOutput struct { - CID string `json:"cid,omitempty" jsonschema:"Content identifier (CID) of the pushed record"` - ServerAddress string `json:"server_address,omitempty" jsonschema:"Directory server address where the record was pushed"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if push failed"` -} - -// PushRecord implements the agntcy_dir_push_record tool. -// It pushes an OASF agent record to a Directory server and returns the CID. -func PushRecord(ctx context.Context, _ *mcp.CallToolRequest, input PushRecordInput) ( - *mcp.CallToolResult, - PushRecordOutput, - error, -) { - // Load client configuration from environment variables - config, err := client.LoadConfig() - if err != nil { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), - }, nil - } - - // Parse the record JSON - record, err := corev1.UnmarshalRecord([]byte(input.RecordJSON)) - if err != nil { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v", err), - }, nil - } - - // Validate the record before pushing - valid, validationErrors, err := record.Validate(ctx) - if err != nil { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to validate record: %v", err), - }, nil - } - - if !valid { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Record validation failed: %v", validationErrors), - }, nil - } - - // Create Directory client - c, err := client.New(ctx, client.WithConfig(config)) - if err != nil { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), - }, nil - } - defer c.Close() - - // Push the record - recordRef, err := c.Push(ctx, record) - if err != nil { - return nil, PushRecordOutput{ - ErrorMessage: fmt.Sprintf("Failed to push record to server: %v", err), - }, nil - } - - // Return success with CID and server address - return nil, PushRecordOutput{ - CID: recordRef.GetCid(), - ServerAddress: config.ServerAddress, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/client" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// PushRecordInput defines the input parameters for the push_record tool. 
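+//
+// The record_json value is a complete OASF record serialized as a JSON
+// string; it is parsed and schema-validated before any push is attempted,
+// so malformed or invalid records never reach the server.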
+type PushRecordInput struct { + RecordJSON string `json:"record_json" jsonschema:"JSON string of the OASF agent record to push to Directory server"` +} + +// PushRecordOutput defines the output structure for the push_record tool. +type PushRecordOutput struct { + CID string `json:"cid,omitempty" jsonschema:"Content identifier (CID) of the pushed record"` + ServerAddress string `json:"server_address,omitempty" jsonschema:"Directory server address where the record was pushed"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if push failed"` +} + +// PushRecord implements the agntcy_dir_push_record tool. +// It pushes an OASF agent record to a Directory server and returns the CID. +func PushRecord(ctx context.Context, _ *mcp.CallToolRequest, input PushRecordInput) ( + *mcp.CallToolResult, + PushRecordOutput, + error, +) { + // Load client configuration from environment variables + config, err := client.LoadConfig() + if err != nil { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), + }, nil + } + + // Parse the record JSON + record, err := corev1.UnmarshalRecord([]byte(input.RecordJSON)) + if err != nil { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v", err), + }, nil + } + + // Validate the record before pushing + valid, validationErrors, err := record.Validate(ctx) + if err != nil { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to validate record: %v", err), + }, nil + } + + if !valid { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Record validation failed: %v", validationErrors), + }, nil + } + + // Create Directory client + c, err := client.New(ctx, client.WithConfig(config)) + if err != nil { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), + }, nil + } + defer c.Close() + + // Push the record + recordRef, err := c.Push(ctx, record) + if err != nil { + return nil, PushRecordOutput{ + ErrorMessage: fmt.Sprintf("Failed to push record to server: %v", err), + }, nil + } + + // Return success with CID and server address + return nil, PushRecordOutput{ + CID: recordRef.GetCid(), + ServerAddress: config.ServerAddress, + }, nil +} diff --git a/mcp/tools/push_record_test.go b/mcp/tools/push_record_test.go index 997d8454b..372a14163 100644 --- a/mcp/tools/push_record_test.go +++ b/mcp/tools/push_record_test.go @@ -1,94 +1,94 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "testing" - - "github.com/modelcontextprotocol/go-sdk/mcp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPushRecord(t *testing.T) { - tests := []struct { - name string - input PushRecordInput - wantError bool - wantErrorMsg string - validateOutput func(t *testing.T, output PushRecordOutput) - }{ - { - name: "invalid JSON", - input: PushRecordInput{ - RecordJSON: "invalid json", - }, - wantError: true, - wantErrorMsg: "Failed to parse record JSON", - }, - { - name: "empty JSON", - input: PushRecordInput{ - RecordJSON: "{}", - }, - wantError: true, - wantErrorMsg: "Failed to parse record JSON", - }, - { - name: "valid but invalid record structure", - input: PushRecordInput{ - RecordJSON: `{"invalid": "structure"}`, - }, - wantError: true, - wantErrorMsg: "Failed to parse record JSON", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t 
*testing.T) { - ctx := context.Background() - req := &mcp.CallToolRequest{} - - _, output, err := PushRecord(ctx, req, tt.input) - - if tt.wantError { - require.NoError(t, err) // PushRecord returns nil error, error in output - // result can be nil when there's an error - error info is in output.ErrorMessage - assert.Contains(t, output.ErrorMessage, tt.wantErrorMsg) - assert.Empty(t, output.CID) - } else { - if err != nil { - t.Errorf("PushRecord() unexpected error: %v", err) - } - - if tt.validateOutput != nil { - tt.validateOutput(t, output) - } - } - }) - } -} - -func TestPushRecord_InvalidRecord(t *testing.T) { - // Test with a record that will fail validation (missing required fields) - invalidRecordJSON := `{ - "schema_version": "0.7.0", - "name": "test-agent" - }` - - ctx := context.Background() - req := &mcp.CallToolRequest{} - - _, output, err := PushRecord(ctx, req, PushRecordInput{ - RecordJSON: invalidRecordJSON, - }) - - require.NoError(t, err) // PushRecord returns nil error, error in output - // result can be nil when there's an error - error info is in output.ErrorMessage - // Should have error message about validation failure - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "validation") - assert.Empty(t, output.CID) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "testing" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPushRecord(t *testing.T) { + tests := []struct { + name string + input PushRecordInput + wantError bool + wantErrorMsg string + validateOutput func(t *testing.T, output PushRecordOutput) + }{ + { + name: "invalid JSON", + input: PushRecordInput{ + RecordJSON: "invalid json", + }, + wantError: true, + wantErrorMsg: "Failed to parse record JSON", + }, + { + name: "empty JSON", + input: PushRecordInput{ + RecordJSON: "{}", + }, + wantError: true, + wantErrorMsg: "Failed to parse record JSON", + }, + { + name: "valid but invalid record structure", + input: PushRecordInput{ + RecordJSON: `{"invalid": "structure"}`, + }, + wantError: true, + wantErrorMsg: "Failed to parse record JSON", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + req := &mcp.CallToolRequest{} + + _, output, err := PushRecord(ctx, req, tt.input) + + if tt.wantError { + require.NoError(t, err) // PushRecord returns nil error, error in output + // result can be nil when there's an error - error info is in output.ErrorMessage + assert.Contains(t, output.ErrorMessage, tt.wantErrorMsg) + assert.Empty(t, output.CID) + } else { + if err != nil { + t.Errorf("PushRecord() unexpected error: %v", err) + } + + if tt.validateOutput != nil { + tt.validateOutput(t, output) + } + } + }) + } +} + +func TestPushRecord_InvalidRecord(t *testing.T) { + // Test with a record that will fail validation (missing required fields) + invalidRecordJSON := `{ + "schema_version": "0.7.0", + "name": "test-agent" + }` + + ctx := context.Background() + req := &mcp.CallToolRequest{} + + _, output, err := PushRecord(ctx, req, PushRecordInput{ + RecordJSON: invalidRecordJSON, + }) + + require.NoError(t, err) // PushRecord returns nil error, error in output + // result can be nil when there's an error - error info is in output.ErrorMessage + // Should have error message about validation failure + assert.NotEmpty(t, output.ErrorMessage) + 
assert.Contains(t, output.ErrorMessage, "validation") + assert.Empty(t, output.CID) +} diff --git a/mcp/tools/schema_helpers.go b/mcp/tools/schema_helpers.go index 7fdec0f0c..45da681f2 100644 --- a/mcp/tools/schema_helpers.go +++ b/mcp/tools/schema_helpers.go @@ -1,152 +1,152 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/agntcy/oasf-sdk/pkg/validator" -) - -// schemaClass represents a generic schema class (domain or skill). -type schemaClass struct { - Name string - Caption string - ID int -} - -// validateVersion checks if the provided version is valid and returns available versions. -func validateVersion(version string) ([]string, error) { - availableVersions, err := validator.GetAvailableSchemaVersions() - if err != nil { - return nil, fmt.Errorf("failed to get available schema versions: %w", err) - } - - if version == "" { - return availableVersions, fmt.Errorf("version parameter is required. Available versions: %s", - strings.Join(availableVersions, ", ")) - } - - versionValid := false - - for _, v := range availableVersions { - if version == v { - versionValid = true - - break - } - } - - if !versionValid { - return availableVersions, fmt.Errorf("invalid version '%s'. Available versions: %s", - version, strings.Join(availableVersions, ", ")) - } - - return availableVersions, nil -} - -// parseSchemaData parses JSON schema data into a list of schema items. -func parseSchemaData(data []byte, parseFunc func(map[string]interface{}) schemaClass) ([]schemaClass, error) { - var schemaData map[string]interface{} - if err := json.Unmarshal(data, &schemaData); err != nil { - return nil, fmt.Errorf("failed to parse schema data: %w", err) - } - - var items []schemaClass - - for _, itemDef := range schemaData { - defMap, ok := itemDef.(map[string]interface{}) - if !ok { - continue - } - - item := parseFunc(defMap) - if item.Name != "" { - items = append(items, item) - } - } - - return items, nil -} - -// filterChildItems returns child items that are direct descendants of the parent. -func filterChildItems(allItems []schemaClass, parent string) ([]schemaClass, error) { - prefix := parent + "/" - - var children []schemaClass - - for _, item := range allItems { - if !strings.HasPrefix(item.Name, prefix) { - continue - } - - remainder := strings.TrimPrefix(item.Name, prefix) - if !strings.Contains(remainder, "/") { - children = append(children, item) - } - } - - if len(children) == 0 { - return nil, fmt.Errorf("parent '%s' not found or has no children", parent) - } - - return children, nil -} - -// extractTopLevelCategories extracts unique top-level parent categories from items. -func extractTopLevelCategories(allItems []schemaClass) []schemaClass { - parentCategories := make(map[string]bool) - topLevel := make([]schemaClass, 0, len(allItems)) - - for _, item := range allItems { - idx := strings.Index(item.Name, "/") - if idx <= 0 { - continue - } - - parentCategory := item.Name[:idx] - if parentCategories[parentCategory] { - continue - } - - parentCategories[parentCategory] = true - topLevel = append(topLevel, schemaClass{Name: parentCategory}) - } - - return topLevel -} - -// parseItemFromSchema extracts schema item information from the schema definition. 
-func parseItemFromSchema(defMap map[string]interface{}) schemaClass { - item := schemaClass{} - - // Extract title for caption - if title, ok := defMap["title"].(string); ok { - item.Caption = title - } - - // Extract properties - props, ok := defMap["properties"].(map[string]interface{}) - if !ok { - return item - } - - // Extract name - if nameField, ok := props["name"].(map[string]interface{}); ok { - if constVal, ok := nameField["const"].(string); ok { - item.Name = constVal - } - } - - // Extract ID - if idField, ok := props["id"].(map[string]interface{}); ok { - if constVal, ok := idField["const"].(float64); ok { - item.ID = int(constVal) - } - } - - return item -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/agntcy/oasf-sdk/pkg/validator" +) + +// schemaClass represents a generic schema class (domain or skill). +type schemaClass struct { + Name string + Caption string + ID int +} + +// validateVersion checks if the provided version is valid and returns available versions. +func validateVersion(version string) ([]string, error) { + availableVersions, err := validator.GetAvailableSchemaVersions() + if err != nil { + return nil, fmt.Errorf("failed to get available schema versions: %w", err) + } + + if version == "" { + return availableVersions, fmt.Errorf("version parameter is required. Available versions: %s", + strings.Join(availableVersions, ", ")) + } + + versionValid := false + + for _, v := range availableVersions { + if version == v { + versionValid = true + + break + } + } + + if !versionValid { + return availableVersions, fmt.Errorf("invalid version '%s'. Available versions: %s", + version, strings.Join(availableVersions, ", ")) + } + + return availableVersions, nil +} + +// parseSchemaData parses JSON schema data into a list of schema items. +func parseSchemaData(data []byte, parseFunc func(map[string]interface{}) schemaClass) ([]schemaClass, error) { + var schemaData map[string]interface{} + if err := json.Unmarshal(data, &schemaData); err != nil { + return nil, fmt.Errorf("failed to parse schema data: %w", err) + } + + var items []schemaClass + + for _, itemDef := range schemaData { + defMap, ok := itemDef.(map[string]interface{}) + if !ok { + continue + } + + item := parseFunc(defMap) + if item.Name != "" { + items = append(items, item) + } + } + + return items, nil +} + +// filterChildItems returns child items that are direct descendants of the parent. +func filterChildItems(allItems []schemaClass, parent string) ([]schemaClass, error) { + prefix := parent + "/" + + var children []schemaClass + + for _, item := range allItems { + if !strings.HasPrefix(item.Name, prefix) { + continue + } + + remainder := strings.TrimPrefix(item.Name, prefix) + if !strings.Contains(remainder, "/") { + children = append(children, item) + } + } + + if len(children) == 0 { + return nil, fmt.Errorf("parent '%s' not found or has no children", parent) + } + + return children, nil +} + +// extractTopLevelCategories extracts unique top-level parent categories from items. 
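+// For example, items named "nlp/summarization" and "nlp/translation" (names
+// are illustrative) both collapse into the single top-level entry "nlp".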
+func extractTopLevelCategories(allItems []schemaClass) []schemaClass { + parentCategories := make(map[string]bool) + topLevel := make([]schemaClass, 0, len(allItems)) + + for _, item := range allItems { + idx := strings.Index(item.Name, "/") + if idx <= 0 { + continue + } + + parentCategory := item.Name[:idx] + if parentCategories[parentCategory] { + continue + } + + parentCategories[parentCategory] = true + topLevel = append(topLevel, schemaClass{Name: parentCategory}) + } + + return topLevel +} + +// parseItemFromSchema extracts schema item information from the schema definition. +func parseItemFromSchema(defMap map[string]interface{}) schemaClass { + item := schemaClass{} + + // Extract title for caption + if title, ok := defMap["title"].(string); ok { + item.Caption = title + } + + // Extract properties + props, ok := defMap["properties"].(map[string]interface{}) + if !ok { + return item + } + + // Extract name + if nameField, ok := props["name"].(map[string]interface{}); ok { + if constVal, ok := nameField["const"].(string); ok { + item.Name = constVal + } + } + + // Extract ID + if idField, ok := props["id"].(map[string]interface{}); ok { + if constVal, ok := idField["const"].(float64); ok { + item.ID = int(constVal) + } + } + + return item +} diff --git a/mcp/tools/search.go b/mcp/tools/search.go index 28d889506..2f8d28968 100644 --- a/mcp/tools/search.go +++ b/mcp/tools/search.go @@ -1,259 +1,259 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/client" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// SearchLocalInput defines the input parameters for local search. -type SearchLocalInput struct { - Limit int `json:"limit,omitempty" jsonschema:"Maximum number of results to return (default: 100 max: 1000)"` - Offset int `json:"offset,omitempty" jsonschema:"Pagination offset (default: 0)"` - Names []string `json:"names,omitempty" jsonschema:"Agent name patterns (supports wildcards: * ? [])"` - Versions []string `json:"versions,omitempty" jsonschema:"Version patterns (supports wildcards: * ? [])"` - SkillIDs []string `json:"skill_ids,omitempty" jsonschema:"Skill ID patterns (exact match only)"` - SkillNames []string `json:"skill_names,omitempty" jsonschema:"Skill name patterns (supports wildcards: * ? [])"` - Locators []string `json:"locators,omitempty" jsonschema:"Locator patterns (supports wildcards: * ? [])"` - ModuleNames []string `json:"module_names,omitempty" jsonschema:"Module name patterns (supports wildcards: * ? [])"` - DomainIDs []string `json:"domain_ids,omitempty" jsonschema:"Domain ID patterns (exact match only)"` - DomainNames []string `json:"domain_names,omitempty" jsonschema:"Domain name patterns (supports wildcards: * ? [])"` - CreatedAts []string `json:"created_ats,omitempty" jsonschema:"Created_at timestamp patterns (supports wildcards: * ? [])"` - Authors []string `json:"authors,omitempty" jsonschema:"Author name patterns (supports wildcards: * ? [])"` - SchemaVersions []string `json:"schema_versions,omitempty" jsonschema:"Schema version patterns (supports wildcards: * ? [])"` - ModuleIDs []string `json:"module_ids,omitempty" jsonschema:"Module ID patterns (exact match only)"` -} - -// SearchLocalOutput defines the output of local search. 
-type SearchLocalOutput struct { - RecordCIDs []string `json:"record_cids,omitempty" jsonschema:"Array of matching record CIDs"` - Count int `json:"count" jsonschema:"Number of results returned"` - HasMore bool `json:"has_more" jsonschema:"Whether more results are available beyond the limit"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if search failed"` -} - -const ( - defaultLimit = 100 - maxLimit = 1000 -) - -// SearchLocal searches for agent records on the local directory node. -// -//nolint:cyclop -func SearchLocal(ctx context.Context, _ *mcp.CallToolRequest, input SearchLocalInput) ( - *mcp.CallToolResult, - SearchLocalOutput, - error, -) { - // Validate and set defaults - limit := defaultLimit - if input.Limit > 0 { - limit = input.Limit - if limit > maxLimit { - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("limit cannot exceed %d", maxLimit), - }, nil - } - } else if input.Limit < 0 { - return nil, SearchLocalOutput{ - ErrorMessage: "limit must be positive", - }, nil - } - - offset := 0 - if input.Offset > 0 { - offset = input.Offset - } else if input.Offset < 0 { - return nil, SearchLocalOutput{ - ErrorMessage: "offset cannot be negative", - }, nil - } - - // Build queries from input - queries := buildQueries(input) - if len(queries) == 0 { - return nil, SearchLocalOutput{ - ErrorMessage: "at least one query filter must be provided", - }, nil - } - - // Load client configuration - config, err := client.LoadConfig() - if err != nil { - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), - }, nil - } - - // Create Directory client - c, err := client.New(ctx, client.WithConfig(config)) - if err != nil { - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), - }, nil - } - defer c.Close() - - // Execute search - // Safe conversions: limit is capped at 1000, offset is validated by client - limit32 := uint32(limit) // #nosec G115 - offset32 := uint32(offset) // #nosec G115 - - result, err := c.SearchCIDs(ctx, &searchv1.SearchCIDsRequest{ - Limit: &limit32, - Offset: &offset32, - Queries: queries, - }) - if err != nil { - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("Search failed: %v", err), - }, nil - } - - // Collect results - recordCIDs := make([]string, 0, limit) - -L: - for { - select { - case resp := <-result.ResCh(): - cid := resp.GetRecordCid() - if cid != "" { - recordCIDs = append(recordCIDs, cid) - } - case err := <-result.ErrCh(): - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("Search stream error: %v", err), - }, nil - case <-result.DoneCh(): - break L - case <-ctx.Done(): - return nil, SearchLocalOutput{ - ErrorMessage: fmt.Sprintf("Search cancelled: %v", ctx.Err()), - }, nil - } - } - - // Determine if there are more results - hasMore := len(recordCIDs) == limit - - return nil, SearchLocalOutput{ - RecordCIDs: recordCIDs, - Count: len(recordCIDs), - HasMore: hasMore, - }, nil -} - -// buildQueries converts input filters to RecordQuery objects. 
-func buildQueries(input SearchLocalInput) []*searchv1.RecordQuery { - queries := make([]*searchv1.RecordQuery, 0, - len(input.Names)+len(input.Versions)+len(input.SkillIDs)+ - len(input.SkillNames)+len(input.Locators)+len(input.ModuleNames)+ - len(input.DomainIDs)+len(input.DomainNames)+ - len(input.CreatedAts)+len(input.Authors)+ - len(input.SchemaVersions)+len(input.ModuleIDs)) - - // Add name queries - for _, name := range input.Names { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, - Value: name, - }) - } - - // Add version queries - for _, version := range input.Versions { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, - Value: version, - }) - } - - // Add skill-id queries - for _, skillID := range input.SkillIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, - Value: skillID, - }) - } - - // Add skill-name queries - for _, skillName := range input.SkillNames { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, - Value: skillName, - }) - } - - // Add locator queries - for _, locator := range input.Locators { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: locator, - }) - } - - // Add module name queries - for _, moduleName := range input.ModuleNames { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, - Value: moduleName, - }) - } - - // Add domain-id queries - for _, domainID := range input.DomainIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, - Value: domainID, - }) - } - - // Add domain-name queries - for _, domainName := range input.DomainNames { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, - Value: domainName, - }) - } - - // Add created-at queries - for _, createdAt := range input.CreatedAts { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT, - Value: createdAt, - }) - } - - // Add author queries - for _, author := range input.Authors { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR, - Value: author, - }) - } - - // Add schema-version queries - for _, schemaVersion := range input.SchemaVersions { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION, - Value: schemaVersion, - }) - } - - // Add module-id queries - for _, moduleID := range input.ModuleIDs { - queries = append(queries, &searchv1.RecordQuery{ - Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID, - Value: moduleID, - }) - } - - return queries -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/client" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// SearchLocalInput defines the input parameters for local search. 
+type SearchLocalInput struct { + Limit int `json:"limit,omitempty" jsonschema:"Maximum number of results to return (default: 100 max: 1000)"` + Offset int `json:"offset,omitempty" jsonschema:"Pagination offset (default: 0)"` + Names []string `json:"names,omitempty" jsonschema:"Agent name patterns (supports wildcards: * ? [])"` + Versions []string `json:"versions,omitempty" jsonschema:"Version patterns (supports wildcards: * ? [])"` + SkillIDs []string `json:"skill_ids,omitempty" jsonschema:"Skill ID patterns (exact match only)"` + SkillNames []string `json:"skill_names,omitempty" jsonschema:"Skill name patterns (supports wildcards: * ? [])"` + Locators []string `json:"locators,omitempty" jsonschema:"Locator patterns (supports wildcards: * ? [])"` + ModuleNames []string `json:"module_names,omitempty" jsonschema:"Module name patterns (supports wildcards: * ? [])"` + DomainIDs []string `json:"domain_ids,omitempty" jsonschema:"Domain ID patterns (exact match only)"` + DomainNames []string `json:"domain_names,omitempty" jsonschema:"Domain name patterns (supports wildcards: * ? [])"` + CreatedAts []string `json:"created_ats,omitempty" jsonschema:"Created_at timestamp patterns (supports wildcards: * ? [])"` + Authors []string `json:"authors,omitempty" jsonschema:"Author name patterns (supports wildcards: * ? [])"` + SchemaVersions []string `json:"schema_versions,omitempty" jsonschema:"Schema version patterns (supports wildcards: * ? [])"` + ModuleIDs []string `json:"module_ids,omitempty" jsonschema:"Module ID patterns (exact match only)"` +} + +// SearchLocalOutput defines the output of local search. +type SearchLocalOutput struct { + RecordCIDs []string `json:"record_cids,omitempty" jsonschema:"Array of matching record CIDs"` + Count int `json:"count" jsonschema:"Number of results returned"` + HasMore bool `json:"has_more" jsonschema:"Whether more results are available beyond the limit"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"Error message if search failed"` +} + +const ( + defaultLimit = 100 + maxLimit = 1000 +) + +// SearchLocal searches for agent records on the local directory node. 
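+//
+// Illustrative query (a sketch; the patterns are hypothetical):
+//
+//	input := SearchLocalInput{
+//		Names:      []string{"agent-*"},
+//		SkillNames: []string{"*python*"},
+//		Limit:      50,
+//	}
+//	_, out, _ := SearchLocal(context.Background(), nil, input)
+//	// out.RecordCIDs lists the matching records; out.HasMore is a hint that
+//	// another page may exist whenever exactly Limit results are returned.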
+// +//nolint:cyclop +func SearchLocal(ctx context.Context, _ *mcp.CallToolRequest, input SearchLocalInput) ( + *mcp.CallToolResult, + SearchLocalOutput, + error, +) { + // Validate and set defaults + limit := defaultLimit + if input.Limit > 0 { + limit = input.Limit + if limit > maxLimit { + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("limit cannot exceed %d", maxLimit), + }, nil + } + } else if input.Limit < 0 { + return nil, SearchLocalOutput{ + ErrorMessage: "limit must be positive", + }, nil + } + + offset := 0 + if input.Offset > 0 { + offset = input.Offset + } else if input.Offset < 0 { + return nil, SearchLocalOutput{ + ErrorMessage: "offset cannot be negative", + }, nil + } + + // Build queries from input + queries := buildQueries(input) + if len(queries) == 0 { + return nil, SearchLocalOutput{ + ErrorMessage: "at least one query filter must be provided", + }, nil + } + + // Load client configuration + config, err := client.LoadConfig() + if err != nil { + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("Failed to load client configuration: %v", err), + }, nil + } + + // Create Directory client + c, err := client.New(ctx, client.WithConfig(config)) + if err != nil { + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("Failed to create Directory client: %v", err), + }, nil + } + defer c.Close() + + // Execute search + // Safe conversions: limit is capped at 1000, offset is validated by client + limit32 := uint32(limit) // #nosec G115 + offset32 := uint32(offset) // #nosec G115 + + result, err := c.SearchCIDs(ctx, &searchv1.SearchCIDsRequest{ + Limit: &limit32, + Offset: &offset32, + Queries: queries, + }) + if err != nil { + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("Search failed: %v", err), + }, nil + } + + // Collect results + recordCIDs := make([]string, 0, limit) + +L: + for { + select { + case resp := <-result.ResCh(): + cid := resp.GetRecordCid() + if cid != "" { + recordCIDs = append(recordCIDs, cid) + } + case err := <-result.ErrCh(): + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("Search stream error: %v", err), + }, nil + case <-result.DoneCh(): + break L + case <-ctx.Done(): + return nil, SearchLocalOutput{ + ErrorMessage: fmt.Sprintf("Search cancelled: %v", ctx.Err()), + }, nil + } + } + + // Determine if there are more results + hasMore := len(recordCIDs) == limit + + return nil, SearchLocalOutput{ + RecordCIDs: recordCIDs, + Count: len(recordCIDs), + HasMore: hasMore, + }, nil +} + +// buildQueries converts input filters to RecordQuery objects. 
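+// Each filter value becomes exactly one RecordQuery, so an input carrying
+// two Names and one Version yields three queries (see the unit tests below).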
+func buildQueries(input SearchLocalInput) []*searchv1.RecordQuery { + queries := make([]*searchv1.RecordQuery, 0, + len(input.Names)+len(input.Versions)+len(input.SkillIDs)+ + len(input.SkillNames)+len(input.Locators)+len(input.ModuleNames)+ + len(input.DomainIDs)+len(input.DomainNames)+ + len(input.CreatedAts)+len(input.Authors)+ + len(input.SchemaVersions)+len(input.ModuleIDs)) + + // Add name queries + for _, name := range input.Names { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, + Value: name, + }) + } + + // Add version queries + for _, version := range input.Versions { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, + Value: version, + }) + } + + // Add skill-id queries + for _, skillID := range input.SkillIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, + Value: skillID, + }) + } + + // Add skill-name queries + for _, skillName := range input.SkillNames { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, + Value: skillName, + }) + } + + // Add locator queries + for _, locator := range input.Locators { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: locator, + }) + } + + // Add module name queries + for _, moduleName := range input.ModuleNames { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, + Value: moduleName, + }) + } + + // Add domain-id queries + for _, domainID := range input.DomainIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, + Value: domainID, + }) + } + + // Add domain-name queries + for _, domainName := range input.DomainNames { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, + Value: domainName, + }) + } + + // Add created-at queries + for _, createdAt := range input.CreatedAts { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT, + Value: createdAt, + }) + } + + // Add author queries + for _, author := range input.Authors { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR, + Value: author, + }) + } + + // Add schema-version queries + for _, schemaVersion := range input.SchemaVersions { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION, + Value: schemaVersion, + }) + } + + // Add module-id queries + for _, moduleID := range input.ModuleIDs { + queries = append(queries, &searchv1.RecordQuery{ + Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID, + Value: moduleID, + }) + } + + return queries +} diff --git a/mcp/tools/search_test.go b/mcp/tools/search_test.go index 4089da144..e25c1e029 100644 --- a/mcp/tools/search_test.go +++ b/mcp/tools/search_test.go @@ -1,109 +1,109 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "testing" - - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/stretchr/testify/assert" -) - -func TestBuildQueries(t *testing.T) { - tests := []struct { - name string - input SearchLocalInput - expected int - }{ - { - name: "single name 
query", - input: SearchLocalInput{ - Names: []string{"test-agent"}, - }, - expected: 1, - }, - { - name: "multiple query types", - input: SearchLocalInput{ - Names: []string{"agent-*"}, - Versions: []string{"v1.*"}, - SkillNames: []string{"*python*"}, - }, - expected: 3, - }, - { - name: "all query types", - input: SearchLocalInput{ - Names: []string{"agent1", "agent2"}, - Versions: []string{"v1.0.0"}, - SkillIDs: []string{"10201"}, - SkillNames: []string{"Python"}, - Locators: []string{"docker-image:*"}, - ModuleNames: []string{"core-module"}, - DomainIDs: []string{"604"}, - DomainNames: []string{"*education*"}, - }, - expected: 9, - }, - { - name: "no queries", - input: SearchLocalInput{}, - expected: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - queries := buildQueries(tt.input) - assert.Len(t, queries, tt.expected) - }) - } -} - -func TestBuildQueriesTypes(t *testing.T) { - input := SearchLocalInput{ - Names: []string{"test-agent"}, - Versions: []string{"v1.0.0"}, - SkillIDs: []string{"10201"}, - SkillNames: []string{"Python"}, - Locators: []string{"docker-image:test"}, - ModuleNames: []string{"core"}, - DomainIDs: []string{"604"}, - DomainNames: []string{"*education*"}, - } - - queries := buildQueries(input) - assert.Len(t, queries, 8) - - // Verify query types are correctly mapped - expectedTypes := []searchv1.RecordQueryType{ - searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, - searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, - } - - for i, query := range queries { - assert.Equal(t, expectedTypes[i], query.GetType()) - } -} - -func TestBuildQueriesValues(t *testing.T) { - input := SearchLocalInput{ - Names: []string{"agent-*", "test-agent"}, - Versions: []string{"v1.*"}, - } - - queries := buildQueries(input) - assert.Len(t, queries, 3) - - // Verify values are preserved - assert.Equal(t, "agent-*", queries[0].GetValue()) - assert.Equal(t, "test-agent", queries[1].GetValue()) - assert.Equal(t, "v1.*", queries[2].GetValue()) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "testing" + + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/stretchr/testify/assert" +) + +func TestBuildQueries(t *testing.T) { + tests := []struct { + name string + input SearchLocalInput + expected int + }{ + { + name: "single name query", + input: SearchLocalInput{ + Names: []string{"test-agent"}, + }, + expected: 1, + }, + { + name: "multiple query types", + input: SearchLocalInput{ + Names: []string{"agent-*"}, + Versions: []string{"v1.*"}, + SkillNames: []string{"*python*"}, + }, + expected: 3, + }, + { + name: "all query types", + input: SearchLocalInput{ + Names: []string{"agent1", "agent2"}, + Versions: []string{"v1.0.0"}, + SkillIDs: []string{"10201"}, + SkillNames: []string{"Python"}, + Locators: []string{"docker-image:*"}, + ModuleNames: []string{"core-module"}, + DomainIDs: []string{"604"}, + DomainNames: []string{"*education*"}, + }, + expected: 9, + }, + { + name: "no queries", + input: SearchLocalInput{}, + expected: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + queries := 
buildQueries(tt.input) + assert.Len(t, queries, tt.expected) + }) + } +} + +func TestBuildQueriesTypes(t *testing.T) { + input := SearchLocalInput{ + Names: []string{"test-agent"}, + Versions: []string{"v1.0.0"}, + SkillIDs: []string{"10201"}, + SkillNames: []string{"Python"}, + Locators: []string{"docker-image:test"}, + ModuleNames: []string{"core"}, + DomainIDs: []string{"604"}, + DomainNames: []string{"*education*"}, + } + + queries := buildQueries(input) + assert.Len(t, queries, 8) + + // Verify query types are correctly mapped + expectedTypes := []searchv1.RecordQueryType{ + searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID, + searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME, + } + + for i, query := range queries { + assert.Equal(t, expectedTypes[i], query.GetType()) + } +} + +func TestBuildQueriesValues(t *testing.T) { + input := SearchLocalInput{ + Names: []string{"agent-*", "test-agent"}, + Versions: []string{"v1.*"}, + } + + queries := buildQueries(input) + assert.Len(t, queries, 3) + + // Verify values are preserved + assert.Equal(t, "agent-*", queries[0].GetValue()) + assert.Equal(t, "test-agent", queries[1].GetValue()) + assert.Equal(t, "v1.*", queries[2].GetValue()) +} diff --git a/mcp/tools/validate_record.go b/mcp/tools/validate_record.go index 672657f2f..f888f117f 100644 --- a/mcp/tools/validate_record.go +++ b/mcp/tools/validate_record.go @@ -1,62 +1,62 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/modelcontextprotocol/go-sdk/mcp" -) - -// ValidateRecordInput represents the input for validating an agent record. -type ValidateRecordInput struct { - RecordJSON string `json:"record_json" jsonschema:"JSON string of the agent record to validate against OASF schema"` -} - -// ValidateRecordOutput represents the output after validating an agent record. -type ValidateRecordOutput struct { - Valid bool `json:"valid" jsonschema:"Whether the record is valid according to OASF schema validation"` - SchemaVersion string `json:"schema_version,omitempty" jsonschema:"Detected OASF schema version (e.g. 0.3.1 or 0.7.0)"` - ValidationErrors []string `json:"validation_errors,omitempty" jsonschema:"List of validation error messages. Only present if valid=false. Use these to fix the record"` - ErrorMessage string `json:"error_message,omitempty" jsonschema:"General error message if validation process failed"` -} - -// ValidateRecord validates an agent record against the OASF schema. -// This performs full OASF schema validation and returns detailed errors. -func ValidateRecord(ctx context.Context, _ *mcp.CallToolRequest, input ValidateRecordInput) ( - *mcp.CallToolResult, - ValidateRecordOutput, - error, -) { - // Try to unmarshal the JSON into a Record - record, err := corev1.UnmarshalRecord([]byte(input.RecordJSON)) - if err != nil { - return nil, ValidateRecordOutput{ - Valid: false, - ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v. 
Please ensure the JSON is valid and follows the OASF schema structure.", err), - }, nil - } - - // Get schema version - schemaVersion := record.GetSchemaVersion() - - // Validate the record using OASF SDK - valid, validationErrors, err := record.Validate(ctx) - if err != nil { - return nil, ValidateRecordOutput{ - Valid: false, - SchemaVersion: schemaVersion, - ErrorMessage: fmt.Sprintf("Validation error: %v", err), - }, nil - } - - // Return validation results - return nil, ValidateRecordOutput{ - Valid: valid, - SchemaVersion: schemaVersion, - ValidationErrors: validationErrors, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// ValidateRecordInput represents the input for validating an agent record. +type ValidateRecordInput struct { + RecordJSON string `json:"record_json" jsonschema:"JSON string of the agent record to validate against OASF schema"` +} + +// ValidateRecordOutput represents the output after validating an agent record. +type ValidateRecordOutput struct { + Valid bool `json:"valid" jsonschema:"Whether the record is valid according to OASF schema validation"` + SchemaVersion string `json:"schema_version,omitempty" jsonschema:"Detected OASF schema version (e.g. 0.3.1 or 0.7.0)"` + ValidationErrors []string `json:"validation_errors,omitempty" jsonschema:"List of validation error messages. Only present if valid=false. Use these to fix the record"` + ErrorMessage string `json:"error_message,omitempty" jsonschema:"General error message if validation process failed"` +} + +// ValidateRecord validates an agent record against the OASF schema. +// This performs full OASF schema validation and returns detailed errors. +func ValidateRecord(ctx context.Context, _ *mcp.CallToolRequest, input ValidateRecordInput) ( + *mcp.CallToolResult, + ValidateRecordOutput, + error, +) { + // Try to unmarshal the JSON into a Record + record, err := corev1.UnmarshalRecord([]byte(input.RecordJSON)) + if err != nil { + return nil, ValidateRecordOutput{ + Valid: false, + ErrorMessage: fmt.Sprintf("Failed to parse record JSON: %v. 
Please ensure the JSON is valid and follows the OASF schema structure.", err), + }, nil + } + + // Get schema version + schemaVersion := record.GetSchemaVersion() + + // Validate the record using OASF SDK + valid, validationErrors, err := record.Validate(ctx) + if err != nil { + return nil, ValidateRecordOutput{ + Valid: false, + SchemaVersion: schemaVersion, + ErrorMessage: fmt.Sprintf("Validation error: %v", err), + }, nil + } + + // Return validation results + return nil, ValidateRecordOutput{ + Valid: valid, + SchemaVersion: schemaVersion, + ValidationErrors: validationErrors, + }, nil +} diff --git a/mcp/tools/validate_record_test.go b/mcp/tools/validate_record_test.go index 6bbe0348e..8047a8894 100644 --- a/mcp/tools/validate_record_test.go +++ b/mcp/tools/validate_record_test.go @@ -1,83 +1,83 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package tools - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestValidateRecord(t *testing.T) { - validRecord := `{ - "schema_version": "0.7.0", - "name": "test-agent", - "version": "1.0.0", - "description": "A test agent", - "authors": ["Test Author "], - "created_at": "2024-01-01T00:00:00Z", - "locators": [ - { - "type": "helm_chart", - "url": "https://example.com/helm-chart.tgz" - } - ], - "skills": [ - { - "name": "natural_language_processing/natural_language_understanding" - } - ] - }` - - t.Run("should validate a valid record", func(t *testing.T) { - ctx := context.Background() - input := ValidateRecordInput{RecordJSON: validRecord} - - _, output, err := ValidateRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Empty(t, output.ErrorMessage) - assert.True(t, output.Valid) - assert.Equal(t, "0.7.0", output.SchemaVersion) - assert.Empty(t, output.ValidationErrors) - }) - - t.Run("should reject invalid JSON", func(t *testing.T) { - ctx := context.Background() - input := ValidateRecordInput{RecordJSON: "not valid json"} - - _, output, err := ValidateRecord(ctx, nil, input) - - require.NoError(t, err) - assert.NotEmpty(t, output.ErrorMessage) - assert.False(t, output.Valid) - assert.Contains(t, output.ErrorMessage, "Failed to parse") - }) - - t.Run("should reject record missing required fields", func(t *testing.T) { - ctx := context.Background() - invalidRecord := `{"schema_version": "0.7.0"}` - input := ValidateRecordInput{RecordJSON: invalidRecord} - - _, output, err := ValidateRecord(ctx, nil, input) - - require.NoError(t, err) - assert.Empty(t, output.ErrorMessage) - assert.False(t, output.Valid) - assert.NotEmpty(t, output.ValidationErrors) - }) - - t.Run("should reject empty input", func(t *testing.T) { - ctx := context.Background() - input := ValidateRecordInput{RecordJSON: ""} - - _, output, err := ValidateRecord(ctx, nil, input) - - require.NoError(t, err) - assert.NotEmpty(t, output.ErrorMessage) - assert.Contains(t, output.ErrorMessage, "Failed to parse") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package tools + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidateRecord(t *testing.T) { + validRecord := `{ + "schema_version": "0.7.0", + "name": "test-agent", + "version": "1.0.0", + "description": "A test agent", + "authors": ["Test Author "], + "created_at": "2024-01-01T00:00:00Z", + "locators": [ + { + "type": "helm_chart", + "url": 
"https://example.com/helm-chart.tgz" + } + ], + "skills": [ + { + "name": "natural_language_processing/natural_language_understanding" + } + ] + }` + + t.Run("should validate a valid record", func(t *testing.T) { + ctx := context.Background() + input := ValidateRecordInput{RecordJSON: validRecord} + + _, output, err := ValidateRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Empty(t, output.ErrorMessage) + assert.True(t, output.Valid) + assert.Equal(t, "0.7.0", output.SchemaVersion) + assert.Empty(t, output.ValidationErrors) + }) + + t.Run("should reject invalid JSON", func(t *testing.T) { + ctx := context.Background() + input := ValidateRecordInput{RecordJSON: "not valid json"} + + _, output, err := ValidateRecord(ctx, nil, input) + + require.NoError(t, err) + assert.NotEmpty(t, output.ErrorMessage) + assert.False(t, output.Valid) + assert.Contains(t, output.ErrorMessage, "Failed to parse") + }) + + t.Run("should reject record missing required fields", func(t *testing.T) { + ctx := context.Background() + invalidRecord := `{"schema_version": "0.7.0"}` + input := ValidateRecordInput{RecordJSON: invalidRecord} + + _, output, err := ValidateRecord(ctx, nil, input) + + require.NoError(t, err) + assert.Empty(t, output.ErrorMessage) + assert.False(t, output.Valid) + assert.NotEmpty(t, output.ValidationErrors) + }) + + t.Run("should reject empty input", func(t *testing.T) { + ctx := context.Background() + input := ValidateRecordInput{RecordJSON: ""} + + _, output, err := ValidateRecord(ctx, nil, input) + + require.NoError(t, err) + assert.NotEmpty(t, output.ErrorMessage) + assert.Contains(t, output.ErrorMessage, "Failed to parse") + }) +} diff --git a/proto/agntcy/dir/core/v1/record.proto b/proto/agntcy/dir/core/v1/record.proto index ff3174b8f..417b2b0cd 100644 --- a/proto/agntcy/dir/core/v1/record.proto +++ b/proto/agntcy/dir/core/v1/record.proto @@ -1,66 +1,66 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.core.v1; - -import "google/protobuf/struct.proto"; - -// Defines a reference or a globally unique content identifier of a record. -message RecordRef { - // Globally-unique content identifier (CID) of the record. - // Specs: https://github.com/multiformats/cid - string cid = 1; -} - -// Defines metadata about a record. -message RecordMeta { - // CID of the record. - string cid = 1; - - // Annotations attached to the record. - map annotations = 2; - - // Schema version of the record. - string schema_version = 3; - - // Creation timestamp of the record in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - string created_at = 4; -} - -// Record is a generic object that encapsulates data of different Record types. -// -// Supported schemas: -// -// v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent -// v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record -message Record { - google.protobuf.Struct data = 1; -} - -// RecordReferrer represents a referrer object or an association -// to a record. The actual structure of the referrer object can vary -// depending on the type of referrer (e.g., signature, public key, etc.). -// -// RecordReferrer types in the `agntcy.dir.` namespace are reserved for -// Directory-specific schemas and will be validated across Dir services. -message RecordReferrer { - // The type of the referrer. - // For example, "agntcy.dir.sign.v1.Signature" for signatures. 
- string type = 1; - - // Record reference to which this referrer is associated. - core.v1.RecordRef record_ref = 2; - - // Annotations attached to the referrer object. - map annotations = 3; - - // Creation timestamp of the record in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - string created_at = 4; - - // The actual data of the referrer. - google.protobuf.Struct data = 5; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.core.v1; + +import "google/protobuf/struct.proto"; + +// Defines a reference or a globally unique content identifier of a record. +message RecordRef { + // Globally-unique content identifier (CID) of the record. + // Specs: https://github.com/multiformats/cid + string cid = 1; +} + +// Defines metadata about a record. +message RecordMeta { + // CID of the record. + string cid = 1; + + // Annotations attached to the record. + map annotations = 2; + + // Schema version of the record. + string schema_version = 3; + + // Creation timestamp of the record in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + string created_at = 4; +} + +// Record is a generic object that encapsulates data of different Record types. +// +// Supported schemas: +// +// v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent +// v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record +message Record { + google.protobuf.Struct data = 1; +} + +// RecordReferrer represents a referrer object or an association +// to a record. The actual structure of the referrer object can vary +// depending on the type of referrer (e.g., signature, public key, etc.). +// +// RecordReferrer types in the `agntcy.dir.` namespace are reserved for +// Directory-specific schemas and will be validated across Dir services. +message RecordReferrer { + // The type of the referrer. + // For example, "agntcy.dir.sign.v1.Signature" for signatures. + string type = 1; + + // Record reference to which this referrer is associated. + core.v1.RecordRef record_ref = 2; + + // Annotations attached to the referrer object. + map annotations = 3; + + // Creation timestamp of the record in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + string created_at = 4; + + // The actual data of the referrer. + google.protobuf.Struct data = 5; +} diff --git a/proto/agntcy/dir/events/v1/event_service.proto b/proto/agntcy/dir/events/v1/event_service.proto index cd5149ead..d884a07da 100644 --- a/proto/agntcy/dir/events/v1/event_service.proto +++ b/proto/agntcy/dir/events/v1/event_service.proto @@ -1,125 +1,125 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.events.v1; - -import "google/protobuf/timestamp.proto"; - -// EventService provides real-time event streaming for all system operations. -// Events are delivered from subscription time forward with no history or replay. -// This service enables external applications to react to system changes in real-time. -service EventService { - // Listen establishes a streaming connection to receive events. - // Events are only delivered while the stream is active. - // On disconnect, missed events are not recoverable. - rpc Listen(ListenRequest) returns (stream ListenResponse); -} - -// ListenRequest specifies filters for event subscription. -message ListenRequest { - // Event types to subscribe to. 
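As an illustration of the RecordReferrer shape defined above, a hedged Go sketch that builds a referrer carrying a signature payload. The Go field names assume standard protoc-gen-go output for this proto; the CID and payload values are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"time"

	corev1 "github.com/agntcy/dir/api/core/v1"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Arbitrary referrer payload; for the reserved "agntcy.dir.sign.v1.Signature"
	// type this would be a Signature message encoded as JSON fields.
	data, err := structpb.NewStruct(map[string]interface{}{
		"algorithm": "ECDSA_P256_SHA256",
		"signature": "base64-signature-bytes", // hypothetical value
	})
	if err != nil {
		log.Fatal(err)
	}

	referrer := &corev1.RecordReferrer{
		Type:      "agntcy.dir.sign.v1.Signature",
		RecordRef: &corev1.RecordRef{Cid: "baguqeera..."}, // hypothetical CID, truncated
		CreatedAt: time.Now().UTC().Format(time.RFC3339),
		Data:      data,
	}

	fmt.Println(referrer.GetType(), referrer.GetRecordRef().GetCid())
}
```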
- // If empty, subscribes to all event types. - repeated EventType event_types = 1; - - // Optional label filters (e.g., "/skills/AI", "/domains/research"). - // Only events for records matching these labels are delivered. - // Uses substring matching. - repeated string label_filters = 2; - - // Optional CID filters. - // Only events for specific CIDs are delivered. - repeated string cid_filters = 3; -} - -// ListenResponse is the response message for the Listen RPC. -// Wraps the Event message to allow for future extensions without breaking the Event structure. -message ListenResponse { - // The event that occurred. - Event event = 1; - - // Future fields can be added here without breaking existing clients: - // - Stream metadata - // - Sequence numbers - // - Acknowledgment tokens - // - etc. -} - -// Event represents a system event that occurred. -message Event { - // Unique event identifier (generated by the system). - string id = 1; - - // Type of event that occurred. - EventType type = 2; - - // When the event occurred. - google.protobuf.Timestamp timestamp = 3; - - // Resource identifier (CID for records, sync_id for syncs, etc.). - string resource_id = 4; - - // Optional labels associated with the record (for record events). - repeated string labels = 5; - - // Optional metadata for additional context. - // Used for flexible event-specific data that doesn't fit standard fields. - map metadata = 7; -} - -// EventType represents all valid event types in the system. -// Each value represents a specific operation that can occur. -// -// Supported Events: -// - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED -// - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED -// - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED -// - Sign: RECORD_SIGNED -enum EventType { - // Unknown/unspecified event type. - EVENT_TYPE_UNSPECIFIED = 0; - - // Store service events - record storage operations - - // A record was pushed to local storage. - EVENT_TYPE_RECORD_PUSHED = 1; - - // A record was pulled from storage. - EVENT_TYPE_RECORD_PULLED = 2; - - // A record was deleted from storage. - EVENT_TYPE_RECORD_DELETED = 3; - - // Routing service events - network operations - - // A record was published/announced to the network. - EVENT_TYPE_RECORD_PUBLISHED = 4; - - // A record was unpublished from the network. - EVENT_TYPE_RECORD_UNPUBLISHED = 5; - - // Sync service events - synchronization operations - - // A sync operation was created/initiated. - EVENT_TYPE_SYNC_CREATED = 6; - - // A sync operation completed successfully. - EVENT_TYPE_SYNC_COMPLETED = 7; - - // A sync operation failed. - EVENT_TYPE_SYNC_FAILED = 8; - - // Sign service events - cryptographic operations - - // A record was signed. - EVENT_TYPE_RECORD_SIGNED = 9; - - // Future event types can be added here without breaking existing clients. - // Examples: - // EVENT_TYPE_RECORD_VERIFIED = 10; - // EVENT_TYPE_RECORD_SEARCHED = 11; - // EVENT_TYPE_REMOTE_RECORD_ANNOUNCED = 12; - // EVENT_TYPE_PEER_CONNECTED = 13; - // EVENT_TYPE_PEER_DISCONNECTED = 14; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.events.v1; + +import "google/protobuf/timestamp.proto"; + +// EventService provides real-time event streaming for all system operations. +// Events are delivered from subscription time forward with no history or replay. +// This service enables external applications to react to system changes in real-time. 
+service EventService { + // Listen establishes a streaming connection to receive events. + // Events are only delivered while the stream is active. + // On disconnect, missed events are not recoverable. + rpc Listen(ListenRequest) returns (stream ListenResponse); +} + +// ListenRequest specifies filters for event subscription. +message ListenRequest { + // Event types to subscribe to. + // If empty, subscribes to all event types. + repeated EventType event_types = 1; + + // Optional label filters (e.g., "/skills/AI", "/domains/research"). + // Only events for records matching these labels are delivered. + // Uses substring matching. + repeated string label_filters = 2; + + // Optional CID filters. + // Only events for specific CIDs are delivered. + repeated string cid_filters = 3; +} + +// ListenResponse is the response message for the Listen RPC. +// Wraps the Event message to allow for future extensions without breaking the Event structure. +message ListenResponse { + // The event that occurred. + Event event = 1; + + // Future fields can be added here without breaking existing clients: + // - Stream metadata + // - Sequence numbers + // - Acknowledgment tokens + // - etc. +} + +// Event represents a system event that occurred. +message Event { + // Unique event identifier (generated by the system). + string id = 1; + + // Type of event that occurred. + EventType type = 2; + + // When the event occurred. + google.protobuf.Timestamp timestamp = 3; + + // Resource identifier (CID for records, sync_id for syncs, etc.). + string resource_id = 4; + + // Optional labels associated with the record (for record events). + repeated string labels = 5; + + // Optional metadata for additional context. + // Used for flexible event-specific data that doesn't fit standard fields. + map metadata = 7; +} + +// EventType represents all valid event types in the system. +// Each value represents a specific operation that can occur. +// +// Supported Events: +// - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED +// - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED +// - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED +// - Sign: RECORD_SIGNED +enum EventType { + // Unknown/unspecified event type. + EVENT_TYPE_UNSPECIFIED = 0; + + // Store service events - record storage operations + + // A record was pushed to local storage. + EVENT_TYPE_RECORD_PUSHED = 1; + + // A record was pulled from storage. + EVENT_TYPE_RECORD_PULLED = 2; + + // A record was deleted from storage. + EVENT_TYPE_RECORD_DELETED = 3; + + // Routing service events - network operations + + // A record was published/announced to the network. + EVENT_TYPE_RECORD_PUBLISHED = 4; + + // A record was unpublished from the network. + EVENT_TYPE_RECORD_UNPUBLISHED = 5; + + // Sync service events - synchronization operations + + // A sync operation was created/initiated. + EVENT_TYPE_SYNC_CREATED = 6; + + // A sync operation completed successfully. + EVENT_TYPE_SYNC_COMPLETED = 7; + + // A sync operation failed. + EVENT_TYPE_SYNC_FAILED = 8; + + // Sign service events - cryptographic operations + + // A record was signed. + EVENT_TYPE_RECORD_SIGNED = 9; + + // Future event types can be added here without breaking existing clients. 
+ // Examples: + // EVENT_TYPE_RECORD_VERIFIED = 10; + // EVENT_TYPE_RECORD_SEARCHED = 11; + // EVENT_TYPE_REMOTE_RECORD_ANNOUNCED = 12; + // EVENT_TYPE_PEER_CONNECTED = 13; + // EVENT_TYPE_PEER_DISCONNECTED = 14; +} diff --git a/proto/agntcy/dir/routing/v1/peer.proto b/proto/agntcy/dir/routing/v1/peer.proto index 0694bc624..e532c1a4b 100644 --- a/proto/agntcy/dir/routing/v1/peer.proto +++ b/proto/agntcy/dir/routing/v1/peer.proto @@ -1,44 +1,44 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.routing.v1; - -message Peer { - // ID of a given peer, typically described by a protocol. - // For example: - // - SPIFFE: "spiffe://example.org/service/foo" - // - JWT: "jwt:sub=alice,iss=https://issuer.example.com" - // - Tor: "onion:abcdefghijklmno.onion" - // - DID: "did:example:123456789abcdefghi" - // - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" - string id = 1; - - // Multiaddrs for a given peer. - // For example: - // - "/ip4/127.0.0.1/tcp/4001" - // - "/ip6/::1/tcp/4001" - // - "/dns4/example.com/tcp/443/https" - repeated string addrs = 2; - - // Additional metadata about the peer. - map annotations = 3; - - // Used to signal the sender's connection capabilities to the peer. - PeerConnectionType connection = 4; -} - -enum PeerConnectionType { - // Sender does not have a connection to peer, and no extra information (default) - PEER_CONNECTION_TYPE_NOT_CONNECTED = 0; - - // Sender has a live connection to peer. - PEER_CONNECTION_TYPE_CONNECTED = 1; - - // Sender recently connected to peer. - PEER_CONNECTION_TYPE_CAN_CONNECT = 2; - - // Sender made strong effort to connect to peer repeatedly but failed. - PEER_CONNECTION_TYPE_CANNOT_CONNECT = 3; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.routing.v1; + +message Peer { + // ID of a given peer, typically described by a protocol. + // For example: + // - SPIFFE: "spiffe://example.org/service/foo" + // - JWT: "jwt:sub=alice,iss=https://issuer.example.com" + // - Tor: "onion:abcdefghijklmno.onion" + // - DID: "did:example:123456789abcdefghi" + // - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" + string id = 1; + + // Multiaddrs for a given peer. + // For example: + // - "/ip4/127.0.0.1/tcp/4001" + // - "/ip6/::1/tcp/4001" + // - "/dns4/example.com/tcp/443/https" + repeated string addrs = 2; + + // Additional metadata about the peer. + map annotations = 3; + + // Used to signal the sender's connection capabilities to the peer. + PeerConnectionType connection = 4; +} + +enum PeerConnectionType { + // Sender does not have a connection to peer, and no extra information (default) + PEER_CONNECTION_TYPE_NOT_CONNECTED = 0; + + // Sender has a live connection to peer. + PEER_CONNECTION_TYPE_CONNECTED = 1; + + // Sender recently connected to peer. + PEER_CONNECTION_TYPE_CAN_CONNECT = 2; + + // Sender made strong effort to connect to peer repeatedly but failed. 
+ PEER_CONNECTION_TYPE_CANNOT_CONNECT = 3; +} diff --git a/proto/agntcy/dir/routing/v1/publication_service.proto b/proto/agntcy/dir/routing/v1/publication_service.proto index 973cfaa52..e33af4c19 100644 --- a/proto/agntcy/dir/routing/v1/publication_service.proto +++ b/proto/agntcy/dir/routing/v1/publication_service.proto @@ -1,105 +1,105 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.routing.v1; - -import "agntcy/dir/routing/v1/routing_service.proto"; - -// PublicationService manages publication requests for announcing records to the DHT. -// -// Publications are stored in the database and processed by a worker that runs every hour. -// The publication workflow: -// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records -// 2. Publication requests are added to the database -// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published -// 4. PublicationWorker announces the records with these CIDs to the DHT -service PublicationService { - // CreatePublication creates a new publication request that will be processed by the PublicationWorker. - // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. - rpc CreatePublication(PublishRequest) returns (CreatePublicationResponse); - - // ListPublications returns a stream of all publication requests in the system. - // This allows monitoring of pending, processing, and completed publication requests. - rpc ListPublications(ListPublicationsRequest) returns (stream ListPublicationsItem); - - // GetPublication retrieves details of a specific publication request by its identifier. - // This includes the current status and any associated metadata. - rpc GetPublication(GetPublicationRequest) returns (GetPublicationResponse); -} - -// CreatePublicationResponse returns the result of creating a publication request. -// This includes the publication ID and any relevant metadata. -message CreatePublicationResponse { - // Unique identifier of the publication operation. - string publication_id = 1; -} - -// ListPublicationsRequest contains optional filters for listing publication requests. -message ListPublicationsRequest { - // Optional limit on the number of results to return. - optional uint32 limit = 2; - - // Optional offset for pagination of results. - optional uint32 offset = 3; -} - -// ListPublicationsItem represents a single publication request in the list response. -// Contains publication details including ID, status, and creation timestamp. -message ListPublicationsItem { - // Unique identifier of the publication operation. - string publication_id = 1; - - // Current status of the publication operation. - PublicationStatus status = 2; - - // Timestamp when the publication operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - string created_time = 3; - - // Timestamp of the most recent status update for this publication in the RFC3339 format. - string last_update_time = 4; -} - -// GetPublicationRequest specifies which publication to retrieve by its identifier. -message GetPublicationRequest { - // Unique identifier of the publication operation to query. - string publication_id = 1; -} - -// GetPublicationResponse contains the full details of a specific publication request. 
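To show how a client would consume the EventService defined earlier, a minimal sketch, assuming a generated Go stub under an eventsv1 import path and a hypothetical server address; only fields shown in ListenRequest are used:

```go
package main

import (
	"context"
	"fmt"
	"log"

	eventsv1 "github.com/agntcy/dir/api/events/v1" // assumed import path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:8888", // hypothetical address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := eventsv1.NewEventServiceClient(conn)

	// Subscribe to push events for records carrying an AI skill label.
	stream, err := client.Listen(context.Background(), &eventsv1.ListenRequest{
		EventTypes:   []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED},
		LabelFilters: []string{"/skills/AI"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Events arrive only while the stream is open; missed events are not replayed.
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		ev := resp.GetEvent()
		fmt.Println(ev.GetType(), ev.GetResourceId(), ev.GetLabels())
	}
}
```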
-// Includes status, progress information, and any error details if applicable. -message GetPublicationResponse { - // Unique identifier of the publication operation. - string publication_id = 1; - - // Current status of the publication operation. - PublicationStatus status = 2; - - // Timestamp when the publication operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - string created_time = 3; - - // Timestamp of the most recent status update for this publication in the RFC3339 format. - string last_update_time = 4; -} - -// PublicationStatus represents the current state of a publication request. -// Publications progress from pending to processing to completed or failed states. -enum PublicationStatus { - // Default/unset status - should not be used in practice - PUBLICATION_STATUS_UNSPECIFIED = 0; - - // Sync operation has been created but not yet started - PUBLICATION_STATUS_PENDING = 1; - - // Sync operation is actively discovering and transferring objects - PUBLICATION_STATUS_IN_PROGRESS = 2; - - // Sync operation has been successfully completed - PUBLICATION_STATUS_COMPLETED = 3; - - // Sync operation encountered an error and stopped - PUBLICATION_STATUS_FAILED = 4; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.routing.v1; + +import "agntcy/dir/routing/v1/routing_service.proto"; + +// PublicationService manages publication requests for announcing records to the DHT. +// +// Publications are stored in the database and processed by a worker that runs every hour. +// The publication workflow: +// 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records +// 2. Publication requests are added to the database +// 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published +// 4. PublicationWorker announces the records with these CIDs to the DHT +service PublicationService { + // CreatePublication creates a new publication request that will be processed by the PublicationWorker. + // The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. + rpc CreatePublication(PublishRequest) returns (CreatePublicationResponse); + + // ListPublications returns a stream of all publication requests in the system. + // This allows monitoring of pending, processing, and completed publication requests. + rpc ListPublications(ListPublicationsRequest) returns (stream ListPublicationsItem); + + // GetPublication retrieves details of a specific publication request by its identifier. + // This includes the current status and any associated metadata. + rpc GetPublication(GetPublicationRequest) returns (GetPublicationResponse); +} + +// CreatePublicationResponse returns the result of creating a publication request. +// This includes the publication ID and any relevant metadata. +message CreatePublicationResponse { + // Unique identifier of the publication operation. + string publication_id = 1; +} + +// ListPublicationsRequest contains optional filters for listing publication requests. +message ListPublicationsRequest { + // Optional limit on the number of results to return. + optional uint32 limit = 2; + + // Optional offset for pagination of results. + optional uint32 offset = 3; +} + +// ListPublicationsItem represents a single publication request in the list response. 
+// Contains publication details including ID, status, and creation timestamp. +message ListPublicationsItem { + // Unique identifier of the publication operation. + string publication_id = 1; + + // Current status of the publication operation. + PublicationStatus status = 2; + + // Timestamp when the publication operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + string created_time = 3; + + // Timestamp of the most recent status update for this publication in the RFC3339 format. + string last_update_time = 4; +} + +// GetPublicationRequest specifies which publication to retrieve by its identifier. +message GetPublicationRequest { + // Unique identifier of the publication operation to query. + string publication_id = 1; +} + +// GetPublicationResponse contains the full details of a specific publication request. +// Includes status, progress information, and any error details if applicable. +message GetPublicationResponse { + // Unique identifier of the publication operation. + string publication_id = 1; + + // Current status of the publication operation. + PublicationStatus status = 2; + + // Timestamp when the publication operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + string created_time = 3; + + // Timestamp of the most recent status update for this publication in the RFC3339 format. + string last_update_time = 4; +} + +// PublicationStatus represents the current state of a publication request. +// Publications progress from pending to processing to completed or failed states. +enum PublicationStatus { + // Default/unset status - should not be used in practice + PUBLICATION_STATUS_UNSPECIFIED = 0; + + // Publication operation has been created but not yet started + PUBLICATION_STATUS_PENDING = 1; + + // Publication operation is actively being processed (resolving record CIDs and announcing them to the DHT) + PUBLICATION_STATUS_IN_PROGRESS = 2; + + // Publication operation has been successfully completed + PUBLICATION_STATUS_COMPLETED = 3; + + // Publication operation encountered an error and stopped + PUBLICATION_STATUS_FAILED = 4; +} diff --git a/proto/agntcy/dir/routing/v1/record_query.proto b/proto/agntcy/dir/routing/v1/record_query.proto index ef9618e17..642be5de4 100644 --- a/proto/agntcy/dir/routing/v1/record_query.proto +++ b/proto/agntcy/dir/routing/v1/record_query.proto @@ -1,38 +1,38 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.routing.v1; - -// A query to match the record against during discovery. -// For example: -// { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } -// { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } -// { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } -// { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } -message RecordQuery { - // The type of the query to match against. - RecordQueryType type = 1; - - // The query value to match against. - string value = 2; -} - -// Defines a list of supported record query types. -enum RecordQueryType { - // Unspecified query type. - RECORD_QUERY_TYPE_UNSPECIFIED = 0; - - // Query for a skill name. - RECORD_QUERY_TYPE_SKILL = 1; - - // Query for a locator type. - RECORD_QUERY_TYPE_LOCATOR = 2; - - // Query for a domain name. - RECORD_QUERY_TYPE_DOMAIN = 3; - - // Query for a module name.
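The publication workflow above is asynchronous, so a client creates a request and then polls its status. A hedged sketch, assuming generated stubs under routingv1/corev1 import paths; the oneof wrapper name follows protoc-gen-go conventions and the CID is hypothetical:

```go
package example

import (
	"context"
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	routingv1 "github.com/agntcy/dir/api/routing/v1" // assumed import path
)

func publishAndPoll(ctx context.Context, client routingv1.PublicationServiceClient) error {
	// Create a publication request for one specific record CID.
	created, err := client.CreatePublication(ctx, &routingv1.PublishRequest{
		Request: &routingv1.PublishRequest_RecordRefs{
			RecordRefs: &routingv1.RecordRefs{
				Refs: []*corev1.RecordRef{{Cid: "baguqeera..."}}, // hypothetical CID
			},
		},
	})
	if err != nil {
		return err
	}

	// The hourly worker processes the request; poll for its status.
	pub, err := client.GetPublication(ctx, &routingv1.GetPublicationRequest{
		PublicationId: created.GetPublicationId(),
	})
	if err != nil {
		return err
	}
	fmt.Println(pub.GetStatus(), pub.GetLastUpdateTime())
	return nil
}
```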
- RECORD_QUERY_TYPE_MODULE = 4; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.routing.v1; + +// A query to match the record against during discovery. +// For example: +// { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } +// { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } +// { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } +// { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } +message RecordQuery { + // The type of the query to match against. + RecordQueryType type = 1; + + // The query value to match against. + string value = 2; +} + +// Defines a list of supported record query types. +enum RecordQueryType { + // Unspecified query type. + RECORD_QUERY_TYPE_UNSPECIFIED = 0; + + // Query for a skill name. + RECORD_QUERY_TYPE_SKILL = 1; + + // Query for a locator type. + RECORD_QUERY_TYPE_LOCATOR = 2; + + // Query for a domain name. + RECORD_QUERY_TYPE_DOMAIN = 3; + + // Query for a module name. + RECORD_QUERY_TYPE_MODULE = 4; +} diff --git a/proto/agntcy/dir/routing/v1/routing_service.proto b/proto/agntcy/dir/routing/v1/routing_service.proto index b5f59b043..2dd74ae76 100644 --- a/proto/agntcy/dir/routing/v1/routing_service.proto +++ b/proto/agntcy/dir/routing/v1/routing_service.proto @@ -1,133 +1,133 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.routing.v1; - -import "agntcy/dir/core/v1/record.proto"; -import "agntcy/dir/routing/v1/peer.proto"; -import "agntcy/dir/routing/v1/record_query.proto"; -import "agntcy/dir/search/v1/record_query.proto"; -import "google/protobuf/empty.proto"; - -// Defines an interface for announcement and discovery -// of records across interconnected network. -// -// Middleware should be used to control who can perform these RPCs. -// Policies for the middleware can be handled via separate service. -service RoutingService { - // Announce to the network that this peer is providing a given record. - // This enables other peers to discover this record and retrieve it - // from this peer. Listeners can use this event to perform custom operations, - // for example by cloning the record. - // - // Items need to be periodically republished (eg. 24h) to the network - // to avoid stale data. Republication should be done in the background. - rpc Publish(PublishRequest) returns (google.protobuf.Empty); - - // Stop serving this record to the network. If other peers try - // to retrieve this record, the peer will refuse the request. - rpc Unpublish(UnpublishRequest) returns (google.protobuf.Empty); - - // Search records based on the request across the network. - // This will search the network for the record with the given parameters. - // - // It is possible that the records are stale or that they do not exist. - // Some records may be provided by multiple peers. - // - // Results from the search can be used as an input - // to Pull operation to retrieve the records. - rpc Search(SearchRequest) returns (stream SearchResponse); - - // List all records that this peer is currently providing - // that match the given parameters. - // This operation does not interact with the network. - rpc List(ListRequest) returns (stream ListResponse); -} - -message PublishRequest { - oneof request { - // References to the records to be published. - RecordRefs record_refs = 1; - - // Queries to match against the records to be published. 
- RecordQueries queries = 2; - - // TODO: Future enhancement - Publish all stored records. - // bool all_records = 3; - } -} - -message UnpublishRequest { - oneof request { - // References to the records to be unpublished. - RecordRefs record_refs = 1; - - // Queries to match against the records to be unpublished. - RecordQueries queries = 2; - - // TODO: Future enhancement - Unpublish all stored records. - // bool all_records = 3; - } -} - -message RecordRefs { - repeated core.v1.RecordRef refs = 1; -} - -message RecordQueries { - repeated search.v1.RecordQuery queries = 1; -} - -message SearchRequest { - // List of queries to match against the records. - repeated RecordQuery queries = 1; - - // Minimal target query match score. - // For example, if min_match_score=2, it will return records that match - // at least two of the queries. - // If not set, it will return records that match at least one query. - optional uint32 min_match_score = 2; - - // Limit the number of results returned. - // If not set, it will return all discovered records. - // Note that this is a soft limit, as the search may return more results - // than the limit if there are multiple peers providing the same record. - optional uint32 limit = 3; - - // TODO: we may want to add a way to filter results by peer. -} - -message SearchResponse { - // The record that matches the search query. - core.v1.RecordRef record_ref = 1; - - // The peer that provided the record. - Peer peer = 2; - - // The queries that were matched. - repeated RecordQuery match_queries = 3; - - // The score of the search match. - uint32 match_score = 4; -} - -message ListRequest { - // List of queries to match against the records. - // If set, all queries must match for the record to be returned. - repeated RecordQuery queries = 1; - - // Limit the number of results returned. - // If not set, it will return all records that this peer is providing. - optional uint32 limit = 2; -} - -message ListResponse { - // The record that matches the list queries. - core.v1.RecordRef record_ref = 1; - - // Labels associated with this record (skills, domains, modules) - // Derived from the record content for CLI display purposes - repeated string labels = 2; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.routing.v1; + +import "agntcy/dir/core/v1/record.proto"; +import "agntcy/dir/routing/v1/peer.proto"; +import "agntcy/dir/routing/v1/record_query.proto"; +import "agntcy/dir/search/v1/record_query.proto"; +import "google/protobuf/empty.proto"; + +// Defines an interface for announcement and discovery +// of records across interconnected network. +// +// Middleware should be used to control who can perform these RPCs. +// Policies for the middleware can be handled via separate service. +service RoutingService { + // Announce to the network that this peer is providing a given record. + // This enables other peers to discover this record and retrieve it + // from this peer. Listeners can use this event to perform custom operations, + // for example by cloning the record. + // + // Items need to be periodically republished (eg. 24h) to the network + // to avoid stale data. Republication should be done in the background. + rpc Publish(PublishRequest) returns (google.protobuf.Empty); + + // Stop serving this record to the network. If other peers try + // to retrieve this record, the peer will refuse the request. 
+ rpc Unpublish(UnpublishRequest) returns (google.protobuf.Empty); + + // Search records based on the request across the network. + // This will search the network for the record with the given parameters. + // + // It is possible that the records are stale or that they do not exist. + // Some records may be provided by multiple peers. + // + // Results from the search can be used as an input + // to Pull operation to retrieve the records. + rpc Search(SearchRequest) returns (stream SearchResponse); + + // List all records that this peer is currently providing + // that match the given parameters. + // This operation does not interact with the network. + rpc List(ListRequest) returns (stream ListResponse); +} + +message PublishRequest { + oneof request { + // References to the records to be published. + RecordRefs record_refs = 1; + + // Queries to match against the records to be published. + RecordQueries queries = 2; + + // TODO: Future enhancement - Publish all stored records. + // bool all_records = 3; + } +} + +message UnpublishRequest { + oneof request { + // References to the records to be unpublished. + RecordRefs record_refs = 1; + + // Queries to match against the records to be unpublished. + RecordQueries queries = 2; + + // TODO: Future enhancement - Unpublish all stored records. + // bool all_records = 3; + } +} + +message RecordRefs { + repeated core.v1.RecordRef refs = 1; +} + +message RecordQueries { + repeated search.v1.RecordQuery queries = 1; +} + +message SearchRequest { + // List of queries to match against the records. + repeated RecordQuery queries = 1; + + // Minimal target query match score. + // For example, if min_match_score=2, it will return records that match + // at least two of the queries. + // If not set, it will return records that match at least one query. + optional uint32 min_match_score = 2; + + // Limit the number of results returned. + // If not set, it will return all discovered records. + // Note that this is a soft limit, as the search may return more results + // than the limit if there are multiple peers providing the same record. + optional uint32 limit = 3; + + // TODO: we may want to add a way to filter results by peer. +} + +message SearchResponse { + // The record that matches the search query. + core.v1.RecordRef record_ref = 1; + + // The peer that provided the record. + Peer peer = 2; + + // The queries that were matched. + repeated RecordQuery match_queries = 3; + + // The score of the search match. + uint32 match_score = 4; +} + +message ListRequest { + // List of queries to match against the records. + // If set, all queries must match for the record to be returned. + repeated RecordQuery queries = 1; + + // Limit the number of results returned. + // If not set, it will return all records that this peer is providing. + optional uint32 limit = 2; +} + +message ListResponse { + // The record that matches the list queries. 
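To illustrate min_match_score from the SearchRequest above, a sketch that asks for records matching both a skill and a domain query; the import path and stub names are assumptions following the generated-code conventions used elsewhere in this diff:

```go
package example

import (
	"context"
	"errors"
	"fmt"
	"io"

	routingv1 "github.com/agntcy/dir/api/routing/v1" // assumed import path
	"google.golang.org/protobuf/proto"
)

func searchNetwork(ctx context.Context, client routingv1.RoutingServiceClient) error {
	stream, err := client.Search(ctx, &routingv1.SearchRequest{
		Queries: []*routingv1.RecordQuery{
			{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing"},
			{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, Value: "research"},
		},
		// Require both queries to match; without this, one match suffices.
		MinMatchScore: proto.Uint32(2),
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // stream drained
		}
		if err != nil {
			return err
		}
		// Each hit names the providing peer; results may be stale or duplicated.
		fmt.Println(resp.GetRecordRef().GetCid(), resp.GetPeer().GetId(), resp.GetMatchScore())
	}
}
```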
+ core.v1.RecordRef record_ref = 1; + + // Labels associated with this record (skills, domains, modules) + // Derived from the record content for CLI display purposes + repeated string labels = 2; +} diff --git a/proto/agntcy/dir/search/v1/record_query.proto b/proto/agntcy/dir/search/v1/record_query.proto index 3ad5b0da8..218d45dff 100644 --- a/proto/agntcy/dir/search/v1/record_query.proto +++ b/proto/agntcy/dir/search/v1/record_query.proto @@ -1,80 +1,80 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.search.v1; - -// A query to match the record against during discovery. -// For example: -// Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } -// Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } -// Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } -// Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } -// List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } -// Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } -message RecordQuery { - // The type of the query to match against. - RecordQueryType type = 1; - - // The query value to match against. - // Supports wildcard patterns: - // '*' - matches zero or more characters - // '?' - matches exactly one character - // '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) - string value = 2; -} - -// Defines a list of supported record query types. -enum RecordQueryType { - // Unspecified query type. - RECORD_QUERY_TYPE_UNSPECIFIED = 0; - - // Query for a record name. - // Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" - RECORD_QUERY_TYPE_NAME = 1; - - // Query for a record version. - // Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" - RECORD_QUERY_TYPE_VERSION = 2; - - // Query for a skill ID. - // Numeric field - exact match only, no wildcard support. - RECORD_QUERY_TYPE_SKILL_ID = 3; - - // Query for a skill name. - // Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" - RECORD_QUERY_TYPE_SKILL_NAME = 4; - - // Query for a locator type. - // Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" - RECORD_QUERY_TYPE_LOCATOR = 5; - - // Query for a module name. - // Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" - RECORD_QUERY_TYPE_MODULE_NAME = 6; - - // Query for a domain ID. - // Numeric field - exact match only, no wildcard support. - RECORD_QUERY_TYPE_DOMAIN_ID = 7; - - // Query for a domain name. - // Supports wildcard patterns: "*education*", "healthcare/*", "*technology" - RECORD_QUERY_TYPE_DOMAIN_NAME = 8; - - // Query for a record's created_at timestamp. - // Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" - RECORD_QUERY_TYPE_CREATED_AT = 9; - - // Query for a record author. - // Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" - RECORD_QUERY_TYPE_AUTHOR = 10; - - // Query for a schema version. - // Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" - RECORD_QUERY_TYPE_SCHEMA_VERSION = 11; - - // Query for a module ID. - // Numeric field - exact match only, no wildcard support. 
- RECORD_QUERY_TYPE_MODULE_ID = 12; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.search.v1; + +// A query to match the record against during discovery. +// For example: +// Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } +// Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } +// Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } +// Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } +// List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } +// Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } +message RecordQuery { + // The type of the query to match against. + RecordQueryType type = 1; + + // The query value to match against. + // Supports wildcard patterns: + // '*' - matches zero or more characters + // '?' - matches exactly one character + // '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) + string value = 2; +} + +// Defines a list of supported record query types. +enum RecordQueryType { + // Unspecified query type. + RECORD_QUERY_TYPE_UNSPECIFIED = 0; + + // Query for a record name. + // Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" + RECORD_QUERY_TYPE_NAME = 1; + + // Query for a record version. + // Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" + RECORD_QUERY_TYPE_VERSION = 2; + + // Query for a skill ID. + // Numeric field - exact match only, no wildcard support. + RECORD_QUERY_TYPE_SKILL_ID = 3; + + // Query for a skill name. + // Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" + RECORD_QUERY_TYPE_SKILL_NAME = 4; + + // Query for a locator type. + // Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" + RECORD_QUERY_TYPE_LOCATOR = 5; + + // Query for a module name. + // Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" + RECORD_QUERY_TYPE_MODULE_NAME = 6; + + // Query for a domain ID. + // Numeric field - exact match only, no wildcard support. + RECORD_QUERY_TYPE_DOMAIN_ID = 7; + + // Query for a domain name. + // Supports wildcard patterns: "*education*", "healthcare/*", "*technology" + RECORD_QUERY_TYPE_DOMAIN_NAME = 8; + + // Query for a record's created_at timestamp. + // Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" + RECORD_QUERY_TYPE_CREATED_AT = 9; + + // Query for a record author. + // Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" + RECORD_QUERY_TYPE_AUTHOR = 10; + + // Query for a schema version. + // Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" + RECORD_QUERY_TYPE_SCHEMA_VERSION = 11; + + // Query for a module ID. + // Numeric field - exact match only, no wildcard support. 
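A small sketch of the wildcard rules documented above, using the searchv1 package already imported by the Go code in this diff; string-typed queries accept '*', '?', and '[]' patterns, while numeric ID fields match exactly:

```go
package example

import (
	searchv1 "github.com/agntcy/dir/api/search/v1"
)

func wildcardQueries() []*searchv1.RecordQuery {
	return []*searchv1.RecordQuery{
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, Value: "agent-[0-9]"},              // bracket class
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION, Value: "v1.0.?"},                // single character
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME, Value: "*machine*learning*"}, // substring pattern
		{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID, Value: "10201"},                // numeric: exact match only
	}
}
```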
+ RECORD_QUERY_TYPE_MODULE_ID = 12; +} diff --git a/proto/agntcy/dir/search/v1/search_service.proto b/proto/agntcy/dir/search/v1/search_service.proto index fa9170062..057a5d045 100644 --- a/proto/agntcy/dir/search/v1/search_service.proto +++ b/proto/agntcy/dir/search/v1/search_service.proto @@ -1,53 +1,53 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.search.v1; - -import "agntcy/dir/core/v1/record.proto"; -import "agntcy/dir/search/v1/record_query.proto"; - -service SearchService { - // Search for record CIDs that match the given parameters. - // Returns only CIDs for efficient lookups and piping to other commands. - // This operation does not interact with the network. - rpc SearchCIDs(SearchCIDsRequest) returns (stream SearchCIDsResponse); - - // Search for full records that match the given parameters. - // Returns complete record data including all metadata, skills, domains, etc. - // This operation does not interact with the network. - rpc SearchRecords(SearchRecordsRequest) returns (stream SearchRecordsResponse); -} - -message SearchCIDsRequest { - // List of queries to match against the records. - repeated RecordQuery queries = 1; - - // Optional limit on the number of results to return. - optional uint32 limit = 2; - - // Optional offset for pagination of results. - optional uint32 offset = 3; -} - -message SearchRecordsRequest { - // List of queries to match against the records. - repeated RecordQuery queries = 1; - - // Optional limit on the number of results to return. - optional uint32 limit = 2; - - // Optional offset for pagination of results. - optional uint32 offset = 3; -} - -message SearchCIDsResponse { - // The CID of the record that matches the search criteria. - string record_cid = 1; -} - -message SearchRecordsResponse { - // The full record that matches the search criteria. - agntcy.dir.core.v1.Record record = 1; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.search.v1; + +import "agntcy/dir/core/v1/record.proto"; +import "agntcy/dir/search/v1/record_query.proto"; + +service SearchService { + // Search for record CIDs that match the given parameters. + // Returns only CIDs for efficient lookups and piping to other commands. + // This operation does not interact with the network. + rpc SearchCIDs(SearchCIDsRequest) returns (stream SearchCIDsResponse); + + // Search for full records that match the given parameters. + // Returns complete record data including all metadata, skills, domains, etc. + // This operation does not interact with the network. + rpc SearchRecords(SearchRecordsRequest) returns (stream SearchRecordsResponse); +} + +message SearchCIDsRequest { + // List of queries to match against the records. + repeated RecordQuery queries = 1; + + // Optional limit on the number of results to return. + optional uint32 limit = 2; + + // Optional offset for pagination of results. + optional uint32 offset = 3; +} + +message SearchRecordsRequest { + // List of queries to match against the records. + repeated RecordQuery queries = 1; + + // Optional limit on the number of results to return. + optional uint32 limit = 2; + + // Optional offset for pagination of results. + optional uint32 offset = 3; +} + +message SearchCIDsResponse { + // The CID of the record that matches the search criteria. 
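To show the CID-only search path above end to end, a hedged client sketch; the limit is arbitrary and the streaming stub names are assumed from the generated code:

```go
package example

import (
	"context"
	"errors"
	"io"

	searchv1 "github.com/agntcy/dir/api/search/v1"
	"google.golang.org/protobuf/proto"
)

func searchCIDs(ctx context.Context, client searchv1.SearchServiceClient) ([]string, error) {
	stream, err := client.SearchCIDs(ctx, &searchv1.SearchCIDsRequest{
		Queries: []*searchv1.RecordQuery{
			{Type: searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME, Value: "web*"},
		},
		Limit: proto.Uint32(10), // arbitrary page size
	})
	if err != nil {
		return nil, err
	}
	var cids []string
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return cids, nil
		}
		if err != nil {
			return nil, err
		}
		// CIDs can be piped into Pull or other commands.
		cids = append(cids, resp.GetRecordCid())
	}
}
```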
+ string record_cid = 1; +} + +message SearchRecordsResponse { + // The full record that matches the search criteria. + agntcy.dir.core.v1.Record record = 1; +} diff --git a/proto/agntcy/dir/sign/v1/public_key.proto b/proto/agntcy/dir/sign/v1/public_key.proto index 7cd7abb98..8953f2df0 100644 --- a/proto/agntcy/dir/sign/v1/public_key.proto +++ b/proto/agntcy/dir/sign/v1/public_key.proto @@ -1,14 +1,14 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.sign.v1; - -// PublicKey is the public key data associated with a Record. -// Multiple public keys can be associated with a single Record. -message PublicKey { - // PEM-encoded public key string. - string key = 1; -} - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.sign.v1; + +// PublicKey is the public key data associated with a Record. +// Multiple public keys can be associated with a single Record. +message PublicKey { + // PEM-encoded public key string. + string key = 1; +} + diff --git a/proto/agntcy/dir/sign/v1/sign_service.proto b/proto/agntcy/dir/sign/v1/sign_service.proto index adc934fe1..6840cbfc5 100644 --- a/proto/agntcy/dir/sign/v1/sign_service.proto +++ b/proto/agntcy/dir/sign/v1/sign_service.proto @@ -1,85 +1,85 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.sign.v1; - -import "agntcy/dir/core/v1/record.proto"; -import "agntcy/dir/sign/v1/signature.proto"; - -// SignService provides methods to sign and verify records. -service SignService { - // Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase - rpc Sign(SignRequest) returns (SignResponse); - - // Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted - rpc Verify(VerifyRequest) returns (VerifyResponse); -} - -message SignRequest { - // Record reference to be signed - core.v1.RecordRef record_ref = 1; - - // Signing provider to use - SignRequestProvider provider = 2; -} - -message SignRequestProvider { - oneof request { - // Sign with OIDC provider - SignWithOIDC oidc = 1; - - // Sign with PEM-encoded public key - SignWithKey key = 2; - } -} - -message SignWithOIDC { - // List of sign options for OIDC - message SignOpts { - // Fulcio authority access URL (default value: https://fulcio.sigstage.dev) - optional string fulcio_url = 1; - - // Rekor validator access URL (default value: https://rekor.sigstage.dev) - optional string rekor_url = 2; - - // Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp) - optional string timestamp_url = 3; - - // OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth) - optional string oidc_provider_url = 4; - } - - // Token for OIDC provider - string id_token = 1; - - // Signing options for OIDC - SignOpts options = 2; -} - -message SignWithKey { - // Private key used for signing - bytes private_key = 1; - - // Password to unlock the private key - optional bytes password = 2; -} - -message SignResponse { - // Cryptographic signature of the record - Signature signature = 1; -} - -message VerifyRequest { - // Record reference to be verified - core.v1.RecordRef record_ref = 1; -} - -message VerifyResponse { - // The verify process result - bool success = 1; - - // Optional error 
message if verification failed
-  optional string error_message = 2;
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+syntax = "proto3";
+
+package agntcy.dir.sign.v1;
+
+import "agntcy/dir/core/v1/record.proto";
+import "agntcy/dir/sign/v1/signature.proto";
+
+// SignService provides methods to sign and verify records.
+service SignService {
+  // Sign a record using a keyless OIDC-based provider or a PEM-encoded private key with an optional passphrase
+  rpc Sign(SignRequest) returns (SignResponse);
+
+  // Verify a signed record using a keyless OIDC-based provider or a PEM-encoded public key
+  rpc Verify(VerifyRequest) returns (VerifyResponse);
+}
+
+message SignRequest {
+  // Record reference to be signed
+  core.v1.RecordRef record_ref = 1;
+
+  // Signing provider to use
+  SignRequestProvider provider = 2;
+}
+
+message SignRequestProvider {
+  oneof request {
+    // Sign with OIDC provider
+    SignWithOIDC oidc = 1;
+
+    // Sign with a PEM-encoded private key
+    SignWithKey key = 2;
+  }
+}
+
+message SignWithOIDC {
+  // List of sign options for OIDC
+  message SignOpts {
+    // Fulcio authority access URL (default value: https://fulcio.sigstage.dev)
+    optional string fulcio_url = 1;
+
+    // Rekor validator access URL (default value: https://rekor.sigstage.dev)
+    optional string rekor_url = 2;
+
+    // Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp)
+    optional string timestamp_url = 3;
+
+    // OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth)
+    optional string oidc_provider_url = 4;
+  }
+
+  // Token for OIDC provider
+  string id_token = 1;
+
+  // Signing options for OIDC
+  SignOpts options = 2;
+}
+
+message SignWithKey {
+  // Private key used for signing
+  bytes private_key = 1;
+
+  // Password to unlock the private key
+  optional bytes password = 2;
+}
+
+message SignResponse {
+  // Cryptographic signature of the record
+  Signature signature = 1;
+}
+
+message VerifyRequest {
+  // Record reference to be verified
+  core.v1.RecordRef record_ref = 1;
+}
+
+message VerifyResponse {
+  // The verify process result
+  bool success = 1;
+
+  // Optional error message if verification failed
+  optional string error_message = 2;
+}
diff --git a/proto/agntcy/dir/sign/v1/signature.proto b/proto/agntcy/dir/sign/v1/signature.proto
index decb65e62..1b47a020c 100644
--- a/proto/agntcy/dir/sign/v1/signature.proto
+++ b/proto/agntcy/dir/sign/v1/signature.proto
@@ -1,41 +1,41 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-syntax = "proto3";
-
-package agntcy.dir.sign.v1;
-
-// Signature is the signing data associated with a Record.
-// Multiple signatures can be associated with a single Record,
-// ie 1 record : N record signatures.
-//
-// Storage and management of signatures is provided via
-// StoreService as a RecordReferrer object.
-//
-// Signature can be encoded into RecordReferrer object as follows:
-// type = "agntcy.dir.sign.v1.Signature"
-// data = Signature message encoded as JSON
-message Signature {
-  // Metadata associated with the signature.
-  map<string, string> annotations = 1;
-
-  // Signing timestamp of the record in the RFC3339 format.
-  // Specs: https://www.rfc-editor.org/rfc/rfc3339.html
-  string signed_at = 2;
-
-  // The signature algorithm used (e.g., "ECDSA_P256_SHA256").
-  string algorithm = 3;
-
-  // Base64-encoded signature.
-  string signature = 4;
-
-  // Base64-encoded signing certificate.
-  string certificate = 5;
-
-  // Type of the signature content bundle.
-  string content_type = 6;
-
-  // Base64-encoded signature bundle produced by the signer.
-  // It is up to the client to interpret the content of the bundle.
-  string content_bundle = 7;
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+syntax = "proto3";
+
+package agntcy.dir.sign.v1;
+
+// Signature is the signing data associated with a Record.
+// Multiple signatures can be associated with a single Record,
+// i.e., 1 record : N record signatures.
+//
+// Storage and management of signatures is provided via
+// StoreService as a RecordReferrer object.
+//
+// Signature can be encoded into a RecordReferrer object as follows:
+//   type = "agntcy.dir.sign.v1.Signature"
+//   data = Signature message encoded as JSON
+message Signature {
+  // Metadata associated with the signature.
+  map<string, string> annotations = 1;
+
+  // Signing timestamp of the record in the RFC3339 format.
+  // Specs: https://www.rfc-editor.org/rfc/rfc3339.html
+  string signed_at = 2;
+
+  // The signature algorithm used (e.g., "ECDSA_P256_SHA256").
+  string algorithm = 3;
+
+  // Base64-encoded signature.
+  string signature = 4;
+
+  // Base64-encoded signing certificate.
+  string certificate = 5;
+
+  // Type of the signature content bundle.
+  string content_type = 6;
+
+  // Base64-encoded signature bundle produced by the signer.
+  // It is up to the client to interpret the content of the bundle.
+  string content_bundle = 7;
+}
diff --git a/proto/agntcy/dir/store/v1/store_service.proto b/proto/agntcy/dir/store/v1/store_service.proto
index 4e517cca9..5665109eb 100644
--- a/proto/agntcy/dir/store/v1/store_service.proto
+++ b/proto/agntcy/dir/store/v1/store_service.proto
@@ -1,78 +1,78 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-syntax = "proto3";
-
-package agntcy.dir.store.v1;
-
-import "agntcy/dir/core/v1/record.proto";
-import "google/protobuf/empty.proto";
-
-// Defines an interface for content-addressable storage
-// service for objects.
-//
-// Max object size: 4MB (to fully fit in a single request)
-// Max metadata size: 100KB
-//
-// Store service can be implemented by various storage backends,
-// such as local file system, OCI registry, etc.
-//
-// Middleware should be used to control who can perform these RPCs.
-// Policies for the middleware can be handled via separate service.
-//
-// Each operation is performed sequentially, meaning that
-// for the N-th request, N-th response will be returned.
-// If an error occurs, the stream will be cancelled.
-service StoreService {
-  // Push performs write operation for given records.
-  rpc Push(stream core.v1.Record) returns (stream core.v1.RecordRef);
-
-  // Pull performs read operation for given records.
-  rpc Pull(stream core.v1.RecordRef) returns (stream core.v1.Record);
-
-  // Lookup resolves basic metadata for the records.
-  rpc Lookup(stream core.v1.RecordRef) returns (stream core.v1.RecordMeta);
-
-  // Remove performs delete operation for the records.
-  rpc Delete(stream core.v1.RecordRef) returns (google.protobuf.Empty);
-
-  // PushReferrer performs write operation for record referrers.
-  rpc PushReferrer(stream PushReferrerRequest) returns (stream PushReferrerResponse);
-
-  // PullReferrer performs read operation for record referrers.
-  rpc PullReferrer(stream PullReferrerRequest) returns (stream PullReferrerResponse);
-}
-
-// PushReferrerRequest represents a record with optional OCI artifacts for push operations.
-message PushReferrerRequest {
-  // Record reference
-  core.v1.RecordRef record_ref = 1;
-
-  // RecordReferrer object to be stored for the record
-  core.v1.RecordReferrer referrer = 2;
-}
-
-// PushReferrerResponse
-message PushReferrerResponse {
-  // The push process result
-  bool success = 1;
-
-  // Optional error message if push failed
-  optional string error_message = 2;
-}
-
-// PullReferrerRequest represents a record with optional OCI artifacts for pull operations.
-message PullReferrerRequest {
-  // Record reference
-  core.v1.RecordRef record_ref = 1;
-
-  // Record referrer type to be pulled
-  // If not provided, all referrers will be pulled
-  optional string referrer_type = 2;
-}
-
-// PullReferrerResponse is returned after successfully fetching a record referrer.
-message PullReferrerResponse {
-  // RecordReferrer object associated with the record
-  core.v1.RecordReferrer referrer = 1;
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+syntax = "proto3";
+
+package agntcy.dir.store.v1;
+
+import "agntcy/dir/core/v1/record.proto";
+import "google/protobuf/empty.proto";
+
+// Defines an interface for a content-addressable storage
+// service for objects.
+//
+// Max object size: 4MB (to fully fit in a single request)
+// Max metadata size: 100KB
+//
+// Store service can be implemented by various storage backends,
+// such as local file system, OCI registry, etc.
+//
+// Middleware should be used to control who can perform these RPCs.
+// Policies for the middleware can be handled via a separate service.
+//
+// Each operation is performed sequentially, meaning that
+// for the N-th request, the N-th response will be returned.
+// If an error occurs, the stream will be cancelled.
+service StoreService {
+  // Push performs write operation for given records.
+  rpc Push(stream core.v1.Record) returns (stream core.v1.RecordRef);
+
+  // Pull performs read operation for given records.
+  rpc Pull(stream core.v1.RecordRef) returns (stream core.v1.Record);
+
+  // Lookup resolves basic metadata for the records.
+  rpc Lookup(stream core.v1.RecordRef) returns (stream core.v1.RecordMeta);
+
+  // Delete performs delete operation for the given records.
+  rpc Delete(stream core.v1.RecordRef) returns (google.protobuf.Empty);
+
+  // PushReferrer performs write operation for record referrers.
+  rpc PushReferrer(stream PushReferrerRequest) returns (stream PushReferrerResponse);
+
+  // PullReferrer performs read operation for record referrers.
+  rpc PullReferrer(stream PullReferrerRequest) returns (stream PullReferrerResponse);
+}
+
+// PushReferrerRequest represents a record with optional OCI artifacts for push operations.
+message PushReferrerRequest {
+  // Record reference
+  core.v1.RecordRef record_ref = 1;
+
+  // RecordReferrer object to be stored for the record
+  core.v1.RecordReferrer referrer = 2;
+}
+
+// PushReferrerResponse reports the result of a referrer push operation.
+message PushReferrerResponse {
+  // The push process result
+  bool success = 1;
+
+  // Optional error message if push failed
+  optional string error_message = 2;
+}
+
+// PullReferrerRequest represents a record with optional OCI artifacts for pull operations.
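+//
+// Illustrative use (based on the type string documented in signature.proto):
+// setting referrer_type = "agntcy.dir.sign.v1.Signature" would pull only the
+// signatures attached to a record, while leaving it unset pulls all referrers.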
+message PullReferrerRequest { + // Record reference + core.v1.RecordRef record_ref = 1; + + // Record referrer type to be pulled + // If not provided, all referrers will be pulled + optional string referrer_type = 2; +} + +// PullReferrerResponse is returned after successfully fetching a record referrer. +message PullReferrerResponse { + // RecordReferrer object associated with the record + core.v1.RecordReferrer referrer = 1; +} diff --git a/proto/agntcy/dir/store/v1/sync_service.proto b/proto/agntcy/dir/store/v1/sync_service.proto index f993d09ca..5b2993898 100644 --- a/proto/agntcy/dir/store/v1/sync_service.proto +++ b/proto/agntcy/dir/store/v1/sync_service.proto @@ -1,168 +1,168 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -syntax = "proto3"; - -package agntcy.dir.store.v1; - -// SyncService provides functionality for synchronizing objects between Directory nodes. -// -// This service enables one-way synchronization from a remote Directory node to the local node, -// allowing distributed Directory instances to share and replicate objects. The service supports -// both on-demand synchronization and tracking of sync operations through their lifecycle. -service SyncService { - // CreateSync initiates a new synchronization operation from a remote Directory node. - // - // The operation is non-blocking and returns immediately with a sync ID that can be used - // to track progress and manage the sync operation. - rpc CreateSync(CreateSyncRequest) returns (CreateSyncResponse); - - // ListSyncs returns a stream of all sync operations known to the system. - // - // This includes active, completed, and failed synchronizations. - rpc ListSyncs(ListSyncsRequest) returns (stream ListSyncsItem); - - // GetSync retrieves detailed status information for a specific synchronization. - rpc GetSync(GetSyncRequest) returns (GetSyncResponse); - - // DeleteSync removes a synchronization operation from the system. - rpc DeleteSync(DeleteSyncRequest) returns (DeleteSyncResponse); - - // RequestRegistryCredentials requests registry credentials between two Directory nodes. - // - // This RPC allows a requesting node to authenticate with this node and obtain - // temporary registry credentials for secure Zot-based synchronization. - rpc RequestRegistryCredentials(RequestRegistryCredentialsRequest) returns (RequestRegistryCredentialsResponse); -} - -// CreateSyncRequest defines the parameters for creating a new synchronization operation. -// -// Currently supports basic synchronization of all objects from a remote Directory. -// Future versions may include additional options for filtering and scheduling capabilities. -message CreateSyncRequest { - // URL of the remote Registry to synchronize from. - // - // This should be a complete URL including protocol and port if non-standard. - // Examples: - // - "https://directory.example.com" - // - "http://localhost:8080" - // - "https://directory.example.com:9443" - string remote_directory_url = 1; - - // List of CIDs to synchronize from the remote Directory. - // If empty, all objects will be synchronized. - repeated string cids = 2; -} - -// CreateSyncResponse contains the result of creating a new synchronization operation. -message CreateSyncResponse { - // Unique identifier for the created synchronization operation. - // This ID can be used with other SyncService RPCs to monitor and manage the sync. 
- string sync_id = 1; -} - -// ListSyncsRequest specifies parameters for listing synchronization operations. -message ListSyncsRequest { - // Optional limit on the number of results to return. - optional uint32 limit = 2; - - // Optional offset for pagination of results. - optional uint32 offset = 3; -} - -// ListSyncItem represents a single synchronization in the list of all syncs. -message ListSyncsItem { - // Unique identifier of the synchronization operation. - string sync_id = 1; - - // Current status of the synchronization operation. - SyncStatus status = 2; - - // URL of the remote Directory being synchronized from. - string remote_directory_url = 3; -} - -// GetSyncRequest specifies which synchronization status to retrieve. -message GetSyncRequest { - // Unique identifier of the synchronization operation to query. - string sync_id = 1; -} - -// GetSyncResponse provides detailed information about a specific synchronization operation. -message GetSyncResponse { - // Unique identifier of the synchronization operation. - string sync_id = 1; - - // Current status of the synchronization operation. - SyncStatus status = 2; - - // URL of the remote Directory node being synchronized from. - string remote_directory_url = 3; - - // Timestamp when the synchronization operation was created in the RFC3339 format. - // Specs: https://www.rfc-editor.org/rfc/rfc3339.html - string created_time = 4; - - // Timestamp of the most recent status update for this synchronization in the RFC3339 format. - string last_update_time = 5; -} - -// DeleteSyncRequest specifies which synchronization to delete. -message DeleteSyncRequest { - // Unique identifier of the synchronization operation to delete. - string sync_id = 1; -} - -// DeleteSyncResponse -message DeleteSyncResponse { -} - -message RequestRegistryCredentialsRequest { - // Identity of the requesting node - // For example: spiffe://example.org/service/foo - string requesting_node_id = 1; -} - -message RequestRegistryCredentialsResponse { - // Success status of the credential negotiation - bool success = 1; - - // Error message if negotiation failed - string error_message = 2; - - // URL of the remote Registry being synchronized from. - string remote_registry_url = 3; - - // Registry credentials (oneof based on credential type) - oneof credentials { - BasicAuthCredentials basic_auth = 4; - // CertificateCredentials certificate = 5; - } -} - -// Supporting credential type definitions -message BasicAuthCredentials { - string username = 1; - string password = 2; -} - -// SyncStatus enumeration defines the possible states of a synchronization operation. -enum SyncStatus { - // Default/unset status - should not be used in practice - SYNC_STATUS_UNSPECIFIED = 0; - - // Sync operation has been created but not yet started - SYNC_STATUS_PENDING = 1; - - // Sync operation is actively discovering and transferring objects - SYNC_STATUS_IN_PROGRESS = 2; - - // Sync operation encountered an error and stopped - SYNC_STATUS_FAILED = 3; - - // Sync operation has been marked for deletion but cleanup not yet started - SYNC_STATUS_DELETE_PENDING = 4; - - // Sync operation has been successfully deleted and cleaned up - SYNC_STATUS_DELETED = 5; -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package agntcy.dir.store.v1; + +// SyncService provides functionality for synchronizing objects between Directory nodes. 
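+// (Typical lifecycle, sketched from the RPCs and statuses defined below:
+// CreateSync returns a sync_id; GetSync and ListSyncs report its SyncStatus;
+// DeleteSync removes it.)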
+//
+// This service enables one-way synchronization from a remote Directory node to the local node,
+// allowing distributed Directory instances to share and replicate objects. The service supports
+// both on-demand synchronization and tracking of sync operations through their lifecycle.
+service SyncService {
+  // CreateSync initiates a new synchronization operation from a remote Directory node.
+  //
+  // The operation is non-blocking and returns immediately with a sync ID that can be used
+  // to track progress and manage the sync operation.
+  rpc CreateSync(CreateSyncRequest) returns (CreateSyncResponse);
+
+  // ListSyncs returns a stream of all sync operations known to the system.
+  //
+  // This includes active, completed, and failed synchronizations.
+  rpc ListSyncs(ListSyncsRequest) returns (stream ListSyncsItem);
+
+  // GetSync retrieves detailed status information for a specific synchronization.
+  rpc GetSync(GetSyncRequest) returns (GetSyncResponse);
+
+  // DeleteSync removes a synchronization operation from the system.
+  rpc DeleteSync(DeleteSyncRequest) returns (DeleteSyncResponse);
+
+  // RequestRegistryCredentials requests registry credentials between two Directory nodes.
+  //
+  // This RPC allows a requesting node to authenticate with this node and obtain
+  // temporary registry credentials for secure Zot-based synchronization.
+  rpc RequestRegistryCredentials(RequestRegistryCredentialsRequest) returns (RequestRegistryCredentialsResponse);
+}
+
+// CreateSyncRequest defines the parameters for creating a new synchronization operation.
+//
+// Currently supports basic synchronization of all objects from a remote Directory.
+// Future versions may include additional options for filtering and scheduling capabilities.
+message CreateSyncRequest {
+  // URL of the remote Directory to synchronize from.
+  //
+  // This should be a complete URL including protocol and port if non-standard.
+  // Examples:
+  // - "https://directory.example.com"
+  // - "http://localhost:8080"
+  // - "https://directory.example.com:9443"
+  string remote_directory_url = 1;
+
+  // List of CIDs to synchronize from the remote Directory.
+  // If empty, all objects will be synchronized.
+  repeated string cids = 2;
+}
+
+// CreateSyncResponse contains the result of creating a new synchronization operation.
+message CreateSyncResponse {
+  // Unique identifier for the created synchronization operation.
+  // This ID can be used with other SyncService RPCs to monitor and manage the sync.
+  string sync_id = 1;
+}
+
+// ListSyncsRequest specifies parameters for listing synchronization operations.
+message ListSyncsRequest {
+  // Optional limit on the number of results to return.
+  optional uint32 limit = 2;
+
+  // Optional offset for pagination of results.
+  optional uint32 offset = 3;
+}
+
+// ListSyncsItem represents a single synchronization in the list of all syncs.
+message ListSyncsItem {
+  // Unique identifier of the synchronization operation.
+  string sync_id = 1;
+
+  // Current status of the synchronization operation.
+  SyncStatus status = 2;
+
+  // URL of the remote Directory being synchronized from.
+  string remote_directory_url = 3;
+}
+
+// GetSyncRequest specifies which synchronization status to retrieve.
+message GetSyncRequest {
+  // Unique identifier of the synchronization operation to query.
+  string sync_id = 1;
+}
+
+// GetSyncResponse provides detailed information about a specific synchronization operation.
+message GetSyncResponse {
+  // Unique identifier of the synchronization operation.
+ string sync_id = 1; + + // Current status of the synchronization operation. + SyncStatus status = 2; + + // URL of the remote Directory node being synchronized from. + string remote_directory_url = 3; + + // Timestamp when the synchronization operation was created in the RFC3339 format. + // Specs: https://www.rfc-editor.org/rfc/rfc3339.html + string created_time = 4; + + // Timestamp of the most recent status update for this synchronization in the RFC3339 format. + string last_update_time = 5; +} + +// DeleteSyncRequest specifies which synchronization to delete. +message DeleteSyncRequest { + // Unique identifier of the synchronization operation to delete. + string sync_id = 1; +} + +// DeleteSyncResponse +message DeleteSyncResponse { +} + +message RequestRegistryCredentialsRequest { + // Identity of the requesting node + // For example: spiffe://example.org/service/foo + string requesting_node_id = 1; +} + +message RequestRegistryCredentialsResponse { + // Success status of the credential negotiation + bool success = 1; + + // Error message if negotiation failed + string error_message = 2; + + // URL of the remote Registry being synchronized from. + string remote_registry_url = 3; + + // Registry credentials (oneof based on credential type) + oneof credentials { + BasicAuthCredentials basic_auth = 4; + // CertificateCredentials certificate = 5; + } +} + +// Supporting credential type definitions +message BasicAuthCredentials { + string username = 1; + string password = 2; +} + +// SyncStatus enumeration defines the possible states of a synchronization operation. +enum SyncStatus { + // Default/unset status - should not be used in practice + SYNC_STATUS_UNSPECIFIED = 0; + + // Sync operation has been created but not yet started + SYNC_STATUS_PENDING = 1; + + // Sync operation is actively discovering and transferring objects + SYNC_STATUS_IN_PROGRESS = 2; + + // Sync operation encountered an error and stopped + SYNC_STATUS_FAILED = 3; + + // Sync operation has been marked for deletion but cleanup not yet started + SYNC_STATUS_DELETE_PENDING = 4; + + // Sync operation has been successfully deleted and cleaned up + SYNC_STATUS_DELETED = 5; +} diff --git a/proto/buf.gen.yaml b/proto/buf.gen.yaml index b9aecb5f0..0e07b3b86 100644 --- a/proto/buf.gen.yaml +++ b/proto/buf.gen.yaml @@ -1,54 +1,54 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -version: v2 -managed: - enabled: true - disable: - - module: buf.build/googleapis/googleapis - override: - # Map specific paths to desired Go packages - - path: agntcy/dir/core/v1 - file_option: go_package - value: github.com/agntcy/dir/api/core/v1 - - path: agntcy/dir/routing/v1 - file_option: go_package - value: github.com/agntcy/dir/api/routing/v1 - - path: agntcy/dir/search/v1 - file_option: go_package - value: github.com/agntcy/dir/api/search/v1 - - path: agntcy/dir/sign/v1 - file_option: go_package - value: github.com/agntcy/dir/api/sign/v1 - - path: agntcy/dir/store/v1 - file_option: go_package - value: github.com/agntcy/dir/api/store/v1 - - path: agntcy/dir/events/v1 - file_option: go_package - value: github.com/agntcy/dir/api/events/v1 -plugins: - # Stubs for Golang - - remote: buf.build/protocolbuffers/go:v1.36.5 - out: ../api - opt: - - module=github.com/agntcy/dir/api - - remote: buf.build/grpc/go:v1.5.1 - out: ../api - opt: - - module=github.com/agntcy/dir/api - - require_unimplemented_servers=false - - use_generic_streams_experimental=false - - # Stubs for Python - - remote: 
buf.build/protocolbuffers/python:v32.1 - out: ../sdk/dir-py/ - - remote: buf.build/protocolbuffers/pyi:v27.4 - out: ../sdk/dir-py/ - - remote: buf.build/grpc/python:v1.75.1 - out: ../sdk/dir-py/ - - # Stubs for Javascript/Typescript - - remote: buf.build/bufbuild/es:v2.9.0 - out: ../sdk/dir-js/src/models/ - opt: - - import_extension=js +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +version: v2 +managed: + enabled: true + disable: + - module: buf.build/googleapis/googleapis + override: + # Map specific paths to desired Go packages + - path: agntcy/dir/core/v1 + file_option: go_package + value: github.com/agntcy/dir/api/core/v1 + - path: agntcy/dir/routing/v1 + file_option: go_package + value: github.com/agntcy/dir/api/routing/v1 + - path: agntcy/dir/search/v1 + file_option: go_package + value: github.com/agntcy/dir/api/search/v1 + - path: agntcy/dir/sign/v1 + file_option: go_package + value: github.com/agntcy/dir/api/sign/v1 + - path: agntcy/dir/store/v1 + file_option: go_package + value: github.com/agntcy/dir/api/store/v1 + - path: agntcy/dir/events/v1 + file_option: go_package + value: github.com/agntcy/dir/api/events/v1 +plugins: + # Stubs for Golang + - remote: buf.build/protocolbuffers/go:v1.36.5 + out: ../api + opt: + - module=github.com/agntcy/dir/api + - remote: buf.build/grpc/go:v1.5.1 + out: ../api + opt: + - module=github.com/agntcy/dir/api + - require_unimplemented_servers=false + - use_generic_streams_experimental=false + + # Stubs for Python + - remote: buf.build/protocolbuffers/python:v32.1 + out: ../sdk/dir-py/ + - remote: buf.build/protocolbuffers/pyi:v27.4 + out: ../sdk/dir-py/ + - remote: buf.build/grpc/python:v1.75.1 + out: ../sdk/dir-py/ + + # Stubs for Javascript/Typescript + - remote: buf.build/bufbuild/es:v2.9.0 + out: ../sdk/dir-js/src/models/ + opt: + - import_extension=js diff --git a/proto/buf.lock b/proto/buf.lock index 4f98143f5..5117fc466 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -1,2 +1,2 @@ -# Generated by buf. DO NOT EDIT. -version: v2 +# Generated by buf. DO NOT EDIT. +version: v2 diff --git a/proto/buf.yaml b/proto/buf.yaml index 721c89eeb..92e408f00 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -1,31 +1,31 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -version: v2 -lint: - disallow_comment_ignores: true - use: - - STANDARD - ignore_only: - ENUM_ZERO_VALUE_SUFFIX: - - agntcy/dir/routing/v1/peer.proto - RPC_REQUEST_STANDARD_NAME: - - agntcy/dir/routing/v1/publication_service.proto - - agntcy/dir/routing/v1/routing_service.proto - - agntcy/dir/store/v1/store_service.proto - RPC_RESPONSE_STANDARD_NAME: - - agntcy/dir/routing/v1/publication_service.proto - - agntcy/dir/routing/v1/routing_service.proto - - agntcy/dir/store/v1/store_service.proto - - agntcy/dir/store/v1/sync_service.proto - RPC_REQUEST_RESPONSE_UNIQUE: - - agntcy/dir/routing/v1/publication_service.proto - - agntcy/dir/routing/v1/routing_service.proto - - agntcy/dir/store/v1/store_service.proto - enum_zero_value_suffix: _UNSPECIFIED -breaking: - use: - - WIRE_JSON -modules: - - path: . 
- name: buf.build/agntcy/dir +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +version: v2 +lint: + disallow_comment_ignores: true + use: + - STANDARD + ignore_only: + ENUM_ZERO_VALUE_SUFFIX: + - agntcy/dir/routing/v1/peer.proto + RPC_REQUEST_STANDARD_NAME: + - agntcy/dir/routing/v1/publication_service.proto + - agntcy/dir/routing/v1/routing_service.proto + - agntcy/dir/store/v1/store_service.proto + RPC_RESPONSE_STANDARD_NAME: + - agntcy/dir/routing/v1/publication_service.proto + - agntcy/dir/routing/v1/routing_service.proto + - agntcy/dir/store/v1/store_service.proto + - agntcy/dir/store/v1/sync_service.proto + RPC_REQUEST_RESPONSE_UNIQUE: + - agntcy/dir/routing/v1/publication_service.proto + - agntcy/dir/routing/v1/routing_service.proto + - agntcy/dir/store/v1/store_service.proto + enum_zero_value_suffix: _UNSPECIFIED +breaking: + use: + - WIRE_JSON +modules: + - path: . + name: buf.build/agntcy/dir diff --git a/scripts/run_trust_ranking.py b/scripts/run_trust_ranking.py new file mode 100644 index 000000000..ad3d8addc --- /dev/null +++ b/scripts/run_trust_ranking.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +""" +Run the reference trust ranking demo. + +Example: + python scripts/run_trust_ranking.py --top 10 +""" + +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path + +# Allow running from repo root without installing as a package +REPO_ROOT = Path(__file__).resolve().parents[1] +sys.path.insert(0, str(REPO_ROOT)) + +from extensions.trust_ranking.reference_ranker import rank_agents # noqa: E402 + + +def _load_agents(path: Path) -> list[dict]: + data = json.loads(path.read_text(encoding="utf-8")) + agents = data.get("agents") + if not isinstance(agents, list): + raise ValueError("Input JSON must contain an 'agents' list") + return agents + + +def main() -> int: + parser = argparse.ArgumentParser(description="Trust ranking PoC runner (reference only)") + parser.add_argument( + "--input", + default="examples/directory_sample.json", + help="Path to JSON file containing {'agents': [...]}", + ) + parser.add_argument("--top", type=int, default=10, help="How many results to print") + parser.add_argument("--json", action="store_true", help="Output full ranked list as JSON") + args = parser.parse_args() + + input_path = (REPO_ROOT / args.input).resolve() + if not input_path.exists(): + print(f"ERROR: input file not found: {input_path}", file=sys.stderr) + return 2 + + try: + agents = _load_agents(input_path) + ranked = rank_agents(agents) + except Exception as e: + print(f"ERROR: {e}", file=sys.stderr) + return 2 + + if args.json: + print(json.dumps({"agents": ranked}, indent=2, ensure_ascii=False)) + return 0 + + top_n = max(0, min(args.top, len(ranked))) + + print("Trust Ranking PoC (reference only)") + print(f"Input: {args.input}") + print(f"Results: top {top_n} of {len(ranked)}") + print("") + + for i, a in enumerate(ranked[:top_n], start=1): + trust = a.get("trust") or {} + score = trust.get("score", "n/a") + band = trust.get("band", "n/a") + reasons = trust.get("reasons", []) + name = a.get("name") or a.get("id") or "(unnamed)" + url = a.get("url") or "" + + reasons_str = "; ".join(reasons) if isinstance(reasons, list) else str(reasons) + + print(f"{i:>2}. 
{name}")
+        print(f"    id:      {a.get('id')}")
+        print(f"    url:     {url}")
+        print(f"    trust:   {score} ({band})")
+        print(f"    reason:  {reasons_str}")
+        print("")
+
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/sdk/dir-js/.gitignore b/sdk/dir-js/.gitignore
index f4e2c6d6b..eccb865c2 100644
--- a/sdk/dir-js/.gitignore
+++ b/sdk/dir-js/.gitignore
@@ -1,3 +1,3 @@
-node_modules/
-dist/
-*.tsbuildinfo
+node_modules/
+dist/
+*.tsbuildinfo
diff --git a/sdk/dir-js/.npmrc b/sdk/dir-js/.npmrc
index 3bd5c96a6..439bfab81 100644
--- a/sdk/dir-js/.npmrc
+++ b/sdk/dir-js/.npmrc
@@ -1,3 +1,3 @@
-//registry.npmjs.org/:_auth=$NODE_AUTH_TOKEN
-@agntcy:registry=https://registry.npmjs.org/
-@buf:registry=https://buf.build/gen/npm/v1/
+//registry.npmjs.org/:_auth=$NODE_AUTH_TOKEN
+@agntcy:registry=https://registry.npmjs.org/
+@buf:registry=https://buf.build/gen/npm/v1/
diff --git a/sdk/dir-js/.prettierrc b/sdk/dir-js/.prettierrc
index 24ac03846..6481fb0ad 100644
--- a/sdk/dir-js/.prettierrc
+++ b/sdk/dir-js/.prettierrc
@@ -1,16 +1,16 @@
-{
-  "printWidth" : 80,
-  "tabWidth" : 2,
-  "useTabs" : false,
-  "semi" : true,
-  "singleQuote" : true,
-  "quoteProps" : "preserve",
-  "bracketSpacing" : false,
-  "trailingComma" : "all",
-  "arrowParens" : "always",
-  "embeddedLanguageFormatting" : "off",
-  "bracketSameLine" : true,
-  "singleAttributePerLine" : false,
-  "htmlWhitespaceSensitivity" : "strict",
-  "plugins": ["prettier-plugin-organize-imports"],
-}
+{
+  "printWidth" : 80,
+  "tabWidth" : 2,
+  "useTabs" : false,
+  "semi" : true,
+  "singleQuote" : true,
+  "quoteProps" : "preserve",
+  "bracketSpacing" : false,
+  "trailingComma" : "all",
+  "arrowParens" : "always",
+  "embeddedLanguageFormatting" : "off",
+  "bracketSameLine" : true,
+  "singleAttributePerLine" : false,
+  "htmlWhitespaceSensitivity" : "strict",
+  "plugins": ["prettier-plugin-organize-imports"]
+}
diff --git a/sdk/dir-js/README.md b/sdk/dir-js/README.md
index 3c34c4243..9d364d5e7 100644
--- a/sdk/dir-js/README.md
+++ b/sdk/dir-js/README.md
@@ -1,147 +1,147 @@
-# Directory JavaScript SDK
-
-## Overview
-
-Dir JavaScript SDK provides a simple way to interact with the Directory API.
-It allows developers to integrate and use Directory functionality from their applications with ease.
-The SDK supports both JavaScript and TypeScript applications.
-
-**Note for users:** The SDK is intended for use in Node.js applications and will not work in Web applications.
- -## Features - -The Directory SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface: - -### **Store API** -- **Record Management**: Push records to the store and pull them by reference -- **Metadata Operations**: Look up record metadata without downloading full content -- **Data Lifecycle**: Delete records permanently from the store -- **Referrer Support**: Push and pull artifacts for existing records -- **Sync Management**: Manage storage synchronization policies between Directory servers - -### **Search API** -- **Flexible Search**: Search stored records using text, semantic, and structured queries -- **Advanced Filtering**: Filter results by metadata, content type, and other criteria - -### **Routing API** -- **Network Publishing**: Publish records to make them discoverable across the network -- **Content Discovery**: List and query published records across the network -- **Network Management**: Unpublish records to remove them from network discovery - -### **Signing and Verification** -- **Local Signing**: Sign records locally using private keys or OIDC-based authentication. -Requires [dirctl](https://github.com/agntcy/dir/releases) binary to perform signing. -- **Remote Verification**: Verify record signatures using the Directory gRPC API - -### **Developer Experience** -- **Type Safety**: Full type hints for better IDE support and fewer runtime errors -- **Async Support**: Non-blocking operations with streaming responses for large datasets -- **Error Handling**: Comprehensive gRPC error handling with detailed error messages -- **Configuration**: Flexible configuration via environment variables or direct instantiation - -## Installation - -Install the SDK using one of available JS package managers like [npm](https://www.npmjs.com/) - -1. Initialize the project: -```bash -npm init -y -``` - -2. 
Add the SDK to your project: -```bash -npm install agntcy-dir -``` - -## Configuration - -The SDK can be configured via environment variables or direct instantiation: - -```js -// Environment variables (insecure mode) -process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; -process.env.DIRCTL_PATH = "/path/to/dirctl"; - -// Environment variables (X.509 authentication) -process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; -process.env.DIRECTORY_CLIENT_AUTH_MODE = "x509"; -process.env.DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH = "/tmp/agent.sock"; - -// Environment variables (JWT authentication) -process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; -process.env.DIRECTORY_CLIENT_AUTH_MODE = "jwt"; -process.env.DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH = "/tmp/agent.sock"; -process.env.DIRECTORY_CLIENT_JWT_AUDIENCE = "spiffe://example.org/dir-server"; - -// Or configure directly -import {Config, Client} from 'agntcy-dir'; - -// Insecure mode (default, for development only) -const config = new Config( - serverAddress="localhost:8888", - dirctlPath="/usr/local/bin/dirctl" -); -const client = new Client(config); - -// X.509 authentication with SPIRE -const x509Config = new Config( - "localhost:8888", - "/usr/local/bin/dirctl", - "/tmp/agent.sock", // SPIFFE socket path - "x509" // auth mode -); -const x509Transport = await Client.createGRPCTransport(x509Config); -const x509Client = new Client(x509Config, x509Transport); - -// JWT authentication with SPIRE -const jwtConfig = new Config( - "localhost:8888", - "/usr/local/bin/dirctl", - "/tmp/agent.sock", // SPIFFE socket path - "jwt", // auth mode - "spiffe://example.org/dir-server" // JWT audience -); -const jwtTransport = await Client.createGRPCTransport(jwtConfig); -const jwtClient = new Client(jwtConfig, jwtTransport); -``` - -## Getting Started - -### Prerequisites - -- [NodeJS](https://nodejs.org/en/) - JavaScript runtime -- [npm](https://www.npmjs.com/) - Package manager -- [dirctl](https://github.com/agntcy/dir/releases) - Directory CLI binary -- Directory server instance (see setup below) - -### 1. Server Setup - -**Option A: Local Development Server** - -```bash -# Clone the repository and start the server using Taskfile -task server:start -``` - -**Option B: Custom Server** - -```bash -# Set your Directory server address -export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888" -``` - -### 2. SDK Installation - -```bash -# Add the Directory SDK -npm install agntcy-dir -``` - -### Usage Examples - -See the [Example JavaScript Project](../examples/example-js/) for a complete working example that demonstrates all SDK features. - -```bash -npm install -npm run example -``` +# Directory JavaScript SDK + +## Overview + +Dir JavaScript SDK provides a simple way to interact with the Directory API. +It allows developers to integrate and use Directory functionality from their applications with ease. +The SDK supports both JavaScript and TypeScript applications. + +**Note for users:** The SDK is intended for use in Node.js applications and will not work in Web applications. 
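+
+A minimal connection sketch (assuming the insecure localhost defaults used in the Configuration section below; adjust the address and dirctl path for your setup):
+
+```js
+import {Client, Config} from 'agntcy-dir';
+
+// Config(serverAddress, dirctlPath), matching the positional form used below.
+const client = new Client(new Config('localhost:8888', '/usr/local/bin/dirctl'));
+```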
+
+## Features
+
+The Directory SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface:
+
+### **Store API**
+- **Record Management**: Push records to the store and pull them by reference
+- **Metadata Operations**: Look up record metadata without downloading full content
+- **Data Lifecycle**: Delete records permanently from the store
+- **Referrer Support**: Push and pull artifacts for existing records
+- **Sync Management**: Manage storage synchronization policies between Directory servers
+
+### **Search API**
+- **Flexible Search**: Search stored records using text, semantic, and structured queries
+- **Advanced Filtering**: Filter results by metadata, content type, and other criteria
+
+### **Routing API**
+- **Network Publishing**: Publish records to make them discoverable across the network
+- **Content Discovery**: List and query published records across the network
+- **Network Management**: Unpublish records to remove them from network discovery
+
+### **Signing and Verification**
+- **Local Signing**: Sign records locally using private keys or OIDC-based authentication.
+Requires the [dirctl](https://github.com/agntcy/dir/releases) binary to perform signing.
+- **Remote Verification**: Verify record signatures using the Directory gRPC API
+
+### **Developer Experience**
+- **Type Safety**: Full type definitions for better IDE support and fewer runtime errors
+- **Async Support**: Non-blocking operations with streaming responses for large datasets
+- **Error Handling**: Comprehensive gRPC error handling with detailed error messages
+- **Configuration**: Flexible configuration via environment variables or direct instantiation
+
+## Installation
+
+Install the SDK using one of the available JS package managers, such as [npm](https://www.npmjs.com/):
+
+1. Initialize the project:
+```bash
+npm init -y
+```
+
+2. 
Add the SDK to your project: +```bash +npm install agntcy-dir +``` + +## Configuration + +The SDK can be configured via environment variables or direct instantiation: + +```js +// Environment variables (insecure mode) +process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; +process.env.DIRCTL_PATH = "/path/to/dirctl"; + +// Environment variables (X.509 authentication) +process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; +process.env.DIRECTORY_CLIENT_AUTH_MODE = "x509"; +process.env.DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH = "/tmp/agent.sock"; + +// Environment variables (JWT authentication) +process.env.DIRECTORY_CLIENT_SERVER_ADDRESS = "localhost:8888"; +process.env.DIRECTORY_CLIENT_AUTH_MODE = "jwt"; +process.env.DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH = "/tmp/agent.sock"; +process.env.DIRECTORY_CLIENT_JWT_AUDIENCE = "spiffe://example.org/dir-server"; + +// Or configure directly +import {Config, Client} from 'agntcy-dir'; + +// Insecure mode (default, for development only) +const config = new Config( + serverAddress="localhost:8888", + dirctlPath="/usr/local/bin/dirctl" +); +const client = new Client(config); + +// X.509 authentication with SPIRE +const x509Config = new Config( + "localhost:8888", + "/usr/local/bin/dirctl", + "/tmp/agent.sock", // SPIFFE socket path + "x509" // auth mode +); +const x509Transport = await Client.createGRPCTransport(x509Config); +const x509Client = new Client(x509Config, x509Transport); + +// JWT authentication with SPIRE +const jwtConfig = new Config( + "localhost:8888", + "/usr/local/bin/dirctl", + "/tmp/agent.sock", // SPIFFE socket path + "jwt", // auth mode + "spiffe://example.org/dir-server" // JWT audience +); +const jwtTransport = await Client.createGRPCTransport(jwtConfig); +const jwtClient = new Client(jwtConfig, jwtTransport); +``` + +## Getting Started + +### Prerequisites + +- [NodeJS](https://nodejs.org/en/) - JavaScript runtime +- [npm](https://www.npmjs.com/) - Package manager +- [dirctl](https://github.com/agntcy/dir/releases) - Directory CLI binary +- Directory server instance (see setup below) + +### 1. Server Setup + +**Option A: Local Development Server** + +```bash +# Clone the repository and start the server using Taskfile +task server:start +``` + +**Option B: Custom Server** + +```bash +# Set your Directory server address +export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888" +``` + +### 2. SDK Installation + +```bash +# Add the Directory SDK +npm install agntcy-dir +``` + +### Usage Examples + +See the [Example JavaScript Project](../examples/example-js/) for a complete working example that demonstrates all SDK features. + +```bash +npm install +npm run example +``` diff --git a/sdk/dir-js/api-extractor.json b/sdk/dir-js/api-extractor.json index 713215cbc..25f31d89b 100644 --- a/sdk/dir-js/api-extractor.json +++ b/sdk/dir-js/api-extractor.json @@ -1,422 +1,422 @@ -/** - * Config file for API Extractor. For more info, please visit: https://api-extractor.com - */ -{ - "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json", - /** - * Optionally specifies another JSON config file that this file extends from. This provides a way for - * standard settings to be shared across multiple projects. - * - * If the path starts with "./" or "../", the path is resolved relative to the folder of the file that contains - * the "extends" field. Otherwise, the first path segment is interpreted as an NPM package name, and will be - * resolved using NodeJS require(). 
- * - * SUPPORTED TOKENS: none - * DEFAULT VALUE: "" - */ - // "extends": "./shared/api-extractor-base.json" - // "extends": "my-package/include/api-extractor-base.json" - /** - * Determines the "" token that can be used with other config file settings. The project folder - * typically contains the tsconfig.json and package.json config files, but the path is user-defined. - * - * The path is resolved relative to the folder of the config file that contains the setting. - * - * The default value for "projectFolder" is the token "", which means the folder is determined by traversing - * parent folders, starting from the folder containing api-extractor.json, and stopping at the first folder - * that contains a tsconfig.json file. If a tsconfig.json file cannot be found in this way, then an error - * will be reported. - * - * SUPPORTED TOKENS: - * DEFAULT VALUE: "" - */ - // "projectFolder": "..", - /** - * (REQUIRED) Specifies the .d.ts file to be used as the starting point for analysis. API Extractor - * analyzes the symbols exported by this module. - * - * The file extension must be ".d.ts" and not ".ts". - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - */ - "mainEntryPointFilePath": "/dist/src/index.d.ts", - /** - * A list of NPM package names whose exports should be treated as part of this package. - * - * For example, suppose that Webpack is used to generate a distributed bundle for the project "library1", - * and another NPM package "library2" is embedded in this bundle. Some types from library2 may become part - * of the exported API for library1, but by default API Extractor would generate a .d.ts rollup that explicitly - * imports library2. To avoid this, we might specify: - * - * "bundledPackages": [ "library2" ], - * - * This would direct API Extractor to embed those types directly in the .d.ts rollup, as if they had been - * local files for library1. - * - * The "bundledPackages" elements may specify glob patterns using minimatch syntax. To ensure deterministic - * output, globs are expanded by matching explicitly declared top-level dependencies only. For example, - * the pattern below will NOT match "@my-company/example" unless it appears in a field such as "dependencies" - * or "devDependencies" of the project's package.json file: - * - * "bundledPackages": [ "@my-company/*" ], - */ - "bundledPackages": [], - /** - * Specifies what type of newlines API Extractor should use when writing output files. By default, the output files - * will be written with Windows-style newlines. To use POSIX-style newlines, specify "lf" instead. - * To use the OS's default newline kind, specify "os". - * - * DEFAULT VALUE: "crlf" - */ - // "newlineKind": "crlf", - /** - * Specifies how API Extractor sorts members of an enum when generating the .api.json file. By default, the output - * files will be sorted alphabetically, which is "by-name". To keep the ordering in the source code, specify - * "preserve". - * - * DEFAULT VALUE: "by-name" - */ - // "enumMemberOrder": "by-name", - /** - * Set to true when invoking API Extractor's test harness. When `testMode` is true, the `toolVersion` field in the - * .api.json file is assigned an empty string to prevent spurious diffs in output files tracked for tests. - * - * DEFAULT VALUE: "false" - */ - // "testMode": false, - /** - * Determines how the TypeScript compiler engine will be invoked by API Extractor. 
- */ - "compiler": { - /** - * Specifies the path to the tsconfig.json file to be used by API Extractor when analyzing the project. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * Note: This setting will be ignored if "overrideTsconfig" is used. - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "/tsconfig.json" - */ - // "tsconfigFilePath": "/tsconfig.json", - /** - * Provides a compiler configuration that will be used instead of reading the tsconfig.json file from disk. - * The object must conform to the TypeScript tsconfig schema: - * - * http://json.schemastore.org/tsconfig - * - * If omitted, then the tsconfig.json file will be read from the "projectFolder". - * - * DEFAULT VALUE: no overrideTsconfig section - */ - // "overrideTsconfig": { - // . . . - // } - /** - * This option causes the compiler to be invoked with the --skipLibCheck option. This option is not recommended - * and may cause API Extractor to produce incomplete or incorrect declarations, but it may be required when - * dependencies contain declarations that are incompatible with the TypeScript engine that API Extractor uses - * for its analysis. Where possible, the underlying issue should be fixed rather than relying on skipLibCheck. - * - * DEFAULT VALUE: false - */ - // "skipLibCheck": true, - }, - /** - * Configures how the API report file (*.api.md) will be generated. - */ - "apiReport": { - /** - * (REQUIRED) Whether to generate an API report. - */ - "enabled": false - /** - * The base filename for the API report files, to be combined with "reportFolder" or "reportTempFolder" - * to produce the full file path. The "reportFileName" should not include any path separators such as - * "\" or "/". The "reportFileName" should not include a file extension, since API Extractor will automatically - * append an appropriate file extension such as ".api.md". If the "reportVariants" setting is used, then the - * file extension includes the variant name, for example "my-report.public.api.md" or "my-report.beta.api.md". - * The "complete" variant always uses the simple extension "my-report.api.md". - * - * Previous versions of API Extractor required "reportFileName" to include the ".api.md" extension explicitly; - * for backwards compatibility, that is still accepted but will be discarded before applying the above rules. - * - * SUPPORTED TOKENS: , - * DEFAULT VALUE: "" - */ - // "reportFileName": "", - /** - * To support different approval requirements for different API levels, multiple "variants" of the API report can - * be generated. The "reportVariants" setting specifies a list of variants to be generated. If omitted, - * by default only the "complete" variant will be generated, which includes all @internal, @alpha, @beta, - * and @public items. Other possible variants are "alpha" (@alpha + @beta + @public), "beta" (@beta + @public), - * and "public" (@public only). - * - * DEFAULT VALUE: [ "complete" ] - */ - // "reportVariants": ["public", "beta"], - /** - * Specifies the folder where the API report file is written. The file name portion is determined by - * the "reportFileName" setting. - * - * The API report file is normally tracked by Git. Changes to it can be used to trigger a branch policy, - * e.g. for an API review. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". 
- * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "/etc/" - */ - // "reportFolder": "/etc/", - /** - * Specifies the folder where the temporary report file is written. The file name portion is determined by - * the "reportFileName" setting. - * - * After the temporary file is written to disk, it is compared with the file in the "reportFolder". - * If they are different, a production build will fail. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "/temp/" - */ - // "reportTempFolder": "/temp/", - /** - * Whether "forgotten exports" should be included in the API report file. Forgotten exports are declarations - * flagged with `ae-forgotten-export` warnings. See https://api-extractor.com/pages/messages/ae-forgotten-export/ to - * learn more. - * - * DEFAULT VALUE: "false" - */ - // "includeForgottenExports": false - }, - /** - * Configures how the doc model file (*.api.json) will be generated. - */ - "docModel": { - /** - * (REQUIRED) Whether to generate a doc model file. - */ - "enabled": false - /** - * The output path for the doc model file. The file extension should be ".api.json". - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "/temp/.api.json" - */ - // "apiJsonFilePath": "/temp/.api.json", - /** - * Whether "forgotten exports" should be included in the doc model file. Forgotten exports are declarations - * flagged with `ae-forgotten-export` warnings. See https://api-extractor.com/pages/messages/ae-forgotten-export/ to - * learn more. - * - * DEFAULT VALUE: "false" - */ - // "includeForgottenExports": false, - /** - * The base URL where the project's source code can be viewed on a website such as GitHub or - * Azure DevOps. This URL path corresponds to the `` path on disk. - * - * This URL is concatenated with the file paths serialized to the doc model to produce URL file paths to individual API items. - * For example, if the `projectFolderUrl` is "https://github.com/microsoft/rushstack/tree/main/apps/api-extractor" and an API - * item's file path is "api/ExtractorConfig.ts", the full URL file path would be - * "https://github.com/microsoft/rushstack/tree/main/apps/api-extractor/api/ExtractorConfig.js". - * - * This setting can be omitted if you don't need source code links in your API documentation reference. - * - * SUPPORTED TOKENS: none - * DEFAULT VALUE: "" - */ - // "projectFolderUrl": "http://github.com/path/to/your/projectFolder" - }, - /** - * Configures how the .d.ts rollup file will be generated. - */ - "dtsRollup": { - /** - * (REQUIRED) Whether to generate the .d.ts rollup file. - */ - "enabled": true - /** - * Specifies the output path for a .d.ts rollup file to be generated without any trimming. - * This file will include all declarations that are exported by the main entry point. - * - * If the path is an empty string, then this file will not be written. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "/dist/.d.ts" - */ - // "untrimmedFilePath": "/dist/.d.ts", - /** - * Specifies the output path for a .d.ts rollup file to be generated with trimming for an "alpha" release. 
- * This file will include only declarations that are marked as "@public", "@beta", or "@alpha". - * - * If the path is an empty string, then this file will not be written. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "" - */ - // "alphaTrimmedFilePath": "/dist/-alpha.d.ts", - /** - * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "beta" release. - * This file will include only declarations that are marked as "@public" or "@beta". - * - * If the path is an empty string, then this file will not be written. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "" - */ - // "betaTrimmedFilePath": "/dist/-beta.d.ts", - /** - * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "public" release. - * This file will include only declarations that are marked as "@public". - * - * If the path is an empty string, then this file will not be written. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "" - */ - // "publicTrimmedFilePath": "/dist/-public.d.ts", - /** - * When a declaration is trimmed, by default it will be replaced by a code comment such as - * "Excluded from this release type: exampleMember". Set "omitTrimmingComments" to true to remove the - * declaration completely. - * - * DEFAULT VALUE: false - */ - // "omitTrimmingComments": true - }, - /** - * Configures how the tsdoc-metadata.json file will be generated. - */ - "tsdocMetadata": { - /** - * Whether to generate the tsdoc-metadata.json file. - * - * DEFAULT VALUE: true - */ - // "enabled": true, - /** - * Specifies where the TSDoc metadata file should be written. - * - * The path is resolved relative to the folder of the config file that contains the setting; to change this, - * prepend a folder token such as "". - * - * The default value is "", which causes the path to be automatically inferred from the "tsdocMetadata", - * "typings" or "main" fields of the project's package.json. If none of these fields are set, the lookup - * falls back to "tsdoc-metadata.json" in the package folder. - * - * SUPPORTED TOKENS: , , - * DEFAULT VALUE: "" - */ - // "tsdocMetadataFilePath": "/dist/tsdoc-metadata.json" - }, - /** - * Configures how API Extractor reports error and warning messages produced during analysis. - * - * There are three sources of messages: compiler messages, API Extractor messages, and TSDoc messages. - */ - "messages": { - /** - * Configures handling of diagnostic messages reported by the TypeScript compiler engine while analyzing - * the input .d.ts files. - * - * TypeScript message identifiers start with "TS" followed by an integer. For example: "TS2551" - * - * DEFAULT VALUE: A single "default" entry with logLevel=warning. - */ - "compilerMessageReporting": { - /** - * Configures the default routing for messages that don't match an explicit rule in this table. - */ - "default": { - /** - * Specifies whether the message should be written to the the tool's output log. Note that - * the "addToApiReportFile" property may supersede this option. 
- * - * Possible values: "error", "warning", "none" - * - * Errors cause the build to fail and return a nonzero exit code. Warnings cause a production build to fail - * and return a nonzero exit code. For a non-production build (e.g. when "api-extractor run" includes - * the "--local" option), the warning is displayed but the build will not fail. - * - * DEFAULT VALUE: "warning" - */ - "logLevel": "warning" - /** - * When addToApiReportFile is true: If API Extractor is configured to write an API report file (.api.md), - * then the message will be written inside that file; otherwise, the message is instead logged according to - * the "logLevel" option. - * - * DEFAULT VALUE: false - */ - // "addToApiReportFile": false - } - // "TS2551": { - // "logLevel": "warning", - // "addToApiReportFile": true - // }, - // - // . . . - }, - /** - * Configures handling of messages reported by API Extractor during its analysis. - * - * API Extractor message identifiers start with "ae-". For example: "ae-extra-release-tag" - * - * DEFAULT VALUE: See api-extractor-defaults.json for the complete table of extractorMessageReporting mappings - */ - "extractorMessageReporting": { - "default": { - "logLevel": "warning" - // "addToApiReportFile": false - } - // "ae-extra-release-tag": { - // "logLevel": "warning", - // "addToApiReportFile": true - // }, - // - // . . . - }, - /** - * Configures handling of messages reported by the TSDoc parser when analyzing code comments. - * - * TSDoc message identifiers start with "tsdoc-". For example: "tsdoc-link-tag-unescaped-text" - * - * DEFAULT VALUE: A single "default" entry with logLevel=warning. - */ - "tsdocMessageReporting": { - "default": { - "logLevel": "warning" - // "addToApiReportFile": false - } - // "tsdoc-link-tag-unescaped-text": { - // "logLevel": "warning", - // "addToApiReportFile": true - // }, - // - // . . . - } - } -} +/** + * Config file for API Extractor. For more info, please visit: https://api-extractor.com + */ +{ + "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json", + /** + * Optionally specifies another JSON config file that this file extends from. This provides a way for + * standard settings to be shared across multiple projects. + * + * If the path starts with "./" or "../", the path is resolved relative to the folder of the file that contains + * the "extends" field. Otherwise, the first path segment is interpreted as an NPM package name, and will be + * resolved using NodeJS require(). + * + * SUPPORTED TOKENS: none + * DEFAULT VALUE: "" + */ + // "extends": "./shared/api-extractor-base.json" + // "extends": "my-package/include/api-extractor-base.json" + /** + * Determines the "<projectFolder>" token that can be used with other config file settings. The project folder + * typically contains the tsconfig.json and package.json config files, but the path is user-defined. + * + * The path is resolved relative to the folder of the config file that contains the setting. + * + * The default value for "projectFolder" is the token "<lookup>", which means the folder is determined by traversing + * parent folders, starting from the folder containing api-extractor.json, and stopping at the first folder + * that contains a tsconfig.json file. If a tsconfig.json file cannot be found in this way, then an error + * will be reported. + * + * SUPPORTED TOKENS: <lookup> + * DEFAULT VALUE: "<lookup>" + */ + // "projectFolder": "..",
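As a usage note, the same config can be exercised without the CLI. The sketch below uses the documented programmatic API of @microsoft/api-extractor (Extractor, ExtractorConfig); the config path is an assumption for illustration, and localBuild: true mirrors the "--local" flag discussed in the "messages" section, where warnings are displayed but do not fail the build:

```ts
// Sketch: driving API Extractor programmatically, roughly equivalent to
// running "api-extractor run --local --verbose". The config path below is
// an assumption for illustration.
import * as path from 'path';
import { Extractor, ExtractorConfig, ExtractorResult } from '@microsoft/api-extractor';

const apiExtractorJsonPath: string = path.join(__dirname, 'api-extractor.json');

// Loads api-extractor.json, applies any "extends" chain, and resolves tokens
// such as <projectFolder> and <unscopedPackageName>.
const extractorConfig: ExtractorConfig =
  ExtractorConfig.loadFileAndPrepare(apiExtractorJsonPath);

const extractorResult: ExtractorResult = Extractor.invoke(extractorConfig, {
  localBuild: true, // like "--local": warnings are reported but do not fail the build
  showVerboseMessages: true,
});

if (extractorResult.succeeded) {
  console.log('API Extractor completed successfully');
} else {
  console.error(
    `API Extractor completed with ${extractorResult.errorCount} errors` +
      ` and ${extractorResult.warningCount} warnings`
  );
  process.exitCode = 1;
}
```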
+ /** + * (REQUIRED) Specifies the .d.ts file to be used as the starting point for analysis. API Extractor + * analyzes the symbols exported by this module. + * + * The file extension must be ".d.ts" and not ".ts". + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + */ + "mainEntryPointFilePath": "<projectFolder>/dist/src/index.d.ts", + /** + * A list of NPM package names whose exports should be treated as part of this package. + * + * For example, suppose that Webpack is used to generate a distributed bundle for the project "library1", + * and another NPM package "library2" is embedded in this bundle. Some types from library2 may become part + * of the exported API for library1, but by default API Extractor would generate a .d.ts rollup that explicitly + * imports library2. To avoid this, we might specify: + * + * "bundledPackages": [ "library2" ], + * + * This would direct API Extractor to embed those types directly in the .d.ts rollup, as if they had been + * local files for library1. + * + * The "bundledPackages" elements may specify glob patterns using minimatch syntax. To ensure deterministic + * output, globs are expanded by matching explicitly declared top-level dependencies only. For example, + * the pattern below will NOT match "@my-company/example" unless it appears in a field such as "dependencies" + * or "devDependencies" of the project's package.json file: + * + * "bundledPackages": [ "@my-company/*" ], + */ + "bundledPackages": [], + /** + * Specifies what type of newlines API Extractor should use when writing output files. By default, the output files + * will be written with Windows-style newlines. To use POSIX-style newlines, specify "lf" instead. + * To use the OS's default newline kind, specify "os". + * + * DEFAULT VALUE: "crlf" + */ + // "newlineKind": "crlf", + /** + * Specifies how API Extractor sorts members of an enum when generating the .api.json file. By default, the output + * files will be sorted alphabetically, which is "by-name". To keep the ordering in the source code, specify + * "preserve". + * + * DEFAULT VALUE: "by-name" + */ + // "enumMemberOrder": "by-name", + /** + * Set to true when invoking API Extractor's test harness. When `testMode` is true, the `toolVersion` field in the + * .api.json file is assigned an empty string to prevent spurious diffs in output files tracked for tests. + * + * DEFAULT VALUE: "false" + */ + // "testMode": false, + /** + * Determines how the TypeScript compiler engine will be invoked by API Extractor. + */ + "compiler": { + /** + * Specifies the path to the tsconfig.json file to be used by API Extractor when analyzing the project. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * Note: This setting will be ignored if "overrideTsconfig" is used. + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<projectFolder>/tsconfig.json" + */ + // "tsconfigFilePath": "<projectFolder>/tsconfig.json", + /** + * Provides a compiler configuration that will be used instead of reading the tsconfig.json file from disk. + * The object must conform to the TypeScript tsconfig schema: + * + * http://json.schemastore.org/tsconfig + * + * If omitted, then the tsconfig.json file will be read from the "projectFolder". + * + * DEFAULT VALUE: no overrideTsconfig section + */ + // "overrideTsconfig": { + // . . . + // }
+ /** + * This option causes the compiler to be invoked with the --skipLibCheck option. This option is not recommended + * and may cause API Extractor to produce incomplete or incorrect declarations, but it may be required when + * dependencies contain declarations that are incompatible with the TypeScript engine that API Extractor uses + * for its analysis. Where possible, the underlying issue should be fixed rather than relying on skipLibCheck. + * + * DEFAULT VALUE: false + */ + // "skipLibCheck": true, + }, + /** + * Configures how the API report file (*.api.md) will be generated. + */ + "apiReport": { + /** + * (REQUIRED) Whether to generate an API report. + */ + "enabled": false + /** + * The base filename for the API report files, to be combined with "reportFolder" or "reportTempFolder" + * to produce the full file path. The "reportFileName" should not include any path separators such as + * "\" or "/". The "reportFileName" should not include a file extension, since API Extractor will automatically + * append an appropriate file extension such as ".api.md". If the "reportVariants" setting is used, then the + * file extension includes the variant name, for example "my-report.public.api.md" or "my-report.beta.api.md". + * The "complete" variant always uses the simple extension "my-report.api.md". + * + * Previous versions of API Extractor required "reportFileName" to include the ".api.md" extension explicitly; + * for backwards compatibility, that is still accepted but will be discarded before applying the above rules. + * + * SUPPORTED TOKENS: <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<unscopedPackageName>" + */ + // "reportFileName": "<unscopedPackageName>", + /** + * To support different approval requirements for different API levels, multiple "variants" of the API report can + * be generated. The "reportVariants" setting specifies a list of variants to be generated. If omitted, + * by default only the "complete" variant will be generated, which includes all @internal, @alpha, @beta, + * and @public items. Other possible variants are "alpha" (@alpha + @beta + @public), "beta" (@beta + @public), + * and "public" (@public only). + * + * DEFAULT VALUE: [ "complete" ] + */ + // "reportVariants": ["public", "beta"], + /** + * Specifies the folder where the API report file is written. The file name portion is determined by + * the "reportFileName" setting. + * + * The API report file is normally tracked by Git. Changes to it can be used to trigger a branch policy, + * e.g. for an API review. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<projectFolder>/etc/" + */ + // "reportFolder": "<projectFolder>/etc/", + /** + * Specifies the folder where the temporary report file is written. The file name portion is determined by + * the "reportFileName" setting. + * + * After the temporary file is written to disk, it is compared with the file in the "reportFolder". + * If they are different, a production build will fail. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<projectFolder>/temp/" + */ + // "reportTempFolder": "<projectFolder>/temp/", + /** + * Whether "forgotten exports" should be included in the API report file. Forgotten exports are declarations + * flagged with `ae-forgotten-export` warnings. See https://api-extractor.com/pages/messages/ae-forgotten-export/ to + * learn more. + * + * DEFAULT VALUE: "false" + */ + // "includeForgottenExports": false + },
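For a concrete picture of the "forgotten exports" that the includeForgottenExports settings refer to, here is a hypothetical TypeScript sketch (not code from this repo): a type that participates in an exported signature but is never exported from the entry point triggers ae-forgotten-export:

```ts
// Hypothetical entry point illustrating ae-forgotten-export.

// ClientOptions is NOT exported from the entry point...
interface ClientOptions {
  retries: number;
}

/** @public */
export function createClient(options: ClientOptions): void {
  // ...yet it appears in createClient's public signature, so API Extractor
  // flags ClientOptions with an ae-forgotten-export warning.
  void options;
}
```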
+ /** + * Configures how the doc model file (*.api.json) will be generated. + */ + "docModel": { + /** + * (REQUIRED) Whether to generate a doc model file. + */ + "enabled": false + /** + * The output path for the doc model file. The file extension should be ".api.json". + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<projectFolder>/temp/<unscopedPackageName>.api.json" + */ + // "apiJsonFilePath": "<projectFolder>/temp/<unscopedPackageName>.api.json", + /** + * Whether "forgotten exports" should be included in the doc model file. Forgotten exports are declarations + * flagged with `ae-forgotten-export` warnings. See https://api-extractor.com/pages/messages/ae-forgotten-export/ to + * learn more. + * + * DEFAULT VALUE: "false" + */ + // "includeForgottenExports": false, + /** + * The base URL where the project's source code can be viewed on a website such as GitHub or + * Azure DevOps. This URL path corresponds to the `<projectFolder>` path on disk. + * + * This URL is concatenated with the file paths serialized to the doc model to produce URL file paths to individual API items. + * For example, if the `projectFolderUrl` is "https://github.com/microsoft/rushstack/tree/main/apps/api-extractor" and an API + * item's file path is "api/ExtractorConfig.ts", the full URL file path would be + * "https://github.com/microsoft/rushstack/tree/main/apps/api-extractor/api/ExtractorConfig.ts". + * + * This setting can be omitted if you don't need source code links in your API documentation reference. + * + * SUPPORTED TOKENS: none + * DEFAULT VALUE: "" + */ + // "projectFolderUrl": "http://github.com/path/to/your/projectFolder" + }, + /** + * Configures how the .d.ts rollup file will be generated. + */ + "dtsRollup": { + /** + * (REQUIRED) Whether to generate the .d.ts rollup file. + */ + "enabled": true + /** + * Specifies the output path for a .d.ts rollup file to be generated without any trimming. + * This file will include all declarations that are exported by the main entry point. + * + * If the path is an empty string, then this file will not be written. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<projectFolder>/dist/<unscopedPackageName>.d.ts" + */ + // "untrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>.d.ts", + /** + * Specifies the output path for a .d.ts rollup file to be generated with trimming for an "alpha" release. + * This file will include only declarations that are marked as "@public", "@beta", or "@alpha". + * + * If the path is an empty string, then this file will not be written. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "" + */ + // "alphaTrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>-alpha.d.ts", + /** + * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "beta" release. + * This file will include only declarations that are marked as "@public" or "@beta". + * + * If the path is an empty string, then this file will not be written. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>".
+ * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "" + */ + // "betaTrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>-beta.d.ts", + /** + * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "public" release. + * This file will include only declarations that are marked as "@public". + * + * If the path is an empty string, then this file will not be written. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "" + */ + // "publicTrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>-public.d.ts", + /** + * When a declaration is trimmed, by default it will be replaced by a code comment such as + * "Excluded from this release type: exampleMember". Set "omitTrimmingComments" to true to remove the + * declaration completely. + * + * DEFAULT VALUE: false + */ + // "omitTrimmingComments": true + }, + /** + * Configures how the tsdoc-metadata.json file will be generated. + */ + "tsdocMetadata": { + /** + * Whether to generate the tsdoc-metadata.json file. + * + * DEFAULT VALUE: true + */ + // "enabled": true, + /** + * Specifies where the TSDoc metadata file should be written. + * + * The path is resolved relative to the folder of the config file that contains the setting; to change this, + * prepend a folder token such as "<projectFolder>". + * + * The default value is "<lookup>", which causes the path to be automatically inferred from the "tsdocMetadata", + * "typings" or "main" fields of the project's package.json. If none of these fields are set, the lookup + * falls back to "tsdoc-metadata.json" in the package folder. + * + * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName> + * DEFAULT VALUE: "<lookup>" + */ + // "tsdocMetadataFilePath": "<projectFolder>/dist/tsdoc-metadata.json" + }, + /** + * Configures how API Extractor reports error and warning messages produced during analysis. + * + * There are three sources of messages: compiler messages, API Extractor messages, and TSDoc messages. + */ + "messages": { + /** + * Configures handling of diagnostic messages reported by the TypeScript compiler engine while analyzing + * the input .d.ts files. + * + * TypeScript message identifiers start with "TS" followed by an integer. For example: "TS2551" + * + * DEFAULT VALUE: A single "default" entry with logLevel=warning. + */ + "compilerMessageReporting": { + /** + * Configures the default routing for messages that don't match an explicit rule in this table. + */ + "default": { + /** + * Specifies whether the message should be written to the tool's output log. Note that + * the "addToApiReportFile" property may supersede this option. + * + * Possible values: "error", "warning", "none" + * + * Errors cause the build to fail and return a nonzero exit code. Warnings cause a production build to fail + * and return a nonzero exit code. For a non-production build (e.g. when "api-extractor run" includes + * the "--local" option), the warning is displayed but the build will not fail. + * + * DEFAULT VALUE: "warning" + */ + "logLevel": "warning" + /** + * When addToApiReportFile is true: If API Extractor is configured to write an API report file (.api.md), + * then the message will be written inside that file; otherwise, the message is instead logged according to + * the "logLevel" option. + * + * DEFAULT VALUE: false + */ + // "addToApiReportFile": false + } + // "TS2551": { + // "logLevel": "warning", + // "addToApiReportFile": true + // }, + // + // . . . + }, + /** + * Configures handling of messages reported by API Extractor during its analysis.
+ * + * API Extractor message identifiers start with "ae-". For example: "ae-extra-release-tag" + * + * DEFAULT VALUE: See api-extractor-defaults.json for the complete table of extractorMessageReporting mappings + */ + "extractorMessageReporting": { + "default": { + "logLevel": "warning" + // "addToApiReportFile": false + } + // "ae-extra-release-tag": { + // "logLevel": "warning", + // "addToApiReportFile": true + // }, + // + // . . . + }, + /** + * Configures handling of messages reported by the TSDoc parser when analyzing code comments. + * + * TSDoc message identifiers start with "tsdoc-". For example: "tsdoc-link-tag-unescaped-text" + * + * DEFAULT VALUE: A single "default" entry with logLevel=warning. + */ + "tsdocMessageReporting": { + "default": { + "logLevel": "warning" + // "addToApiReportFile": false + } + // "tsdoc-link-tag-unescaped-text": { + // "logLevel": "warning", + // "addToApiReportFile": true + // }, + // + // . . . + } + } +} diff --git a/sdk/dir-js/eslint.config.mjs b/sdk/dir-js/eslint.config.mjs index bc0a6ab1d..d0d5b41c7 100644 --- a/sdk/dir-js/eslint.config.mjs +++ b/sdk/dir-js/eslint.config.mjs @@ -1,33 +1,33 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -import eslint from '@eslint/js'; -import tseslint from 'typescript-eslint'; - -export default [ - eslint.configs.recommended, - ...tseslint.configs.recommended, - { - ignores: [ - // Ignore built files. - 'dist/**', - ], - }, - { - rules: { - '@typescript-eslint/no-unused-vars': [ - 'error', - { - 'argsIgnorePattern': '^_', - 'varsIgnorePattern': '^_', - }, - ], - '@typescript-eslint/no-empty-object-type': [ - 'error', - { - 'allowInterfaces': 'always', - }, - ], - }, - }, -]; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +import eslint from '@eslint/js'; +import tseslint from 'typescript-eslint'; + +export default [ + eslint.configs.recommended, + ...tseslint.configs.recommended, + { + ignores: [ + // Ignore built files. 
+ 'dist/**', + ], + }, + { + rules: { + '@typescript-eslint/no-unused-vars': [ + 'error', + { + 'argsIgnorePattern': '^_', + 'varsIgnorePattern': '^_', + }, + ], + '@typescript-eslint/no-empty-object-type': [ + 'error', + { + 'allowInterfaces': 'always', + }, + ], + }, + }, +]; diff --git a/sdk/dir-js/package-lock.json b/sdk/dir-js/package-lock.json index 696d21fdd..ff67ec476 100644 --- a/sdk/dir-js/package-lock.json +++ b/sdk/dir-js/package-lock.json @@ -1,4909 +1,4909 @@ -{ - "name": "agntcy-dir", - "version": "0.6.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "agntcy-dir", - "version": "0.6.0", - "license": "Apache-2.0", - "dependencies": { - "@bufbuild/protobuf": "^2.9.0", - "@connectrpc/connect": "^2.1.0", - "@connectrpc/connect-node": "^2.1.0", - "@grpc/grpc-js": "^1.13.4", - "spiffe": "^0.4.0" - }, - "devDependencies": { - "@microsoft/api-extractor": "^7.52.13", - "@rollup/plugin-json": "^6.1.0", - "@rollup/plugin-node-resolve": "^16.0.1", - "@types/node": "^22.19.1", - "@types/uuid": "^10.0.0", - "rollup-plugin-typescript2": "^0.36.0", - "ts-node": "^10.9.2", - "typescript": "^5.9.3", - "typescript-eslint": "^8.44.0", - "uuid": "^11.1.0", - "vitest": "^3.2.4", - "workerpool": "^10.0.1" - }, - "engines": { - "node": ">=20.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-linux-x64-gnu": "4.50.2" - } - }, - "api": { - "version": "0.0.0", - "extraneous": true, - "peerDependencies": { - "@bufbuild/protobuf": "^2.9.0" - } - }, - "node_modules/@bufbuild/protobuf": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/@bufbuild/protobuf/-/protobuf-2.9.0.tgz", - "integrity": "sha512-rnJenoStJ8nvmt9Gzye8nkYd6V22xUAnu4086ER7h1zJ508vStko4pMvDeQ446ilDTFpV5wnoc5YS7XvMwwMqA==", - "license": "(Apache-2.0 AND BSD-3-Clause)" - }, - "node_modules/@connectrpc/connect": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@connectrpc/connect/-/connect-2.1.0.tgz", - "integrity": "sha512-xhiwnYlJNHzmFsRw+iSPIwXR/xweTvTw8x5HiwWp10sbVtd4OpOXbRgE7V58xs1EC17fzusF1f5uOAy24OkBuA==", - "license": "Apache-2.0", - "peerDependencies": { - "@bufbuild/protobuf": "^2.7.0" - } - }, - "node_modules/@connectrpc/connect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@connectrpc/connect-node/-/connect-node-2.1.0.tgz", - "integrity": "sha512-6akCXZSX5uWHLR654ne9Tnq7AnPUkLS65NvgsI5885xBkcuVy2APBd8sA4sLqaplUt84cVEr6LhjEFNx6W1KtQ==", - "license": "Apache-2.0", - "engines": { - "node": ">=20" - }, - "peerDependencies": { - "@bufbuild/protobuf": "^2.7.0", - "@connectrpc/connect": "2.1.0" - } - }, - "node_modules/@cspotcode/source-map-support": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", - "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "0.3.9" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.11.tgz", - "integrity": "sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.25.11", - "resolved": 
"https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.11.tgz", - "integrity": "sha512-uoa7dU+Dt3HYsethkJ1k6Z9YdcHjTrSb5NUy66ZfZaSV8hEYGD5ZHbEMXnqLFlbBflLsl89Zke7CAdDJ4JI+Gg==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.11.tgz", - "integrity": "sha512-9slpyFBc4FPPz48+f6jyiXOx/Y4v34TUeDDXJpZqAWQn/08lKGeD8aDp9TMn9jDz2CiEuHwfhRmGBvpnd/PWIQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.11.tgz", - "integrity": "sha512-Sgiab4xBjPU1QoPEIqS3Xx+R2lezu0LKIEcYe6pftr56PqPygbB7+szVnzoShbx64MUupqoE0KyRlN7gezbl8g==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.11.tgz", - "integrity": "sha512-VekY0PBCukppoQrycFxUqkCojnTQhdec0vevUL/EDOCnXd9LKWqD/bHwMPzigIJXPhC59Vd1WFIL57SKs2mg4w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.11.tgz", - "integrity": "sha512-+hfp3yfBalNEpTGp9loYgbknjR695HkqtY3d3/JjSRUyPg/xd6q+mQqIb5qdywnDxRZykIHs3axEqU6l1+oWEQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.11.tgz", - "integrity": "sha512-CmKjrnayyTJF2eVuO//uSjl/K3KsMIeYeyN7FyDBjsR3lnSJHaXlVoAK8DZa7lXWChbuOk7NjAc7ygAwrnPBhA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.11.tgz", - "integrity": "sha512-Dyq+5oscTJvMaYPvW3x3FLpi2+gSZTCE/1ffdwuM6G1ARang/mb3jvjxs0mw6n3Lsw84ocfo9CrNMqc5lTfGOw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.11.tgz", - "integrity": "sha512-TBMv6B4kCfrGJ8cUPo7vd6NECZH/8hPpBHHlYI3qzoYFvWu2AdTvZNuU/7hsbKWqu/COU7NIK12dHAAqBLLXgw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.11.tgz", - "integrity": "sha512-Qr8AzcplUhGvdyUF08A1kHU3Vr2O88xxP0Tm8GcdVOUm25XYcMPp2YqSVHbLuXzYQMf9Bh/iKx7YPqECs6ffLA==", - "cpu": [ - 
"arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.11.tgz", - "integrity": "sha512-TmnJg8BMGPehs5JKrCLqyWTVAvielc615jbkOirATQvWWB1NMXY77oLMzsUjRLa0+ngecEmDGqt5jiDC6bfvOw==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.11.tgz", - "integrity": "sha512-DIGXL2+gvDaXlaq8xruNXUJdT5tF+SBbJQKbWy/0J7OhU8gOHOzKmGIlfTTl6nHaCOoipxQbuJi7O++ldrxgMw==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.11.tgz", - "integrity": "sha512-Osx1nALUJu4pU43o9OyjSCXokFkFbyzjXb6VhGIJZQ5JZi8ylCQ9/LFagolPsHtgw6himDSyb5ETSfmp4rpiKQ==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.11.tgz", - "integrity": "sha512-nbLFgsQQEsBa8XSgSTSlrnBSrpoWh7ioFDUmwo158gIm5NNP+17IYmNWzaIzWmgCxq56vfr34xGkOcZ7jX6CPw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.11.tgz", - "integrity": "sha512-HfyAmqZi9uBAbgKYP1yGuI7tSREXwIb438q0nqvlpxAOs3XnZ8RsisRfmVsgV486NdjD7Mw2UrFSw51lzUk1ww==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.11.tgz", - "integrity": "sha512-HjLqVgSSYnVXRisyfmzsH6mXqyvj0SA7pG5g+9W7ESgwA70AXYNpfKBqh1KbTxmQVaYxpzA/SvlB9oclGPbApw==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.11.tgz", - "integrity": "sha512-HSFAT4+WYjIhrHxKBwGmOOSpphjYkcswF449j6EjsjbinTZbp8PJtjsVK1XFJStdzXdy/jaddAep2FGY+wyFAQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.11.tgz", - "integrity": "sha512-hr9Oxj1Fa4r04dNpWr3P8QKVVsjQhqrMSUzZzf+LZcYjZNqhA3IAfPQdEh1FLVUJSiu6sgAwp3OmwBfbFgG2Xg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.25.11", 
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.11.tgz", - "integrity": "sha512-u7tKA+qbzBydyj0vgpu+5h5AeudxOAGncb8N6C9Kh1N4n7wU1Xw1JDApsRjpShRpXRQlJLb9wY28ELpwdPcZ7A==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.11.tgz", - "integrity": "sha512-Qq6YHhayieor3DxFOoYM1q0q1uMFYb7cSpLD2qzDSvK1NAvqFi8Xgivv0cFC6J+hWVw2teCYltyy9/m/14ryHg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.11.tgz", - "integrity": "sha512-CN+7c++kkbrckTOz5hrehxWN7uIhFFlmS/hqziSFVWpAzpWrQoAG4chH+nN3Be+Kzv/uuo7zhX716x3Sn2Jduw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openharmony-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.11.tgz", - "integrity": "sha512-rOREuNIQgaiR+9QuNkbkxubbp8MSO9rONmwP5nKncnWJ9v5jQ4JxFnLu4zDSRPf3x4u+2VN4pM4RdyIzDty/wQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.11.tgz", - "integrity": "sha512-nq2xdYaWxyg9DcIyXkZhcYulC6pQ2FuCgem3LI92IwMgIZ69KHeY8T4Y88pcwoLIjbed8n36CyKoYRDygNSGhA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.11.tgz", - "integrity": "sha512-3XxECOWJq1qMZ3MN8srCJ/QfoLpL+VaxD/WfNRm1O3B4+AZ/BnLVgFbUV3eiRYDMXetciH16dwPbbHqwe1uU0Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.11.tgz", - "integrity": "sha512-3ukss6gb9XZ8TlRyJlgLn17ecsK4NSQTmdIXRASVsiS2sQ6zPPZklNJT5GR5tE/MUarymmy8kCEf5xPCNCqVOA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.11.tgz", - "integrity": "sha512-D7Hpz6A2L4hzsRpPaCYkQnGOotdUpDzSGRIv9I+1ITdHROSFUWW95ZPZWQmGka1Fg7W3zFJowyn9WGwMJ0+KPA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@eslint-community/eslint-utils": { - "version": "4.9.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", - "integrity": 
"sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@eslint-community/regexpp": { - "version": "4.12.1", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", - "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.0.0 || ^14.0.0 || >=16.0.0" - } - }, - "node_modules/@eslint/config-array": { - "version": "0.21.1", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", - "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@eslint/object-schema": "^2.1.7", - "debug": "^4.3.1", - "minimatch": "^3.1.2" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/@eslint/config-array/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@eslint/config-helpers": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.1.tgz", - "integrity": "sha512-csZAzkNhsgwb0I/UAV6/RGFTbiakPCf0ZrGmrIxQpYvGZ00PhTkSnyKNolphgIvmnJeGw6rcGVEXfTzUnFuEvw==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@eslint/core": "^0.16.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/@eslint/core": { - "version": "0.16.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.16.0.tgz", - "integrity": "sha512-nmC8/totwobIiFcGkDza3GIKfAw1+hLiYVrh3I1nIomQ8PEr5cxg34jnkmGawul/ep52wGRAcyeDCNtWKSOj4Q==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@types/json-schema": "^7.0.15" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/@eslint/eslintrc": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", - "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^10.0.1", - "globals": "^14.0.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": 
"^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/@eslint/eslintrc/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/@eslint/eslintrc/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@eslint/js": { - "version": "9.38.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.38.0.tgz", - "integrity": "sha512-UZ1VpFvXf9J06YG9xQBdnzU+kthors6KjhMAl6f4gH4usHyh31rUf2DLGInT8RFYIReYXNSydgPY0V2LuWgl7A==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://eslint.org/donate" - } - }, - "node_modules/@eslint/object-schema": { - "version": "2.1.7", - "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", - "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/@eslint/plugin-kit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.0.tgz", - "integrity": "sha512-sB5uyeq+dwCWyPi31B2gQlVlo+j5brPlWx4yZBrEaRo/nhdDE8Xke1gsGgtiBdaBTxuTkceLVuVt/pclrasb0A==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@eslint/core": "^0.16.0", - "levn": "^0.4.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/@grpc/grpc-js": { - "version": "1.14.0", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.14.0.tgz", - "integrity": "sha512-N8Jx6PaYzcTRNzirReJCtADVoq4z7+1KQ4E70jTg/koQiMoUSN1kbNjPOqpPbhMFhfU1/l7ixspPl8dNY+FoUg==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/proto-loader": "^0.8.0", - "@js-sdsl/ordered-map": "^4.4.2" - }, - "engines": { - "node": ">=12.10.0" - } - }, - "node_modules/@grpc/proto-loader": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.8.0.tgz", - "integrity": "sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==", - "license": "Apache-2.0", - "dependencies": { - 
"lodash.camelcase": "^4.3.0", - "long": "^5.0.0", - "protobufjs": "^7.5.3", - "yargs": "^17.7.2" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@humanfs/core": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", - "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "engines": { - "node": ">=18.18.0" - } - }, - "node_modules/@humanfs/node": { - "version": "0.16.7", - "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", - "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@humanfs/core": "^0.19.1", - "@humanwhocodes/retry": "^0.4.0" - }, - "engines": { - "node": ">=18.18.0" - } - }, - "node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "engines": { - "node": ">=12.22" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "node_modules/@humanwhocodes/retry": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", - "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "engines": { - "node": ">=18.18" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "node_modules/@isaacs/balanced-match": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", - "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@isaacs/brace-expansion": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", - "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@isaacs/balanced-match": "^4.0.1" - }, - "engines": { - "node": "20 || >=22" - } - }, - "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", - "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.5", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", - "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", - "dev": true, - "license": "MIT" - }, - "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.9", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", 
- "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" - } - }, - "node_modules/@js-sdsl/ordered-map": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", - "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, - "node_modules/@microsoft/api-extractor": { - "version": "7.53.1", - "resolved": "https://registry.npmjs.org/@microsoft/api-extractor/-/api-extractor-7.53.1.tgz", - "integrity": "sha512-bul5eTNxijLdDBqLye74u9494sRmf+9QULtec9Od0uHnifahGeNt8CC4/xCdn7mVyEBrXIQyQ5+sc4Uc0QfBSA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/api-extractor-model": "7.31.1", - "@microsoft/tsdoc": "~0.15.1", - "@microsoft/tsdoc-config": "~0.17.1", - "@rushstack/node-core-library": "5.17.0", - "@rushstack/rig-package": "0.6.0", - "@rushstack/terminal": "0.19.1", - "@rushstack/ts-command-line": "5.1.1", - "lodash": "~4.17.15", - "minimatch": "10.0.3", - "resolve": "~1.22.1", - "semver": "~7.5.4", - "source-map": "~0.6.1", - "typescript": "5.8.2" - }, - "bin": { - "api-extractor": "bin/api-extractor" - } - }, - "node_modules/@microsoft/api-extractor-model": { - "version": "7.31.1", - "resolved": "https://registry.npmjs.org/@microsoft/api-extractor-model/-/api-extractor-model-7.31.1.tgz", - "integrity": "sha512-Dhnip5OFKbl85rq/ICHBFGhV4RA5UQSl8AC/P/zoGvs+CBudPkatt5kIhMGiYgVPnUWmfR6fcp38+1AFLYNtUw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/tsdoc": "~0.15.1", - "@microsoft/tsdoc-config": "~0.17.1", - "@rushstack/node-core-library": "5.17.0" - } - }, - "node_modules/@microsoft/api-extractor/node_modules/typescript": { - "version": "5.8.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", - "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/@microsoft/tsdoc": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/@microsoft/tsdoc/-/tsdoc-0.15.1.tgz", - "integrity": "sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@microsoft/tsdoc-config": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/@microsoft/tsdoc-config/-/tsdoc-config-0.17.1.tgz", - "integrity": "sha512-UtjIFe0C6oYgTnad4q1QP4qXwLhe6tIpNTRStJ2RZEPIkqQPREAwE5spzVxsdn9UaEMUqhh0AqSx3X4nWAKXWw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/tsdoc": "0.15.1", - "ajv": "~8.12.0", - "jju": "~1.4.0", - "resolve": "~1.22.2" - } - }, - "node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.stat": 
{ - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/@peculiar/asn1-cms": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-cms/-/asn1-cms-2.5.0.tgz", - "integrity": "sha512-p0SjJ3TuuleIvjPM4aYfvYw8Fk1Hn/zAVyPJZTtZ2eE9/MIer6/18ROxX6N/e6edVSfvuZBqhxAj3YgsmSjQ/A==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "@peculiar/asn1-x509-attr": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-csr": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-csr/-/asn1-csr-2.5.0.tgz", - "integrity": "sha512-ioigvA6WSYN9h/YssMmmoIwgl3RvZlAYx4A/9jD2qaqXZwGcNlAxaw54eSx2QG1Yu7YyBC5Rku3nNoHrQ16YsQ==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-ecc": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-ecc/-/asn1-ecc-2.5.0.tgz", - "integrity": "sha512-t4eYGNhXtLRxaP50h3sfO6aJebUCDGQACoeexcelL4roMFRRVgB20yBIu2LxsPh/tdW9I282gNgMOyg3ywg/mg==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-pfx": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-pfx/-/asn1-pfx-2.5.0.tgz", - "integrity": "sha512-Vj0d0wxJZA+Ztqfb7W+/iu8Uasw6hhKtCdLKXLG/P3kEPIQpqGI4P4YXlROfl7gOCqFIbgsj1HzFIFwQ5s20ug==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-cms": "^2.5.0", - "@peculiar/asn1-pkcs8": "^2.5.0", - "@peculiar/asn1-rsa": "^2.5.0", - "@peculiar/asn1-schema": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-pkcs8": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs8/-/asn1-pkcs8-2.5.0.tgz", - "integrity": "sha512-L7599HTI2SLlitlpEP8oAPaJgYssByI4eCwQq2C9eC90otFpm8MRn66PpbKviweAlhinWQ3ZjDD2KIVtx7PaVw==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-pkcs9": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs9/-/asn1-pkcs9-2.5.0.tgz", - "integrity": "sha512-UgqSMBLNLR5TzEZ5ZzxR45Nk6VJrammxd60WMSkofyNzd3DQLSNycGWSK5Xg3UTYbXcDFyG8pA/7/y/ztVCa6A==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-cms": "^2.5.0", - "@peculiar/asn1-pfx": "^2.5.0", - "@peculiar/asn1-pkcs8": "^2.5.0", - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "@peculiar/asn1-x509-attr": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-rsa": { - "version": 
"2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-rsa/-/asn1-rsa-2.5.0.tgz", - "integrity": "sha512-qMZ/vweiTHy9syrkkqWFvbT3eLoedvamcUdnnvwyyUNv5FgFXA3KP8td+ATibnlZ0EANW5PYRm8E6MJzEB/72Q==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-schema": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-schema/-/asn1-schema-2.5.0.tgz", - "integrity": "sha512-YM/nFfskFJSlHqv59ed6dZlLZqtZQwjRVJ4bBAiWV08Oc+1rSd5lDZcBEx0lGDHfSoH3UziI2pXt2UM33KerPQ==", - "license": "MIT", - "dependencies": { - "asn1js": "^3.0.6", - "pvtsutils": "^1.3.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-x509": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509/-/asn1-x509-2.5.0.tgz", - "integrity": "sha512-CpwtMCTJvfvYTFMuiME5IH+8qmDe3yEWzKHe7OOADbGfq7ohxeLaXwQo0q4du3qs0AII3UbLCvb9NF/6q0oTKQ==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "asn1js": "^3.0.6", - "pvtsutils": "^1.3.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/asn1-x509-attr": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509-attr/-/asn1-x509-attr-2.5.0.tgz", - "integrity": "sha512-9f0hPOxiJDoG/bfNLAFven+Bd4gwz/VzrCIIWc1025LEI4BXO0U5fOCTNDPbbp2ll+UzqKsZ3g61mpBp74gk9A==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "asn1js": "^3.0.6", - "tslib": "^2.8.1" - } - }, - "node_modules/@peculiar/json-schema": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/@peculiar/json-schema/-/json-schema-1.1.12.tgz", - "integrity": "sha512-coUfuoMeIB7B8/NMekxaDzLhaYmp0HZNPEjYRm9goRou8UZIC3z21s0sL9AWoCw4EG876QyO3kYrc61WNF9B/w==", - "license": "MIT", - "dependencies": { - "tslib": "^2.0.0" - }, - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@peculiar/webcrypto": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@peculiar/webcrypto/-/webcrypto-1.5.0.tgz", - "integrity": "sha512-BRs5XUAwiyCDQMsVA9IDvDa7UBR9gAvPHgugOeGng3YN6vJ9JYonyDc0lNczErgtCWtucjR5N7VtaonboD/ezg==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.3.8", - "@peculiar/json-schema": "^1.1.12", - "pvtsutils": "^1.3.5", - "tslib": "^2.6.2", - "webcrypto-core": "^1.8.0" - }, - "engines": { - "node": ">=10.12.0" - } - }, - "node_modules/@peculiar/x509": { - "version": "1.14.0", - "resolved": "https://registry.npmjs.org/@peculiar/x509/-/x509-1.14.0.tgz", - "integrity": "sha512-Yc4PDxN3OrxUPiXgU63c+ZRXKGE8YKF2McTciYhUHFtHVB0KMnjeFSU0qpztGhsp4P0uKix4+J2xEpIEDu8oXg==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-cms": "^2.5.0", - "@peculiar/asn1-csr": "^2.5.0", - "@peculiar/asn1-ecc": "^2.5.0", - "@peculiar/asn1-pkcs9": "^2.5.0", - "@peculiar/asn1-rsa": "^2.5.0", - "@peculiar/asn1-schema": "^2.5.0", - "@peculiar/asn1-x509": "^2.5.0", - "pvtsutils": "^1.3.6", - "reflect-metadata": "^0.2.2", - "tslib": "^2.8.1", - "tsyringe": "^4.10.0" - } - }, - "node_modules/@protobuf-ts/grpc-transport": { - "version": "2.11.1", - "resolved": "https://registry.npmjs.org/@protobuf-ts/grpc-transport/-/grpc-transport-2.11.1.tgz", - "integrity": "sha512-l6wrcFffY+tuNnuyrNCkRM8hDIsAZVLA8Mn7PKdVyYxITosYh60qW663p9kL6TWXYuDCL3oxH8ih3vLKTDyhtg==", - "license": "Apache-2.0", - "dependencies": { - "@protobuf-ts/runtime": "^2.11.1", - "@protobuf-ts/runtime-rpc": "^2.11.1" - }, - 
"peerDependencies": { - "@grpc/grpc-js": "^1.6.0" - } - }, - "node_modules/@protobuf-ts/runtime": { - "version": "2.11.1", - "resolved": "https://registry.npmjs.org/@protobuf-ts/runtime/-/runtime-2.11.1.tgz", - "integrity": "sha512-KuDaT1IfHkugM2pyz+FwiY80ejWrkH1pAtOBOZFuR6SXEFTsnb/jiQWQ1rCIrcKx2BtyxnxW6BWwsVSA/Ie+WQ==", - "license": "(Apache-2.0 AND BSD-3-Clause)" - }, - "node_modules/@protobuf-ts/runtime-rpc": { - "version": "2.11.1", - "resolved": "https://registry.npmjs.org/@protobuf-ts/runtime-rpc/-/runtime-rpc-2.11.1.tgz", - "integrity": "sha512-4CqqUmNA+/uMz00+d3CYKgElXO9VrEbucjnBFEjqI4GuDrEQ32MaI3q+9qPBvIGOlL4PmHXrzM32vBPWRhQKWQ==", - "license": "Apache-2.0", - "dependencies": { - "@protobuf-ts/runtime": "^2.11.1" - } - }, - "node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "node_modules/@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "resolved": 
"https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "license": "BSD-3-Clause" - }, - "node_modules/@rollup/plugin-json": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/@rollup/plugin-json/-/plugin-json-6.1.0.tgz", - "integrity": "sha512-EGI2te5ENk1coGeADSIwZ7G2Q8CJS2sF120T7jLw4xFw9n7wIOXHo+kIYRAoVpJAN+kmqZSoO3Fp4JtoNF4ReA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^5.1.0" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "node_modules/@rollup/plugin-node-resolve": { - "version": "16.0.3", - "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.3.tgz", - "integrity": "sha512-lUYM3UBGuM93CnMPG1YocWu7X802BrNF3jW2zny5gQyLQgRFJhV1Sq0Zi74+dh/6NBx1DxFC4b4GXg9wUCG5Qg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^5.0.1", - "@types/resolve": "1.20.2", - "deepmerge": "^4.2.2", - "is-module": "^1.0.0", - "resolve": "^1.22.1" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^2.78.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "node_modules/@rollup/pluginutils": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", - "integrity": "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "estree-walker": "^2.0.2", - "picomatch": "^4.0.2" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz", - "integrity": "sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz", - "integrity": "sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz", - "integrity": "sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz", - "integrity": "sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA==", - "cpu": [ - "x64" - ], - 
"dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz", - "integrity": "sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz", - "integrity": "sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz", - "integrity": "sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz", - "integrity": "sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz", - "integrity": "sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz", - "integrity": "sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loong64-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz", - "integrity": "sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-ppc64-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz", - "integrity": "sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.52.5", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz", - "integrity": "sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz", - "integrity": "sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz", - "integrity": "sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.50.2", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.50.2.tgz", - "integrity": "sha512-9Jie/At6qk70dNIcopcL4p+1UirusEtznpNtcq/u/C5cC4HBX7qSGsYIcG6bdxj15EYWhHiu02YvmdPzylIZlA==", - "cpu": [ - "x64" - ], - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz", - "integrity": "sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-openharmony-arm64": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz", - "integrity": "sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openharmony" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz", - "integrity": "sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz", - "integrity": "sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz", - "integrity": "sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": 
true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz", - "integrity": "sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rushstack/node-core-library": { - "version": "5.17.0", - "resolved": "https://registry.npmjs.org/@rushstack/node-core-library/-/node-core-library-5.17.0.tgz", - "integrity": "sha512-24vt1GbHN6kyIglRMTVpyEiNRRRJK8uZHc1XoGAhmnTDKnrWet8OmOpImMswJIe6gM78eV8cMg1HXwuUHkSSgg==", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "~8.13.0", - "ajv-draft-04": "~1.0.0", - "ajv-formats": "~3.0.1", - "fs-extra": "~11.3.0", - "import-lazy": "~4.0.0", - "jju": "~1.4.0", - "resolve": "~1.22.1", - "semver": "~7.5.4" - }, - "peerDependencies": { - "@types/node": "*" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - } - } - }, - "node_modules/@rushstack/node-core-library/node_modules/ajv": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.13.0.tgz", - "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.4.1" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/@rushstack/problem-matcher": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@rushstack/problem-matcher/-/problem-matcher-0.1.1.tgz", - "integrity": "sha512-Fm5XtS7+G8HLcJHCWpES5VmeMyjAKaWeyZU5qPzZC+22mPlJzAsOxymHiWIfuirtPckX3aptWws+K2d0BzniJA==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "@types/node": "*" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - } - } - }, - "node_modules/@rushstack/rig-package": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/@rushstack/rig-package/-/rig-package-0.6.0.tgz", - "integrity": "sha512-ZQmfzsLE2+Y91GF15c65L/slMRVhF6Hycq04D4TwtdGaUAbIXXg9c5pKA5KFU7M4QMaihoobp9JJYpYcaY3zOw==", - "dev": true, - "license": "MIT", - "dependencies": { - "resolve": "~1.22.1", - "strip-json-comments": "~3.1.1" - } - }, - "node_modules/@rushstack/terminal": { - "version": "0.19.1", - "resolved": "https://registry.npmjs.org/@rushstack/terminal/-/terminal-0.19.1.tgz", - "integrity": "sha512-jsBuSad67IDVMO2yp0hDfs0OdE4z3mDIjIL2pclDT3aEJboeZXE85e1HjuD0F6JoW3XgHvDwoX+WOV+AVTDQeA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@rushstack/node-core-library": "5.17.0", - "@rushstack/problem-matcher": "0.1.1", - "supports-color": "~8.1.1" - }, - "peerDependencies": { - "@types/node": "*" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - } - } - }, - "node_modules/@rushstack/ts-command-line": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/@rushstack/ts-command-line/-/ts-command-line-5.1.1.tgz", - "integrity": "sha512-HPzFsUcr+wZ3oQI08Ec/E6cuiAVHKzrXZGHhwiwIGygAFiqN5QzX+ff30n70NU2WyE26CykgMwBZZSSyHCJrzA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@rushstack/terminal": "0.19.1", - "@types/argparse": "1.0.38", - "argparse": "~1.0.9", - "string-argv": "~0.3.1" - } - }, - 
"node_modules/@tsconfig/node10": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", - "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@tsconfig/node12": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", - "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", - "dev": true, - "license": "MIT" - }, - "node_modules/@tsconfig/node14": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", - "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", - "dev": true, - "license": "MIT" - }, - "node_modules/@tsconfig/node16": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", - "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/argparse": { - "version": "1.0.38", - "resolved": "https://registry.npmjs.org/@types/argparse/-/argparse-1.0.38.tgz", - "integrity": "sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/chai": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", - "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/deep-eql": "*" - } - }, - "node_modules/@types/deep-eql": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", - "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/estree": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/json-schema": { - "version": "7.0.15", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", - "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/@types/node": { - "version": "22.19.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.1.tgz", - "integrity": "sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==", - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/@types/resolve": { - "version": "1.20.2", - "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", - "integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/uuid": { - "version": "10.0.0", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", - "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", - 
"dev": true, - "license": "MIT" - }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.1.tgz", - "integrity": "sha512-rUsLh8PXmBjdiPY+Emjz9NX2yHvhS11v0SR6xNJkm5GM1MO9ea/1GoDKlHHZGrOJclL/cZ2i/vRUYVtjRhrHVQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.46.1", - "@typescript-eslint/type-utils": "8.46.1", - "@typescript-eslint/utils": "8.46.1", - "@typescript-eslint/visitor-keys": "8.46.1", - "graphemer": "^1.4.0", - "ignore": "^7.0.0", - "natural-compare": "^1.4.0", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^8.46.1", - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", - "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/@typescript-eslint/parser": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.46.1.tgz", - "integrity": "sha512-6JSSaBZmsKvEkbRUkf7Zj7dru/8ZCrJxAqArcLaVMee5907JdtEbKGsZ7zNiIm/UAkpGUkaSMZEXShnN2D1HZA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/scope-manager": "8.46.1", - "@typescript-eslint/types": "8.46.1", - "@typescript-eslint/typescript-estree": "8.46.1", - "@typescript-eslint/visitor-keys": "8.46.1", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/project-service": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.46.1.tgz", - "integrity": "sha512-FOIaFVMHzRskXr5J4Jp8lFVV0gz5ngv3RHmn+E4HYxSJ3DgDzU7fVI1/M7Ijh1zf6S7HIoaIOtln1H5y8V+9Zg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.46.1", - "@typescript-eslint/types": "^8.46.1", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.46.1.tgz", - "integrity": "sha512-weL9Gg3/5F0pVQKiF8eOXFZp8emqWzZsOJuWRUNtHT+UNV2xSJegmpCNQHy37aEQIbToTq7RHKhWvOsmbM680A==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.46.1", - "@typescript-eslint/visitor-keys": "8.46.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - 
"node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.46.1.tgz", - "integrity": "sha512-X88+J/CwFvlJB+mK09VFqx5FE4H5cXD+H/Bdza2aEWkSb8hnWIQorNcscRl4IEo1Cz9VI/+/r/jnGWkbWPx54g==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/type-utils": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.46.1.tgz", - "integrity": "sha512-+BlmiHIiqufBxkVnOtFwjah/vrkF4MtKKvpXrKSPLCkCtAp8H01/VV43sfqA98Od7nJpDcFnkwgyfQbOG0AMvw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.46.1", - "@typescript-eslint/typescript-estree": "8.46.1", - "@typescript-eslint/utils": "8.46.1", - "debug": "^4.3.4", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/types": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.46.1.tgz", - "integrity": "sha512-C+soprGBHwWBdkDpbaRC4paGBrkIXxVlNohadL5o0kfhsXqOC6GYH2S/Obmig+I0HTDl8wMaRySwrfrXVP8/pQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.46.1.tgz", - "integrity": "sha512-uIifjT4s8cQKFQ8ZBXXyoUODtRoAd7F7+G8MKmtzj17+1UbdzFl52AzRyZRyKqPHhgzvXunnSckVu36flGy8cg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/project-service": "8.46.1", - "@typescript-eslint/tsconfig-utils": "8.46.1", - "@typescript-eslint/types": "8.46.1", - "@typescript-eslint/visitor-keys": "8.46.1", - "debug": "^4.3.4", - "fast-glob": "^3.3.2", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - 
"engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { - "version": "7.7.3", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", - "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@typescript-eslint/utils": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.46.1.tgz", - "integrity": "sha512-vkYUy6LdZS7q1v/Gxb2Zs7zziuXN0wxqsetJdeZdRe/f5dwJFglmuvZBfTUivCtjH725C1jWCDfpadadD95EDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.7.0", - "@typescript-eslint/scope-manager": "8.46.1", - "@typescript-eslint/types": "8.46.1", - "@typescript-eslint/typescript-estree": "8.46.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.46.1.tgz", - "integrity": "sha512-ptkmIf2iDkNUjdeu2bQqhFPV1m6qTnFFjg7PPDjxKWaMaP0Z6I9l30Jr3g5QqbZGdw8YdYvLp+XnqnWWZOg/NA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.46.1", - "eslint-visitor-keys": "^4.2.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@vitest/expect": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", - "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/spy": "3.2.4", - "@vitest/utils": "3.2.4", - "chai": "^5.2.0", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/mocker": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", - "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "3.2.4", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.17" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "node_modules/@vitest/mocker/node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/@vitest/pretty-format": { - "version": "3.2.4", - "resolved": 
"https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", - "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/runner": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", - "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "3.2.4", - "pathe": "^2.0.3", - "strip-literal": "^3.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/snapshot": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", - "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.4", - "magic-string": "^0.30.17", - "pathe": "^2.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/spy": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", - "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^4.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/utils": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", - "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.4", - "loupe": "^3.1.4", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "license": "MIT", - "peer": true, - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "node_modules/acorn-walk": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", - "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn": "^8.11.0" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/ajv": { - "version": "8.12.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", - "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", 
- "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ajv-draft-04": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz", - "integrity": "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "ajv": "^8.5.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ajv-formats": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", - "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/arg": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", - "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", - "dev": true, - "license": "MIT" - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/asn1js": { - "version": "3.0.6", - "resolved": "https://registry.npmjs.org/asn1js/-/asn1js-3.0.6.tgz", - "integrity": "sha512-UOCGPYbl0tv8+006qks/dTgV9ajs97X2p0FAbyS2iyCRrmLSRolDaHdp+v/CLgnzHc3fVB+CwYiUmei7ndFcgA==", - "license": "BSD-3-Clause", - "dependencies": { - "pvtsutils": "^1.3.6", - "pvutils": "^1.1.3", - "tslib": "^2.8.1" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/assertion-error": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": 
"sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/chai": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", - "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", - "dev": true, - "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/chalk/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/check-error": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", - "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "license": 
"MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", - "dev": true, - "license": "MIT" - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/create-require": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", - "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/debug": { - "version": "4.4.3", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", - "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": 
"sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/es-module-lexer": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", - "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", - "dev": true, - "license": "MIT" - }, - "node_modules/esbuild": { - "version": "0.25.11", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.11.tgz", - "integrity": "sha512-KohQwyzrKTQmhXDW1PjCv3Tyspn9n5GcY2RTDqeORIdIJY8yKIF7sTSopFmn/wpMPW4rdPXI0UE5LJLuq3bx0Q==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.11", - "@esbuild/android-arm": "0.25.11", - "@esbuild/android-arm64": "0.25.11", - "@esbuild/android-x64": "0.25.11", - "@esbuild/darwin-arm64": "0.25.11", - "@esbuild/darwin-x64": "0.25.11", - "@esbuild/freebsd-arm64": "0.25.11", - "@esbuild/freebsd-x64": "0.25.11", - "@esbuild/linux-arm": "0.25.11", - "@esbuild/linux-arm64": "0.25.11", - "@esbuild/linux-ia32": "0.25.11", - "@esbuild/linux-loong64": "0.25.11", - "@esbuild/linux-mips64el": "0.25.11", - "@esbuild/linux-ppc64": "0.25.11", - "@esbuild/linux-riscv64": "0.25.11", - "@esbuild/linux-s390x": "0.25.11", - "@esbuild/linux-x64": "0.25.11", - "@esbuild/netbsd-arm64": "0.25.11", - "@esbuild/netbsd-x64": "0.25.11", - "@esbuild/openbsd-arm64": "0.25.11", - "@esbuild/openbsd-x64": "0.25.11", - "@esbuild/openharmony-arm64": "0.25.11", - "@esbuild/sunos-x64": "0.25.11", - "@esbuild/win32-arm64": "0.25.11", - "@esbuild/win32-ia32": "0.25.11", - "@esbuild/win32-x64": "0.25.11" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint": { - "version": "9.38.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.38.0.tgz", - "integrity": "sha512-t5aPOpmtJcZcz5UJyY2GbvpDlsK5E8JqRqoKtfiKE3cNh437KIqfJr3A3AKf5k64NPx6d0G3dno6XDY05PqPtw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@eslint-community/eslint-utils": "^4.8.0", - "@eslint-community/regexpp": "^4.12.1", - "@eslint/config-array": "^0.21.1", - "@eslint/config-helpers": "^0.4.1", - "@eslint/core": "^0.16.0", - "@eslint/eslintrc": "^3.3.1", - "@eslint/js": "9.38.0", - "@eslint/plugin-kit": "^0.4.0", - "@humanfs/node": "^0.16.6", - "@humanwhocodes/module-importer": "^1.0.1", - "@humanwhocodes/retry": "^0.4.2", - "@types/estree": "^1.0.6", - "ajv": "^6.12.4", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.6", - "debug": "^4.3.2", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^8.4.0", - "eslint-visitor-keys": "^4.2.1", - "espree": "^10.4.0", - "esquery": "^1.5.0", - "esutils": "^2.0.2", - 
"fast-deep-equal": "^3.1.3", - "file-entry-cache": "^8.0.0", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "ignore": "^5.2.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.3" - }, - "bin": { - "eslint": "bin/eslint.js" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://eslint.org/donate" - }, - "peerDependencies": { - "jiti": "*" - }, - "peerDependenciesMeta": { - "jiti": { - "optional": true - } - } - }, - "node_modules/eslint-scope": { - "version": "8.4.0", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", - "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint-visitor-keys": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", - "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/eslint/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/eslint/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/eslint/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/espree": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", - "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "acorn": "^8.15.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^4.2.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/esquery": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", 
- "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", - "dev": true, - "license": "BSD-3-Clause", - "peer": true, - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "dev": true, - "license": "MIT" - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/expect-type": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", - "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true, - "license": "MIT" - }, - "node_modules/fast-glob": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", - "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.8" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/fast-levenshtein": { - "version": 
"2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/fastq": { - "version": "1.19.1", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", - "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "node_modules/fdir": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", - "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/file-entry-cache": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", - "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "flat-cache": "^4.0.0" - }, - "engines": { - "node": ">=16.0.0" - } - }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-cache-dir": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", - "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", - "dev": true, - "license": "MIT", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/avajs/find-cache-dir?sponsor=1" - } - }, - "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/flat-cache": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", - "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "flatted": "^3.2.9", - "keyv": "^4.5.4" - }, - "engines": { - "node": ">=16" - } - }, - "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", - "dev": true, - "license": "ISC", - "peer": true - }, - "node_modules/fs-extra": { - "version": "11.3.2", - "resolved": 
"https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", - "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/globals": { - "version": "14.0.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", - "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/graphemer": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", - "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", - "dev": true, - "license": "MIT" - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/ignore": { - "version": 
"5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">= 4" - } - }, - "node_modules/import-fresh": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", - "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/import-lazy": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.8.19" - } - }, - "node_modules/is-core-module": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", - "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", - "dev": true, - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/is-module": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", - "integrity": "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==", - "dev": true, - "license": "MIT" - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - 
"node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC", - "peer": true - }, - "node_modules/jju": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/jju/-/jju-1.4.0.tgz", - "integrity": "sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==", - "dev": true, - "license": "MIT" - }, - "node_modules/js-tokens": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", - "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/js-yaml": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", - "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/js-yaml/node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0", - "peer": true - }, - "node_modules/json-buffer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", - "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true, - "license": "MIT" - }, - "node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/keyv": { - "version": "4.5.4", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", - "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - 
"prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true, - "license": "MIT" - }, - "node_modules/lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", - "license": "MIT" - }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true, - "license": "MIT", - "peer": true - }, - "node_modules/long": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", - "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", - "license": "Apache-2.0" - }, - "node_modules/loupe": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", - "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/magic-string": { - "version": "0.30.19", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", - "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.5" - } - }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "node_modules/make-error": { - "version": "1.3.6", - "resolved": 
"https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", - "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", - "dev": true, - "license": "ISC" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "node_modules/micromatch/node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/minimatch": { - "version": "10.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.3.tgz", - "integrity": "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw==", - "dev": true, - "license": "ISC", - "dependencies": { - "@isaacs/brace-expansion": "^5.0.0" - }, - "engines": { - "node": "20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true, - "license": "MIT" - }, - "node_modules/nanoid": { - "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true, - "license": "MIT" - }, - "node_modules/optionator": { - "version": "0.9.4", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", - "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.5" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": 
"sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathe": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", - "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathval": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", - "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "dev": true, - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", - "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", - "dev": true, - "license": "MIT", - 
"engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-dir/node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "license": "MIT", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-dir/node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/pkg-dir/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/pkg-dir/node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "license": "MIT", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/postcss": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", - "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/protobufjs": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", - "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", - "hasInstallScript": true, - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": 
"^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/node": ">=13.7.0", - "long": "^5.0.0" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/pvtsutils": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/pvtsutils/-/pvtsutils-1.3.6.tgz", - "integrity": "sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg==", - "license": "MIT", - "dependencies": { - "tslib": "^2.8.1" - } - }, - "node_modules/pvutils": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/pvutils/-/pvutils-1.1.3.tgz", - "integrity": "sha512-pMpnA0qRdFp32b1sJl1wOJNxZLQ2cbQx+k6tjNtZ8CpvVhNqEPRgivZ2WOUev2YMajecdH7ctUPDvEe87nariQ==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/reflect-metadata": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", - "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==", - "license": "Apache-2.0" - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/resolve": { - "version": "1.22.10", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", - "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-core-module": "^2.16.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": 
"sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/reusify": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", - "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", - "dev": true, - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "node_modules/rollup": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.5.tgz", - "integrity": "sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.8" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.52.5", - "@rollup/rollup-android-arm64": "4.52.5", - "@rollup/rollup-darwin-arm64": "4.52.5", - "@rollup/rollup-darwin-x64": "4.52.5", - "@rollup/rollup-freebsd-arm64": "4.52.5", - "@rollup/rollup-freebsd-x64": "4.52.5", - "@rollup/rollup-linux-arm-gnueabihf": "4.52.5", - "@rollup/rollup-linux-arm-musleabihf": "4.52.5", - "@rollup/rollup-linux-arm64-gnu": "4.52.5", - "@rollup/rollup-linux-arm64-musl": "4.52.5", - "@rollup/rollup-linux-loong64-gnu": "4.52.5", - "@rollup/rollup-linux-ppc64-gnu": "4.52.5", - "@rollup/rollup-linux-riscv64-gnu": "4.52.5", - "@rollup/rollup-linux-riscv64-musl": "4.52.5", - "@rollup/rollup-linux-s390x-gnu": "4.52.5", - "@rollup/rollup-linux-x64-gnu": "4.52.5", - "@rollup/rollup-linux-x64-musl": "4.52.5", - "@rollup/rollup-openharmony-arm64": "4.52.5", - "@rollup/rollup-win32-arm64-msvc": "4.52.5", - "@rollup/rollup-win32-ia32-msvc": "4.52.5", - "@rollup/rollup-win32-x64-gnu": "4.52.5", - "@rollup/rollup-win32-x64-msvc": "4.52.5", - "fsevents": "~2.3.2" - } - }, - "node_modules/rollup-plugin-typescript2": { - "version": "0.36.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.36.0.tgz", - "integrity": "sha512-NB2CSQDxSe9+Oe2ahZbf+B4bh7pHwjV5L+RSYpCu7Q5ROuN94F9b6ioWwKfz3ueL3KTtmX4o2MUH2cgHDIEUsw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^4.1.2", - "find-cache-dir": "^3.3.2", - "fs-extra": "^10.0.0", - "semver": "^7.5.4", - "tslib": "^2.6.2" - }, - "peerDependencies": { - "rollup": ">=1.26.3", - "typescript": ">=2.4.0" - } - }, - "node_modules/rollup-plugin-typescript2/node_modules/@rollup/pluginutils": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz", - "integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "estree-walker": "^2.0.1", - "picomatch": "^2.2.2" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/rollup-plugin-typescript2/node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - 
"node_modules/rollup-plugin-typescript2/node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/rollup/node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.52.5", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz", - "integrity": "sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true, - "license": "ISC" - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/spiffe": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/spiffe/-/spiffe-0.4.0.tgz", - "integrity": "sha512-yjHNB+r2h/ybIIjdwl7TPRHNPfi95oNflRDfUo70w9nV7hrSluPfFecKHDx3X1j3rkO4iuwbssTrwwyJIjpWPw==", - "license": "MIT", - "dependencies": { - "@grpc/grpc-js": "^1.9.11", - "@peculiar/webcrypto": "^1.4.3", - "@peculiar/x509": "^1.9.5", - "@protobuf-ts/grpc-transport": "^2.9.1", - "@protobuf-ts/runtime": "^2.9.1", - "@protobuf-ts/runtime-rpc": "^2.9.1", - "protobufjs": "^7.2.5" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "dev": true, - "license": "BSD-3-Clause" - }, - "node_modules/stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true, - "license": "MIT" - }, - "node_modules/std-env": { - "version": "3.10.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", - "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-argv": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz", - "integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.6.19" - } - }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/strip-literal": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", - "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", - "dev": true, - "license": "MIT", - "dependencies": { - "js-tokens": "^9.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": 
"sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/tinybench": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyexec": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", - "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyglobby": { - "version": "0.2.15", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", - "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "fdir": "^6.5.0", - "picomatch": "^4.0.3" - }, - "engines": { - "node": ">=12.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/SuperchupuDev" - } - }, - "node_modules/tinypool": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", - "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/tinyrainbow": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", - "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", - "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "node_modules/ts-api-utils": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", - "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18.12" - }, - "peerDependencies": { - "typescript": ">=4.8.4" - } - }, - "node_modules/ts-node": { 
- "version": "10.9.2", - "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", - "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@cspotcode/source-map-support": "^0.8.0", - "@tsconfig/node10": "^1.0.7", - "@tsconfig/node12": "^1.0.7", - "@tsconfig/node14": "^1.0.0", - "@tsconfig/node16": "^1.0.2", - "acorn": "^8.4.1", - "acorn-walk": "^8.1.1", - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.1", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-cwd": "dist/bin-cwd.js", - "ts-node-esm": "dist/bin-esm.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "peerDependencies": { - "@swc/core": ">=1.2.50", - "@swc/wasm": ">=1.2.50", - "@types/node": "*", - "typescript": ">=2.7" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "@swc/wasm": { - "optional": true - } - } - }, - "node_modules/tslib": { - "version": "2.8.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", - "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "license": "0BSD" - }, - "node_modules/tsyringe": { - "version": "4.10.0", - "resolved": "https://registry.npmjs.org/tsyringe/-/tsyringe-4.10.0.tgz", - "integrity": "sha512-axr3IdNuVIxnaK5XGEUFTu3YmAQ6lllgrvqfEoR16g/HGnYY/6We4oWENtAnzK6/LpJ2ur9PAb80RBt7/U4ugw==", - "license": "MIT", - "dependencies": { - "tslib": "^1.9.3" - }, - "engines": { - "node": ">= 6.0.0" - } - }, - "node_modules/tsyringe/node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "license": "0BSD" - }, - "node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/typescript": { - "version": "5.9.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", - "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/typescript-eslint": { - "version": "8.46.1", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.46.1.tgz", - "integrity": "sha512-VHgijW803JafdSsDO8I761r3SHrgk4T00IdyQ+/UsthtgPRsBWQLqoSxOolxTpxRKi1kGXK0bSz4CoAc9ObqJA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/eslint-plugin": "8.46.1", - "@typescript-eslint/parser": "8.46.1", - "@typescript-eslint/typescript-estree": "8.46.1", - "@typescript-eslint/utils": "8.46.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - 
"typescript": ">=4.8.4 <6.0.0" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "license": "MIT" - }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/uuid": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", - "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", - "dev": true, - "funding": [ - "https://github.com/sponsors/broofa", - "https://github.com/sponsors/ctavan" - ], - "license": "MIT", - "bin": { - "uuid": "dist/esm/bin/uuid" - } - }, - "node_modules/v8-compile-cache-lib": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", - "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", - "dev": true, - "license": "MIT" - }, - "node_modules/vite": { - "version": "7.1.11", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.11.tgz", - "integrity": "sha512-uzcxnSDVjAopEUjljkWh8EIrg6tlzrjFUfMcR1EVsRDGwf/ccef0qQPRyOrROwhrTDaApueq+ja+KLPlzR/zdg==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.25.0", - "fdir": "^6.5.0", - "picomatch": "^4.0.3", - "postcss": "^8.5.6", - "rollup": "^4.43.0", - "tinyglobby": "^0.2.15" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^20.19.0 || >=22.12.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^20.19.0 || >=22.12.0", - "jiti": ">=1.21.0", - "less": "^4.0.0", - "lightningcss": "^1.21.0", - "sass": "^1.70.0", - "sass-embedded": "^1.70.0", - "stylus": ">=0.54.8", - "sugarss": "^5.0.0", - "terser": "^5.16.0", - "tsx": "^4.8.1", - "yaml": "^2.4.2" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "jiti": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - }, - "tsx": { - "optional": true - }, - "yaml": { - "optional": true - } - } - }, - "node_modules/vite-node": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", - "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.4.1", - "es-module-lexer": "^1.7.0", - "pathe": "^2.0.3", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - 
}, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/vitest": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", - "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/expect": "3.2.4", - "@vitest/mocker": "3.2.4", - "@vitest/pretty-format": "^3.2.4", - "@vitest/runner": "3.2.4", - "@vitest/snapshot": "3.2.4", - "@vitest/spy": "3.2.4", - "@vitest/utils": "3.2.4", - "chai": "^5.2.0", - "debug": "^4.4.1", - "expect-type": "^1.2.1", - "magic-string": "^0.30.17", - "pathe": "^2.0.3", - "picomatch": "^4.0.2", - "std-env": "^3.9.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.2", - "tinyglobby": "^0.2.14", - "tinypool": "^1.1.1", - "tinyrainbow": "^2.0.0", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", - "vite-node": "3.2.4", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/debug": "^4.1.12", - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", - "@vitest/browser": "3.2.4", - "@vitest/ui": "3.2.4", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/debug": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "node_modules/webcrypto-core": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/webcrypto-core/-/webcrypto-core-1.8.1.tgz", - "integrity": "sha512-P+x1MvlNCXlKbLSOY4cYrdreqPG5hbzkmawbcXLKN/mf6DZW0SdNNkZ+sjwsqVkI4A4Ko2sPZmkZtCKY58w83A==", - "license": "MIT", - "dependencies": { - "@peculiar/asn1-schema": "^2.3.13", - "@peculiar/json-schema": "^1.1.12", - "asn1js": "^3.0.5", - "pvtsutils": "^1.3.5", - "tslib": "^2.7.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/why-is-node-running": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", - "dev": true, - "license": "MIT", - "dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/word-wrap": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", - "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/workerpool": { - "version": 
"10.0.1", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-10.0.1.tgz", - "integrity": "sha512-NAnKwZJxWlj/U1cp6ZkEtPE+GQY1S6KtOS3AlCiPfPFLxV3m64giSp7g2LsNJxzYCocDT7TSl+7T0sgrDp3KoQ==", - "dev": true, - "license": "Apache-2.0" - }, - "node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, - "node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/yn": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", - "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "src/models": { - "version": "0.0.0", - "extraneous": true, - "peerDependencies": { - "@bufbuild/protobuf": "^2.9.0" - } - } - } -} +{ + "name": "agntcy-dir", + "version": "0.6.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "agntcy-dir", + "version": "0.6.0", + "license": "Apache-2.0", + "dependencies": { + "@bufbuild/protobuf": "^2.9.0", + "@connectrpc/connect": "^2.1.0", + "@connectrpc/connect-node": "^2.1.0", + "@grpc/grpc-js": "^1.13.4", + "spiffe": "^0.4.0" + }, + "devDependencies": { + "@microsoft/api-extractor": "^7.52.13", + "@rollup/plugin-json": "^6.1.0", + "@rollup/plugin-node-resolve": "^16.0.1", + "@types/node": "^22.19.1", + "@types/uuid": "^10.0.0", + 
"rollup-plugin-typescript2": "^0.36.0", + "ts-node": "^10.9.2", + "typescript": "^5.9.3", + "typescript-eslint": "^8.44.0", + "uuid": "^11.1.0", + "vitest": "^3.2.4", + "workerpool": "^10.0.1" + }, + "engines": { + "node": ">=20.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-linux-x64-gnu": "4.50.2" + } + }, + "api": { + "version": "0.0.0", + "extraneous": true, + "peerDependencies": { + "@bufbuild/protobuf": "^2.9.0" + } + }, + "node_modules/@bufbuild/protobuf": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/@bufbuild/protobuf/-/protobuf-2.9.0.tgz", + "integrity": "sha512-rnJenoStJ8nvmt9Gzye8nkYd6V22xUAnu4086ER7h1zJ508vStko4pMvDeQ446ilDTFpV5wnoc5YS7XvMwwMqA==", + "license": "(Apache-2.0 AND BSD-3-Clause)" + }, + "node_modules/@connectrpc/connect": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@connectrpc/connect/-/connect-2.1.0.tgz", + "integrity": "sha512-xhiwnYlJNHzmFsRw+iSPIwXR/xweTvTw8x5HiwWp10sbVtd4OpOXbRgE7V58xs1EC17fzusF1f5uOAy24OkBuA==", + "license": "Apache-2.0", + "peerDependencies": { + "@bufbuild/protobuf": "^2.7.0" + } + }, + "node_modules/@connectrpc/connect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@connectrpc/connect-node/-/connect-node-2.1.0.tgz", + "integrity": "sha512-6akCXZSX5uWHLR654ne9Tnq7AnPUkLS65NvgsI5885xBkcuVy2APBd8sA4sLqaplUt84cVEr6LhjEFNx6W1KtQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@bufbuild/protobuf": "^2.7.0", + "@connectrpc/connect": "2.1.0" + } + }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.11.tgz", + "integrity": "sha512-Xt1dOL13m8u0WE8iplx9Ibbm+hFAO0GsU2P34UNoDGvZYkY8ifSiy6Zuc1lYxfG7svWE2fzqCUmFp5HCn51gJg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.11.tgz", + "integrity": "sha512-uoa7dU+Dt3HYsethkJ1k6Z9YdcHjTrSb5NUy66ZfZaSV8hEYGD5ZHbEMXnqLFlbBflLsl89Zke7CAdDJ4JI+Gg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.11.tgz", + "integrity": "sha512-9slpyFBc4FPPz48+f6jyiXOx/Y4v34TUeDDXJpZqAWQn/08lKGeD8aDp9TMn9jDz2CiEuHwfhRmGBvpnd/PWIQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.11.tgz", + "integrity": "sha512-Sgiab4xBjPU1QoPEIqS3Xx+R2lezu0LKIEcYe6pftr56PqPygbB7+szVnzoShbx64MUupqoE0KyRlN7gezbl8g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.11.tgz", + "integrity": "sha512-VekY0PBCukppoQrycFxUqkCojnTQhdec0vevUL/EDOCnXd9LKWqD/bHwMPzigIJXPhC59Vd1WFIL57SKs2mg4w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.11.tgz", + "integrity": "sha512-+hfp3yfBalNEpTGp9loYgbknjR695HkqtY3d3/JjSRUyPg/xd6q+mQqIb5qdywnDxRZykIHs3axEqU6l1+oWEQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.11.tgz", + "integrity": "sha512-CmKjrnayyTJF2eVuO//uSjl/K3KsMIeYeyN7FyDBjsR3lnSJHaXlVoAK8DZa7lXWChbuOk7NjAc7ygAwrnPBhA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.11.tgz", + "integrity": "sha512-Dyq+5oscTJvMaYPvW3x3FLpi2+gSZTCE/1ffdwuM6G1ARang/mb3jvjxs0mw6n3Lsw84ocfo9CrNMqc5lTfGOw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.11.tgz", + "integrity": "sha512-TBMv6B4kCfrGJ8cUPo7vd6NECZH/8hPpBHHlYI3qzoYFvWu2AdTvZNuU/7hsbKWqu/COU7NIK12dHAAqBLLXgw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.11.tgz", + "integrity": "sha512-Qr8AzcplUhGvdyUF08A1kHU3Vr2O88xxP0Tm8GcdVOUm25XYcMPp2YqSVHbLuXzYQMf9Bh/iKx7YPqECs6ffLA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.11.tgz", + "integrity": "sha512-TmnJg8BMGPehs5JKrCLqyWTVAvielc615jbkOirATQvWWB1NMXY77oLMzsUjRLa0+ngecEmDGqt5jiDC6bfvOw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.11.tgz", + "integrity": "sha512-DIGXL2+gvDaXlaq8xruNXUJdT5tF+SBbJQKbWy/0J7OhU8gOHOzKmGIlfTTl6nHaCOoipxQbuJi7O++ldrxgMw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.11", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.11.tgz", + "integrity": "sha512-Osx1nALUJu4pU43o9OyjSCXokFkFbyzjXb6VhGIJZQ5JZi8ylCQ9/LFagolPsHtgw6himDSyb5ETSfmp4rpiKQ==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.11.tgz", + "integrity": "sha512-nbLFgsQQEsBa8XSgSTSlrnBSrpoWh7ioFDUmwo158gIm5NNP+17IYmNWzaIzWmgCxq56vfr34xGkOcZ7jX6CPw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.11.tgz", + "integrity": "sha512-HfyAmqZi9uBAbgKYP1yGuI7tSREXwIb438q0nqvlpxAOs3XnZ8RsisRfmVsgV486NdjD7Mw2UrFSw51lzUk1ww==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.11.tgz", + "integrity": "sha512-HjLqVgSSYnVXRisyfmzsH6mXqyvj0SA7pG5g+9W7ESgwA70AXYNpfKBqh1KbTxmQVaYxpzA/SvlB9oclGPbApw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.11.tgz", + "integrity": "sha512-HSFAT4+WYjIhrHxKBwGmOOSpphjYkcswF449j6EjsjbinTZbp8PJtjsVK1XFJStdzXdy/jaddAep2FGY+wyFAQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.11.tgz", + "integrity": "sha512-hr9Oxj1Fa4r04dNpWr3P8QKVVsjQhqrMSUzZzf+LZcYjZNqhA3IAfPQdEh1FLVUJSiu6sgAwp3OmwBfbFgG2Xg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.11.tgz", + "integrity": "sha512-u7tKA+qbzBydyj0vgpu+5h5AeudxOAGncb8N6C9Kh1N4n7wU1Xw1JDApsRjpShRpXRQlJLb9wY28ELpwdPcZ7A==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.11.tgz", + "integrity": "sha512-Qq6YHhayieor3DxFOoYM1q0q1uMFYb7cSpLD2qzDSvK1NAvqFi8Xgivv0cFC6J+hWVw2teCYltyy9/m/14ryHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.11.tgz", + "integrity": "sha512-CN+7c++kkbrckTOz5hrehxWN7uIhFFlmS/hqziSFVWpAzpWrQoAG4chH+nN3Be+Kzv/uuo7zhX716x3Sn2Jduw==", + "cpu": [ 
+ "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.11.tgz", + "integrity": "sha512-rOREuNIQgaiR+9QuNkbkxubbp8MSO9rONmwP5nKncnWJ9v5jQ4JxFnLu4zDSRPf3x4u+2VN4pM4RdyIzDty/wQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.11.tgz", + "integrity": "sha512-nq2xdYaWxyg9DcIyXkZhcYulC6pQ2FuCgem3LI92IwMgIZ69KHeY8T4Y88pcwoLIjbed8n36CyKoYRDygNSGhA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.11.tgz", + "integrity": "sha512-3XxECOWJq1qMZ3MN8srCJ/QfoLpL+VaxD/WfNRm1O3B4+AZ/BnLVgFbUV3eiRYDMXetciH16dwPbbHqwe1uU0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.11.tgz", + "integrity": "sha512-3ukss6gb9XZ8TlRyJlgLn17ecsK4NSQTmdIXRASVsiS2sQ6zPPZklNJT5GR5tE/MUarymmy8kCEf5xPCNCqVOA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.11.tgz", + "integrity": "sha512-D7Hpz6A2L4hzsRpPaCYkQnGOotdUpDzSGRIv9I+1ITdHROSFUWW95ZPZWQmGka1Fg7W3zFJowyn9WGwMJ0+KPA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz", + "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": 
"sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.1", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.1.tgz", + "integrity": "sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.2" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.1.tgz", + "integrity": "sha512-csZAzkNhsgwb0I/UAV6/RGFTbiakPCf0ZrGmrIxQpYvGZ00PhTkSnyKNolphgIvmnJeGw6rcGVEXfTzUnFuEvw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@eslint/core": "^0.16.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.16.0.tgz", + "integrity": "sha512-nmC8/totwobIiFcGkDza3GIKfAw1+hLiYVrh3I1nIomQ8PEr5cxg34jnkmGawul/ep52wGRAcyeDCNtWKSOj4Q==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.1.tgz", + "integrity": "sha512-gtF186CXhIl1p4pJNGZw8Yc6RlshoePRvE0X91oPGb3vZ8pM3qOS9W9NGPat9LziaBV7XrJWGylNQXkGcnM3IQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + 
"dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "9.38.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.38.0.tgz", + "integrity": "sha512-UZ1VpFvXf9J06YG9xQBdnzU+kthors6KjhMAl6f4gH4usHyh31rUf2DLGInT8RFYIReYXNSydgPY0V2LuWgl7A==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.0.tgz", + "integrity": "sha512-sB5uyeq+dwCWyPi31B2gQlVlo+j5brPlWx4yZBrEaRo/nhdDE8Xke1gsGgtiBdaBTxuTkceLVuVt/pclrasb0A==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@eslint/core": "^0.16.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@grpc/grpc-js": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.14.0.tgz", + "integrity": "sha512-N8Jx6PaYzcTRNzirReJCtADVoq4z7+1KQ4E70jTg/koQiMoUSN1kbNjPOqpPbhMFhfU1/l7ixspPl8dNY+FoUg==", + "license": "Apache-2.0", + "dependencies": { + "@grpc/proto-loader": "^0.8.0", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/@grpc/proto-loader": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.8.0.tgz", + "integrity": "sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==", + "license": "Apache-2.0", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.5.3", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": 
"1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz", + "integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz", + "integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", + "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@microsoft/api-extractor": { + "version": "7.53.1", + "resolved": "https://registry.npmjs.org/@microsoft/api-extractor/-/api-extractor-7.53.1.tgz", + "integrity": "sha512-bul5eTNxijLdDBqLye74u9494sRmf+9QULtec9Od0uHnifahGeNt8CC4/xCdn7mVyEBrXIQyQ5+sc4Uc0QfBSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/api-extractor-model": "7.31.1", + "@microsoft/tsdoc": "~0.15.1", + 
"@microsoft/tsdoc-config": "~0.17.1", + "@rushstack/node-core-library": "5.17.0", + "@rushstack/rig-package": "0.6.0", + "@rushstack/terminal": "0.19.1", + "@rushstack/ts-command-line": "5.1.1", + "lodash": "~4.17.15", + "minimatch": "10.0.3", + "resolve": "~1.22.1", + "semver": "~7.5.4", + "source-map": "~0.6.1", + "typescript": "5.8.2" + }, + "bin": { + "api-extractor": "bin/api-extractor" + } + }, + "node_modules/@microsoft/api-extractor-model": { + "version": "7.31.1", + "resolved": "https://registry.npmjs.org/@microsoft/api-extractor-model/-/api-extractor-model-7.31.1.tgz", + "integrity": "sha512-Dhnip5OFKbl85rq/ICHBFGhV4RA5UQSl8AC/P/zoGvs+CBudPkatt5kIhMGiYgVPnUWmfR6fcp38+1AFLYNtUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/tsdoc": "~0.15.1", + "@microsoft/tsdoc-config": "~0.17.1", + "@rushstack/node-core-library": "5.17.0" + } + }, + "node_modules/@microsoft/api-extractor/node_modules/typescript": { + "version": "5.8.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", + "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/@microsoft/tsdoc": { + "version": "0.15.1", + "resolved": "https://registry.npmjs.org/@microsoft/tsdoc/-/tsdoc-0.15.1.tgz", + "integrity": "sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@microsoft/tsdoc-config": { + "version": "0.17.1", + "resolved": "https://registry.npmjs.org/@microsoft/tsdoc-config/-/tsdoc-config-0.17.1.tgz", + "integrity": "sha512-UtjIFe0C6oYgTnad4q1QP4qXwLhe6tIpNTRStJ2RZEPIkqQPREAwE5spzVxsdn9UaEMUqhh0AqSx3X4nWAKXWw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/tsdoc": "0.15.1", + "ajv": "~8.12.0", + "jju": "~1.4.0", + "resolve": "~1.22.2" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@peculiar/asn1-cms": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-cms/-/asn1-cms-2.5.0.tgz", + "integrity": "sha512-p0SjJ3TuuleIvjPM4aYfvYw8Fk1Hn/zAVyPJZTtZ2eE9/MIer6/18ROxX6N/e6edVSfvuZBqhxAj3YgsmSjQ/A==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + 
"@peculiar/asn1-x509": "^2.5.0", + "@peculiar/asn1-x509-attr": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-csr": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-csr/-/asn1-csr-2.5.0.tgz", + "integrity": "sha512-ioigvA6WSYN9h/YssMmmoIwgl3RvZlAYx4A/9jD2qaqXZwGcNlAxaw54eSx2QG1Yu7YyBC5Rku3nNoHrQ16YsQ==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-ecc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-ecc/-/asn1-ecc-2.5.0.tgz", + "integrity": "sha512-t4eYGNhXtLRxaP50h3sfO6aJebUCDGQACoeexcelL4roMFRRVgB20yBIu2LxsPh/tdW9I282gNgMOyg3ywg/mg==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pfx": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pfx/-/asn1-pfx-2.5.0.tgz", + "integrity": "sha512-Vj0d0wxJZA+Ztqfb7W+/iu8Uasw6hhKtCdLKXLG/P3kEPIQpqGI4P4YXlROfl7gOCqFIbgsj1HzFIFwQ5s20ug==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.5.0", + "@peculiar/asn1-pkcs8": "^2.5.0", + "@peculiar/asn1-rsa": "^2.5.0", + "@peculiar/asn1-schema": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pkcs8": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs8/-/asn1-pkcs8-2.5.0.tgz", + "integrity": "sha512-L7599HTI2SLlitlpEP8oAPaJgYssByI4eCwQq2C9eC90otFpm8MRn66PpbKviweAlhinWQ3ZjDD2KIVtx7PaVw==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-pkcs9": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-pkcs9/-/asn1-pkcs9-2.5.0.tgz", + "integrity": "sha512-UgqSMBLNLR5TzEZ5ZzxR45Nk6VJrammxd60WMSkofyNzd3DQLSNycGWSK5Xg3UTYbXcDFyG8pA/7/y/ztVCa6A==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.5.0", + "@peculiar/asn1-pfx": "^2.5.0", + "@peculiar/asn1-pkcs8": "^2.5.0", + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "@peculiar/asn1-x509-attr": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-rsa": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-rsa/-/asn1-rsa-2.5.0.tgz", + "integrity": "sha512-qMZ/vweiTHy9syrkkqWFvbT3eLoedvamcUdnnvwyyUNv5FgFXA3KP8td+ATibnlZ0EANW5PYRm8E6MJzEB/72Q==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-schema": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-schema/-/asn1-schema-2.5.0.tgz", + "integrity": "sha512-YM/nFfskFJSlHqv59ed6dZlLZqtZQwjRVJ4bBAiWV08Oc+1rSd5lDZcBEx0lGDHfSoH3UziI2pXt2UM33KerPQ==", + "license": "MIT", + "dependencies": { + "asn1js": "^3.0.6", + "pvtsutils": "^1.3.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-x509": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509/-/asn1-x509-2.5.0.tgz", + "integrity": "sha512-CpwtMCTJvfvYTFMuiME5IH+8qmDe3yEWzKHe7OOADbGfq7ohxeLaXwQo0q4du3qs0AII3UbLCvb9NF/6q0oTKQ==", + "license": "MIT", + 
"dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "asn1js": "^3.0.6", + "pvtsutils": "^1.3.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/asn1-x509-attr": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/asn1-x509-attr/-/asn1-x509-attr-2.5.0.tgz", + "integrity": "sha512-9f0hPOxiJDoG/bfNLAFven+Bd4gwz/VzrCIIWc1025LEI4BXO0U5fOCTNDPbbp2ll+UzqKsZ3g61mpBp74gk9A==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "asn1js": "^3.0.6", + "tslib": "^2.8.1" + } + }, + "node_modules/@peculiar/json-schema": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/@peculiar/json-schema/-/json-schema-1.1.12.tgz", + "integrity": "sha512-coUfuoMeIB7B8/NMekxaDzLhaYmp0HZNPEjYRm9goRou8UZIC3z21s0sL9AWoCw4EG876QyO3kYrc61WNF9B/w==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.0" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@peculiar/webcrypto": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@peculiar/webcrypto/-/webcrypto-1.5.0.tgz", + "integrity": "sha512-BRs5XUAwiyCDQMsVA9IDvDa7UBR9gAvPHgugOeGng3YN6vJ9JYonyDc0lNczErgtCWtucjR5N7VtaonboD/ezg==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.3.8", + "@peculiar/json-schema": "^1.1.12", + "pvtsutils": "^1.3.5", + "tslib": "^2.6.2", + "webcrypto-core": "^1.8.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/@peculiar/x509": { + "version": "1.14.0", + "resolved": "https://registry.npmjs.org/@peculiar/x509/-/x509-1.14.0.tgz", + "integrity": "sha512-Yc4PDxN3OrxUPiXgU63c+ZRXKGE8YKF2McTciYhUHFtHVB0KMnjeFSU0qpztGhsp4P0uKix4+J2xEpIEDu8oXg==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-cms": "^2.5.0", + "@peculiar/asn1-csr": "^2.5.0", + "@peculiar/asn1-ecc": "^2.5.0", + "@peculiar/asn1-pkcs9": "^2.5.0", + "@peculiar/asn1-rsa": "^2.5.0", + "@peculiar/asn1-schema": "^2.5.0", + "@peculiar/asn1-x509": "^2.5.0", + "pvtsutils": "^1.3.6", + "reflect-metadata": "^0.2.2", + "tslib": "^2.8.1", + "tsyringe": "^4.10.0" + } + }, + "node_modules/@protobuf-ts/grpc-transport": { + "version": "2.11.1", + "resolved": "https://registry.npmjs.org/@protobuf-ts/grpc-transport/-/grpc-transport-2.11.1.tgz", + "integrity": "sha512-l6wrcFffY+tuNnuyrNCkRM8hDIsAZVLA8Mn7PKdVyYxITosYh60qW663p9kL6TWXYuDCL3oxH8ih3vLKTDyhtg==", + "license": "Apache-2.0", + "dependencies": { + "@protobuf-ts/runtime": "^2.11.1", + "@protobuf-ts/runtime-rpc": "^2.11.1" + }, + "peerDependencies": { + "@grpc/grpc-js": "^1.6.0" + } + }, + "node_modules/@protobuf-ts/runtime": { + "version": "2.11.1", + "resolved": "https://registry.npmjs.org/@protobuf-ts/runtime/-/runtime-2.11.1.tgz", + "integrity": "sha512-KuDaT1IfHkugM2pyz+FwiY80ejWrkH1pAtOBOZFuR6SXEFTsnb/jiQWQ1rCIrcKx2BtyxnxW6BWwsVSA/Ie+WQ==", + "license": "(Apache-2.0 AND BSD-3-Clause)" + }, + "node_modules/@protobuf-ts/runtime-rpc": { + "version": "2.11.1", + "resolved": "https://registry.npmjs.org/@protobuf-ts/runtime-rpc/-/runtime-rpc-2.11.1.tgz", + "integrity": "sha512-4CqqUmNA+/uMz00+d3CYKgElXO9VrEbucjnBFEjqI4GuDrEQ32MaI3q+9qPBvIGOlL4PmHXrzM32vBPWRhQKWQ==", + "license": "Apache-2.0", + "dependencies": { + "@protobuf-ts/runtime": "^2.11.1" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "license": "BSD-3-Clause" + }, + 
"node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "license": "BSD-3-Clause" + }, + "node_modules/@rollup/plugin-json": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/@rollup/plugin-json/-/plugin-json-6.1.0.tgz", + "integrity": "sha512-EGI2te5ENk1coGeADSIwZ7G2Q8CJS2sF120T7jLw4xFw9n7wIOXHo+kIYRAoVpJAN+kmqZSoO3Fp4JtoNF4ReA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.1.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/plugin-node-resolve": { + "version": "16.0.3", + "resolved": "https://registry.npmjs.org/@rollup/plugin-node-resolve/-/plugin-node-resolve-16.0.3.tgz", + "integrity": "sha512-lUYM3UBGuM93CnMPG1YocWu7X802BrNF3jW2zny5gQyLQgRFJhV1Sq0Zi74+dh/6NBx1DxFC4b4GXg9wUCG5Qg==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "is-module": "^1.0.0", + "resolve": "^1.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.78.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", + "integrity": "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz", + "integrity": "sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz", + "integrity": "sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz", + "integrity": "sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz", + "integrity": "sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz", + "integrity": "sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz", + "integrity": "sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.52.5", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz", + "integrity": "sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz", + "integrity": "sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz", + "integrity": "sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz", + "integrity": "sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz", + "integrity": "sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz", + "integrity": "sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz", + "integrity": "sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz", + "integrity": "sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz", + "integrity": "sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ==", + "cpu": [ + "s390x" + 
], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.50.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.50.2.tgz", + "integrity": "sha512-9Jie/At6qk70dNIcopcL4p+1UirusEtznpNtcq/u/C5cC4HBX7qSGsYIcG6bdxj15EYWhHiu02YvmdPzylIZlA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz", + "integrity": "sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz", + "integrity": "sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz", + "integrity": "sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz", + "integrity": "sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz", + "integrity": "sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz", + "integrity": "sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rushstack/node-core-library": { + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/@rushstack/node-core-library/-/node-core-library-5.17.0.tgz", + "integrity": "sha512-24vt1GbHN6kyIglRMTVpyEiNRRRJK8uZHc1XoGAhmnTDKnrWet8OmOpImMswJIe6gM78eV8cMg1HXwuUHkSSgg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "~8.13.0", + "ajv-draft-04": "~1.0.0", + "ajv-formats": "~3.0.1", + "fs-extra": "~11.3.0", + "import-lazy": "~4.0.0", + "jju": "~1.4.0", + "resolve": "~1.22.1", + "semver": "~7.5.4" + }, + "peerDependencies": { + "@types/node": "*" + }, + "peerDependenciesMeta": { + 
"@types/node": { + "optional": true + } + } + }, + "node_modules/@rushstack/node-core-library/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@rushstack/problem-matcher": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@rushstack/problem-matcher/-/problem-matcher-0.1.1.tgz", + "integrity": "sha512-Fm5XtS7+G8HLcJHCWpES5VmeMyjAKaWeyZU5qPzZC+22mPlJzAsOxymHiWIfuirtPckX3aptWws+K2d0BzniJA==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/node": "*" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@rushstack/rig-package": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@rushstack/rig-package/-/rig-package-0.6.0.tgz", + "integrity": "sha512-ZQmfzsLE2+Y91GF15c65L/slMRVhF6Hycq04D4TwtdGaUAbIXXg9c5pKA5KFU7M4QMaihoobp9JJYpYcaY3zOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve": "~1.22.1", + "strip-json-comments": "~3.1.1" + } + }, + "node_modules/@rushstack/terminal": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@rushstack/terminal/-/terminal-0.19.1.tgz", + "integrity": "sha512-jsBuSad67IDVMO2yp0hDfs0OdE4z3mDIjIL2pclDT3aEJboeZXE85e1HjuD0F6JoW3XgHvDwoX+WOV+AVTDQeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rushstack/node-core-library": "5.17.0", + "@rushstack/problem-matcher": "0.1.1", + "supports-color": "~8.1.1" + }, + "peerDependencies": { + "@types/node": "*" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@rushstack/ts-command-line": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/@rushstack/ts-command-line/-/ts-command-line-5.1.1.tgz", + "integrity": "sha512-HPzFsUcr+wZ3oQI08Ec/E6cuiAVHKzrXZGHhwiwIGygAFiqN5QzX+ff30n70NU2WyE26CykgMwBZZSSyHCJrzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rushstack/terminal": "0.19.1", + "@types/argparse": "1.0.38", + "argparse": "~1.0.9", + "string-argv": "~0.3.1" + } + }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": 
"sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/argparse": { + "version": "1.0.38", + "resolved": "https://registry.npmjs.org/@types/argparse/-/argparse-1.0.38.tgz", + "integrity": "sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/chai": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", + "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@types/node": { + "version": "22.19.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.1.tgz", + "integrity": "sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/resolve": { + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.20.2.tgz", + "integrity": "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.1.tgz", + "integrity": "sha512-rUsLh8PXmBjdiPY+Emjz9NX2yHvhS11v0SR6xNJkm5GM1MO9ea/1GoDKlHHZGrOJclL/cZ2i/vRUYVtjRhrHVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.46.1", + "@typescript-eslint/type-utils": "8.46.1", + "@typescript-eslint/utils": "8.46.1", + "@typescript-eslint/visitor-keys": "8.46.1", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.46.1", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.46.1.tgz", + "integrity": "sha512-6JSSaBZmsKvEkbRUkf7Zj7dru/8ZCrJxAqArcLaVMee5907JdtEbKGsZ7zNiIm/UAkpGUkaSMZEXShnN2D1HZA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.46.1", + "@typescript-eslint/types": "8.46.1", + "@typescript-eslint/typescript-estree": "8.46.1", + "@typescript-eslint/visitor-keys": "8.46.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.46.1.tgz", + "integrity": "sha512-FOIaFVMHzRskXr5J4Jp8lFVV0gz5ngv3RHmn+E4HYxSJ3DgDzU7fVI1/M7Ijh1zf6S7HIoaIOtln1H5y8V+9Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.46.1", + "@typescript-eslint/types": "^8.46.1", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.46.1.tgz", + "integrity": "sha512-weL9Gg3/5F0pVQKiF8eOXFZp8emqWzZsOJuWRUNtHT+UNV2xSJegmpCNQHy37aEQIbToTq7RHKhWvOsmbM680A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.1", + "@typescript-eslint/visitor-keys": "8.46.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.46.1.tgz", + "integrity": "sha512-X88+J/CwFvlJB+mK09VFqx5FE4H5cXD+H/Bdza2aEWkSb8hnWIQorNcscRl4IEo1Cz9VI/+/r/jnGWkbWPx54g==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.46.1.tgz", + "integrity": "sha512-+BlmiHIiqufBxkVnOtFwjah/vrkF4MtKKvpXrKSPLCkCtAp8H01/VV43sfqA98Od7nJpDcFnkwgyfQbOG0AMvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.1", + "@typescript-eslint/typescript-estree": "8.46.1", + "@typescript-eslint/utils": "8.46.1", + "debug": "^4.3.4", + "ts-api-utils": 
"^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.46.1.tgz", + "integrity": "sha512-C+soprGBHwWBdkDpbaRC4paGBrkIXxVlNohadL5o0kfhsXqOC6GYH2S/Obmig+I0HTDl8wMaRySwrfrXVP8/pQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.46.1.tgz", + "integrity": "sha512-uIifjT4s8cQKFQ8ZBXXyoUODtRoAd7F7+G8MKmtzj17+1UbdzFl52AzRyZRyKqPHhgzvXunnSckVu36flGy8cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.46.1", + "@typescript-eslint/tsconfig-utils": "8.46.1", + "@typescript-eslint/types": "8.46.1", + "@typescript-eslint/visitor-keys": "8.46.1", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.46.1.tgz", + "integrity": "sha512-vkYUy6LdZS7q1v/Gxb2Zs7zziuXN0wxqsetJdeZdRe/f5dwJFglmuvZBfTUivCtjH725C1jWCDfpadadD95EDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.46.1", + "@typescript-eslint/types": "8.46.1", + "@typescript-eslint/typescript-estree": "8.46.1" + }, + "engines": { + "node": "^18.18.0 || 
^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.46.1.tgz", + "integrity": "sha512-ptkmIf2iDkNUjdeu2bQqhFPV1m6qTnFFjg7PPDjxKWaMaP0Z6I9l30Jr3g5QqbZGdw8YdYvLp+XnqnWWZOg/NA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.46.1", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@vitest/expect": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz", + "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz", + "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz", + "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz", + "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz", + "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz", + "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz", + "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peer": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-draft-04": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/ajv-draft-04/-/ajv-draft-04-1.0.0.tgz", + "integrity": "sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "ajv": "^8.5.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/asn1js": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/asn1js/-/asn1js-3.0.6.tgz", + "integrity": "sha512-UOCGPYbl0tv8+006qks/dTgV9ajs97X2p0FAbyS2iyCRrmLSRolDaHdp+v/CLgnzHc3fVB+CwYiUmei7ndFcgA==", + "license": "BSD-3-Clause", + "dependencies": { + "pvtsutils": "^1.3.6", + "pvutils": "^1.1.3", + "tslib": "^2.8.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": 
"sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/check-error": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", + "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.25.11", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.11.tgz", + "integrity": "sha512-KohQwyzrKTQmhXDW1PjCv3Tyspn9n5GcY2RTDqeORIdIJY8yKIF7sTSopFmn/wpMPW4rdPXI0UE5LJLuq3bx0Q==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.11", + "@esbuild/android-arm": "0.25.11", + "@esbuild/android-arm64": "0.25.11", + "@esbuild/android-x64": "0.25.11", + "@esbuild/darwin-arm64": "0.25.11", + "@esbuild/darwin-x64": "0.25.11", + 
"@esbuild/freebsd-arm64": "0.25.11", + "@esbuild/freebsd-x64": "0.25.11", + "@esbuild/linux-arm": "0.25.11", + "@esbuild/linux-arm64": "0.25.11", + "@esbuild/linux-ia32": "0.25.11", + "@esbuild/linux-loong64": "0.25.11", + "@esbuild/linux-mips64el": "0.25.11", + "@esbuild/linux-ppc64": "0.25.11", + "@esbuild/linux-riscv64": "0.25.11", + "@esbuild/linux-s390x": "0.25.11", + "@esbuild/linux-x64": "0.25.11", + "@esbuild/netbsd-arm64": "0.25.11", + "@esbuild/netbsd-x64": "0.25.11", + "@esbuild/openbsd-arm64": "0.25.11", + "@esbuild/openbsd-x64": "0.25.11", + "@esbuild/openharmony-arm64": "0.25.11", + "@esbuild/sunos-x64": "0.25.11", + "@esbuild/win32-arm64": "0.25.11", + "@esbuild/win32-ia32": "0.25.11", + "@esbuild/win32-x64": "0.25.11" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.38.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.38.0.tgz", + "integrity": "sha512-t5aPOpmtJcZcz5UJyY2GbvpDlsK5E8JqRqoKtfiKE3cNh437KIqfJr3A3AKf5k64NPx6d0G3dno6XDY05PqPtw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.1", + "@eslint/config-helpers": "^0.4.1", + "@eslint/core": "^0.16.0", + "@eslint/eslintrc": "^3.3.1", + "@eslint/js": "9.38.0", + "@eslint/plugin-kit": "^0.4.0", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || 
>=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "peer": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", 
+ "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true, + "license": "MIT" + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/expect-type": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/file-entry-cache": { + 
"version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-cache-dir": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz", + "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==", + "dev": true, + "license": "MIT", + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC", + "peer": true + }, + "node_modules/fs-extra": { + "version": "11.3.2", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.2.tgz", + "integrity": "sha512-Xr9F6z6up6Ws+NjzMCZc6WXg2YFRlrLP9NQDO3VQrWrfiojdhS56TzueT88ze0uBdCTwEIhQ3ptnmKeWGFAe0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + 
"funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-lazy": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", + "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", + "dev": true, + "license": "MIT", + 
"engines": { + "node": ">=8" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-module": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", + "integrity": "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC", + "peer": true + }, + "node_modules/jju": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/jju/-/jju-1.4.0.tgz", + "integrity": "sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + 
"license": "MIT", + "peer": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/js-yaml/node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0", + "peer": true + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", + "integrity": 
"sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + "license": "Apache-2.0" + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/magic-string": { + "version": "0.30.19", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.19.tgz", + "integrity": "sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", + "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/minimatch": { + "version": "10.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.0.3.tgz", + "integrity": "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + 
"node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": 
"sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/protobufjs": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.4.tgz", + "integrity": "sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pvtsutils": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/pvtsutils/-/pvtsutils-1.3.6.tgz", + "integrity": "sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.8.1" + } + }, + "node_modules/pvutils": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/pvutils/-/pvutils-1.1.3.tgz", + "integrity": "sha512-pMpnA0qRdFp32b1sJl1wOJNxZLQ2cbQx+k6tjNtZ8CpvVhNqEPRgivZ2WOUev2YMajecdH7ctUPDvEe87nariQ==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/reflect-metadata": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/reflect-metadata/-/reflect-metadata-0.2.2.tgz", + "integrity": "sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==", + "license": "Apache-2.0" + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.52.5.tgz", + "integrity": "sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.52.5", + "@rollup/rollup-android-arm64": "4.52.5", + 
"@rollup/rollup-darwin-arm64": "4.52.5", + "@rollup/rollup-darwin-x64": "4.52.5", + "@rollup/rollup-freebsd-arm64": "4.52.5", + "@rollup/rollup-freebsd-x64": "4.52.5", + "@rollup/rollup-linux-arm-gnueabihf": "4.52.5", + "@rollup/rollup-linux-arm-musleabihf": "4.52.5", + "@rollup/rollup-linux-arm64-gnu": "4.52.5", + "@rollup/rollup-linux-arm64-musl": "4.52.5", + "@rollup/rollup-linux-loong64-gnu": "4.52.5", + "@rollup/rollup-linux-ppc64-gnu": "4.52.5", + "@rollup/rollup-linux-riscv64-gnu": "4.52.5", + "@rollup/rollup-linux-riscv64-musl": "4.52.5", + "@rollup/rollup-linux-s390x-gnu": "4.52.5", + "@rollup/rollup-linux-x64-gnu": "4.52.5", + "@rollup/rollup-linux-x64-musl": "4.52.5", + "@rollup/rollup-openharmony-arm64": "4.52.5", + "@rollup/rollup-win32-arm64-msvc": "4.52.5", + "@rollup/rollup-win32-ia32-msvc": "4.52.5", + "@rollup/rollup-win32-x64-gnu": "4.52.5", + "@rollup/rollup-win32-x64-msvc": "4.52.5", + "fsevents": "~2.3.2" + } + }, + "node_modules/rollup-plugin-typescript2": { + "version": "0.36.0", + "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.36.0.tgz", + "integrity": "sha512-NB2CSQDxSe9+Oe2ahZbf+B4bh7pHwjV5L+RSYpCu7Q5ROuN94F9b6ioWwKfz3ueL3KTtmX4o2MUH2cgHDIEUsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^4.1.2", + "find-cache-dir": "^3.3.2", + "fs-extra": "^10.0.0", + "semver": "^7.5.4", + "tslib": "^2.6.2" + }, + "peerDependencies": { + "rollup": ">=1.26.3", + "typescript": ">=2.4.0" + } + }, + "node_modules/rollup-plugin-typescript2/node_modules/@rollup/pluginutils": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz", + "integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "estree-walker": "^2.0.1", + "picomatch": "^2.2.2" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/rollup-plugin-typescript2/node_modules/fs-extra": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", + "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/rollup-plugin-typescript2/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/rollup/node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.52.5", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz", + "integrity": "sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, 
+ "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/spiffe": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/spiffe/-/spiffe-0.4.0.tgz", + "integrity": "sha512-yjHNB+r2h/ybIIjdwl7TPRHNPfi95oNflRDfUo70w9nV7hrSluPfFecKHDx3X1j3rkO4iuwbssTrwwyJIjpWPw==", + "license": "MIT", + "dependencies": { + "@grpc/grpc-js": "^1.9.11", + "@peculiar/webcrypto": "^1.4.3", + "@peculiar/x509": "^1.9.5", + "@protobuf-ts/grpc-transport": "^2.9.1", + "@protobuf-ts/runtime": "^2.9.1", + "@protobuf-ts/runtime-rpc": "^2.9.1", + "protobufjs": "^7.2.5" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": 
"3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-argv": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz", + "integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.19" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-literal": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.1.0.tgz", + "integrity": "sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": 
"sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", + "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.4.tgz", + "integrity": "sha512-azl+t0z7pw/z958Gy9svOTuzqIk6xq+NSheJzn5MMWtWTFywIacg2wUlzKFGtt3cthx0r2SxMK0yzJOR0IES7Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz", + "integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/tslib": { + 
"version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tsyringe": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/tsyringe/-/tsyringe-4.10.0.tgz", + "integrity": "sha512-axr3IdNuVIxnaK5XGEUFTu3YmAQ6lllgrvqfEoR16g/HGnYY/6We4oWENtAnzK6/LpJ2ur9PAb80RBt7/U4ugw==", + "license": "MIT", + "dependencies": { + "tslib": "^1.9.3" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/tsyringe/node_modules/tslib": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", + "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.46.1", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.46.1.tgz", + "integrity": "sha512-VHgijW803JafdSsDO8I761r3SHrgk4T00IdyQ+/UsthtgPRsBWQLqoSxOolxTpxRKi1kGXK0bSz4CoAc9ObqJA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.46.1", + "@typescript-eslint/parser": "8.46.1", + "@typescript-eslint/typescript-estree": "8.46.1", + "@typescript-eslint/utils": "8.46.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/uuid": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", + "integrity": 
"sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "7.1.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.11.tgz", + "integrity": "sha512-uzcxnSDVjAopEUjljkWh8EIrg6tlzrjFUfMcR1EVsRDGwf/ccef0qQPRyOrROwhrTDaApueq+ja+KLPlzR/zdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.5.0", + "picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.4.tgz", + "integrity": "sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vitest": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz", + "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + 
"bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/webcrypto-core": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/webcrypto-core/-/webcrypto-core-1.8.1.tgz", + "integrity": "sha512-P+x1MvlNCXlKbLSOY4cYrdreqPG5hbzkmawbcXLKN/mf6DZW0SdNNkZ+sjwsqVkI4A4Ko2sPZmkZtCKY58w83A==", + "license": "MIT", + "dependencies": { + "@peculiar/asn1-schema": "^2.3.13", + "@peculiar/json-schema": "^1.1.12", + "asn1js": "^3.0.5", + "pvtsutils": "^1.3.5", + "tslib": "^2.7.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/workerpool": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-10.0.1.tgz", + "integrity": "sha512-NAnKwZJxWlj/U1cp6ZkEtPE+GQY1S6KtOS3AlCiPfPFLxV3m64giSp7g2LsNJxzYCocDT7TSl+7T0sgrDp3KoQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "src/models": { + "version": "0.0.0", + "extraneous": true, + "peerDependencies": { + "@bufbuild/protobuf": "^2.9.0" + } + } + } +} diff --git a/sdk/dir-js/package.json b/sdk/dir-js/package.json index 54efdf3a3..85d6ccfc2 100644 --- a/sdk/dir-js/package.json +++ b/sdk/dir-js/package.json @@ -1,83 +1,83 @@ -{ - "name": "agntcy-dir", - "version": "0.6.0", - "description": "Directory SDK", - "homepage": "https://github.com/agntcy/dir", - "bugs": { - "url": "https://github.com/agntcy/dir/issues" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/agntcy/dir.git" - }, - "license": "Apache-2.0", - "keywords": [ - "sdk", - "client", - "directory", - "agntcy", - "typescript", - "javascript" - ], - "type": "module", - "main": "dist/index.mjs", - "module": "dist/index.mjs", - "typings": "dist/agntcy-dir.d.ts", - "exports": { - ".": { - "types": "./dist/agntcy-dir.d.ts", - "import": "./dist/index.mjs", - "require": "./dist/index.cjs", - "default": "./dist/index.mjs" - } - }, - "scripts": { - "build": "rollup -c && cp -r ./src/models/agntcy ./dist/src/models/agntcy && api-extractor run --local --verbose --diagnostics", - "clean": "rm -rf dist/", - "lint": "eslint '**/*.ts'", - "fix": "eslint '**/*.ts' --fix", - "pretest": "npm run build", - "test": "vitest run" - }, - "packageManager": "npm@11.6.0", - "dependencies": { - "@bufbuild/protobuf": "^2.9.0", - "@connectrpc/connect": "^2.1.0", - "@connectrpc/connect-node": "^2.1.0", - "@grpc/grpc-js": "^1.13.4", - "spiffe": "^0.4.0" - }, - "devDependencies": { - "@microsoft/api-extractor": "^7.52.13", - "@rollup/plugin-json": "^6.1.0", - "@rollup/plugin-node-resolve": "^16.0.1", - "@types/node": "^22.19.1", - "@types/uuid": "^10.0.0", - "rollup-plugin-typescript2": "^0.36.0", - "ts-node": "^10.9.2", - "typescript": "^5.9.3", - 
"typescript-eslint": "^8.44.0", - "uuid": "^11.1.0", - "vitest": "^3.2.4", - "workerpool": "^10.0.1" - }, - "optionalDependencies": { - "@rollup/rollup-linux-x64-gnu": "4.50.2" - }, - "engines": { - "node": ">=20.0.0" - }, - "overrides": { - "tmp": "^0.2.4" - }, - "files": [ - "dist/agntcy-dir.d.ts", - "dist/index.mjs", - "dist/index.cjs", - "dist/index.mjs.map" - ], - "directories": { - "test": "test" - }, - "author": "" -} +{ + "name": "agntcy-dir", + "version": "0.6.0", + "description": "Directory SDK", + "homepage": "https://github.com/agntcy/dir", + "bugs": { + "url": "https://github.com/agntcy/dir/issues" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/agntcy/dir.git" + }, + "license": "Apache-2.0", + "keywords": [ + "sdk", + "client", + "directory", + "agntcy", + "typescript", + "javascript" + ], + "type": "module", + "main": "dist/index.mjs", + "module": "dist/index.mjs", + "typings": "dist/agntcy-dir.d.ts", + "exports": { + ".": { + "types": "./dist/agntcy-dir.d.ts", + "import": "./dist/index.mjs", + "require": "./dist/index.cjs", + "default": "./dist/index.mjs" + } + }, + "scripts": { + "build": "rollup -c && cp -r ./src/models/agntcy ./dist/src/models/agntcy && api-extractor run --local --verbose --diagnostics", + "clean": "rm -rf dist/", + "lint": "eslint '**/*.ts'", + "fix": "eslint '**/*.ts' --fix", + "pretest": "npm run build", + "test": "vitest run" + }, + "packageManager": "npm@11.6.0", + "dependencies": { + "@bufbuild/protobuf": "^2.9.0", + "@connectrpc/connect": "^2.1.0", + "@connectrpc/connect-node": "^2.1.0", + "@grpc/grpc-js": "^1.13.4", + "spiffe": "^0.4.0" + }, + "devDependencies": { + "@microsoft/api-extractor": "^7.52.13", + "@rollup/plugin-json": "^6.1.0", + "@rollup/plugin-node-resolve": "^16.0.1", + "@types/node": "^22.19.1", + "@types/uuid": "^10.0.0", + "rollup-plugin-typescript2": "^0.36.0", + "ts-node": "^10.9.2", + "typescript": "^5.9.3", + "typescript-eslint": "^8.44.0", + "uuid": "^11.1.0", + "vitest": "^3.2.4", + "workerpool": "^10.0.1" + }, + "optionalDependencies": { + "@rollup/rollup-linux-x64-gnu": "4.50.2" + }, + "engines": { + "node": ">=20.0.0" + }, + "overrides": { + "tmp": "^0.2.4" + }, + "files": [ + "dist/agntcy-dir.d.ts", + "dist/index.mjs", + "dist/index.cjs", + "dist/index.mjs.map" + ], + "directories": { + "test": "test" + }, + "author": "" +} diff --git a/sdk/dir-js/rollup.config.mjs b/sdk/dir-js/rollup.config.mjs index 44b4c2419..e2d292c2d 100644 --- a/sdk/dir-js/rollup.config.mjs +++ b/sdk/dir-js/rollup.config.mjs @@ -1,53 +1,53 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -import json from '@rollup/plugin-json'; -import { readFileSync } from 'fs'; -import typescript from 'rollup-plugin-typescript2'; -import { nodeResolve } from '@rollup/plugin-node-resolve'; - -const pkg = JSON.parse( - readFileSync(new URL('./package.json', import.meta.url), 'utf8'), -); - -const rollupPlugins = [ - nodeResolve(), - typescript({ - tsconfigOverride: { - exclude: ['test/**'], - }, - }), - json({ - preferConst: true, - }), -]; - -const externalPackages = [ - 'spiffe', -]; - -export default [ - // Cross ES module (dist/index.mjs) - { - input: 'src/index.ts', - output: { - file: pkg.exports['.']['import'], - format: 'es', - sourcemap: true, - }, - plugins: rollupPlugins, - external: externalPackages, - }, - - // Cross CJS module (dist/index.cjs) - { - input: 'src/index.ts', - output: { - file: pkg.exports['.']['require'], - format: 'cjs', - sourcemap: true, - }, - 
plugins: rollupPlugins, - external: externalPackages, - }, -]; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +import json from '@rollup/plugin-json'; +import { readFileSync } from 'fs'; +import typescript from 'rollup-plugin-typescript2'; +import { nodeResolve } from '@rollup/plugin-node-resolve'; + +const pkg = JSON.parse( + readFileSync(new URL('./package.json', import.meta.url), 'utf8'), +); + +const rollupPlugins = [ + nodeResolve(), + typescript({ + tsconfigOverride: { + exclude: ['test/**'], + }, + }), + json({ + preferConst: true, + }), +]; + +const externalPackages = [ + 'spiffe', +]; + +export default [ + // Cross ES module (dist/index.mjs) + { + input: 'src/index.ts', + output: { + file: pkg.exports['.']['import'], + format: 'es', + sourcemap: true, + }, + plugins: rollupPlugins, + external: externalPackages, + }, + + // Cross CJS module (dist/index.cjs) + { + input: 'src/index.ts', + output: { + file: pkg.exports['.']['require'], + format: 'cjs', + sourcemap: true, + }, + plugins: rollupPlugins, + external: externalPackages, + }, +]; diff --git a/sdk/dir-js/src/client/client.ts b/sdk/dir-js/src/client/client.ts index a7b65d64c..72af41ad2 100644 --- a/sdk/dir-js/src/client/client.ts +++ b/sdk/dir-js/src/client/client.ts @@ -1,1099 +1,1099 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -import { tmpdir } from 'node:os'; -import { join } from 'node:path'; -import { env } from 'node:process'; -import { readFileSync, writeFileSync } from 'node:fs'; -import { spawnSync, SpawnSyncReturns } from 'node:child_process'; - -import { - Client as GrpcClient, - createClient, - Interceptor, - Transport, -} from '@connectrpc/connect'; -import { createGrpcTransport } from '@connectrpc/connect-node'; -import { createClient as createClientSpiffe, X509SVID } from 'spiffe'; -import * as models from '../models'; - -/** - * Configuration class for the AGNTCY Directory client. - * - * This class manages configuration settings for connecting to the Directory service - * and provides default values and environment-based configuration loading. - */ -export class Config { - static DEFAULT_SERVER_ADDRESS = '127.0.0.1:8888'; - static DEFAULT_DIRCTL_PATH = 'dirctl'; - static DEFAULT_SPIFFE_ENDPOINT_SOCKET = ''; - static DEFAULT_AUTH_MODE = ''; - static DEFAULT_JWT_AUDIENCE = ''; - static DEFAULT_TLS_CA_FILE = ''; - static DEFAULT_TLS_CERT_FILE = ''; - static DEFAULT_TLS_KEY_FILE = ''; - - serverAddress: string; - dirctlPath: string; - spiffeEndpointSocket: string; - authMode: '' | 'x509' | 'jwt' | 'tls'; - jwtAudience: string; - tlsCaFile: string; - tlsCertFile: string - tlsKeyFile: string; - - /** - * Creates a new Config instance. - * - * @param serverAddress - The server address to connect to. Defaults to '127.0.0.1:8888' - * @param dirctlPath - Path to the dirctl executable. Defaults to 'dirctl' - * @param spiffeEndpointSocket - Path to the spire server socket. Defaults to empty string. - * @param authMode - Authentication mode: '' for insecure, 'x509', 'jwt' or 'tls'. Defaults to '' - * @param jwtAudience - JWT audience for JWT authentication. 
Required when authMode is 'jwt' - */ - constructor( - serverAddress = Config.DEFAULT_SERVER_ADDRESS, - dirctlPath = Config.DEFAULT_DIRCTL_PATH, - spiffeEndpointSocket = Config.DEFAULT_SPIFFE_ENDPOINT_SOCKET, - authMode: '' | 'x509' | 'jwt' | 'tls' = Config.DEFAULT_AUTH_MODE as '' | 'x509' | 'jwt' | 'tls', - jwtAudience = Config.DEFAULT_JWT_AUDIENCE, - tlsCaFile = Config.DEFAULT_TLS_CA_FILE, - tlsCertFile = Config.DEFAULT_TLS_CERT_FILE, - tlsKeyFile = Config.DEFAULT_TLS_KEY_FILE - ) { - // add protocol prefix if not set - // use unsafe http unless spire/auth is used - if ( - !serverAddress.startsWith('http://') && - !serverAddress.startsWith('https://') - ) { - // use https protocol when X.509, JWT, or TLS auth is used - if (authMode === 'x509' || authMode === 'jwt' || authMode === 'tls') { - serverAddress = `https://${serverAddress}`; - } else { - serverAddress = `http://${serverAddress}`; - } - } - - this.serverAddress = serverAddress; - this.dirctlPath = dirctlPath; - this.spiffeEndpointSocket = spiffeEndpointSocket; - this.authMode = authMode; - this.jwtAudience = jwtAudience; - this.tlsCaFile = tlsCaFile; - this.tlsCertFile = tlsCertFile; - this.tlsKeyFile = tlsKeyFile; - } - - /** - * Load configuration from environment variables. - * - * @param prefix - Environment variable prefix. Defaults to 'DIRECTORY_CLIENT_' - * @returns A new Config instance with values loaded from environment variables - * - * @example - * ```typescript - * // Load with default prefix - * const config = Config.loadFromEnv(); - * - * // Load with custom prefix - * const config = Config.loadFromEnv("MY_APP_"); - * ``` - */ - static loadFromEnv(prefix = 'DIRECTORY_CLIENT_') { - // Load dirctl path from env without env prefix - const dirctlPath = env['DIRCTL_PATH'] || Config.DEFAULT_DIRCTL_PATH; - - // Load other config values with env prefix - const serverAddress = - env[`${prefix}SERVER_ADDRESS`] || Config.DEFAULT_SERVER_ADDRESS; - const spiffeEndpointSocketPath = env[`${prefix}SPIFFE_SOCKET_PATH`] || Config.DEFAULT_SPIFFE_ENDPOINT_SOCKET; - const authMode = (env[`${prefix}AUTH_MODE`] || Config.DEFAULT_AUTH_MODE) as '' | 'x509' | 'jwt' | 'tls'; - const jwtAudience = env[`${prefix}JWT_AUDIENCE`] || Config.DEFAULT_JWT_AUDIENCE; - const tlsCaFile = env[`${prefix}TLS_CA_FILE`] || Config.DEFAULT_TLS_CA_FILE; - const tlsCertFile = env[`${prefix}TLS_CERT_FILE`] || Config.DEFAULT_TLS_CERT_FILE; - const tlsKeyFile = env[`${prefix}TLS_KEY_FILE`] || Config.DEFAULT_TLS_KEY_FILE; - - return new Config(serverAddress, dirctlPath, spiffeEndpointSocketPath, authMode, jwtAudience, tlsCaFile, tlsCertFile, tlsKeyFile); - } -} - -/** - * High-level client for interacting with AGNTCY Directory services. - * - * This client provides a unified interface for operations across the Directory API. - * It handles gRPC communication and provides convenient methods for common operations - * including storage, routing, search, signing, and synchronization. 
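Taken together, Config and Client support both environment-driven and fully explicit setup. The sketch below is illustrative only: it assumes the package exposes Client and Config from its 'agntcy-dir' entry point, and the server address and certificate paths are hypothetical placeholders.

```typescript
import { Client, Config } from 'agntcy-dir'; // assumed entry point

// Environment-driven setup: reads DIRECTORY_CLIENT_* variables.
const envClient = new Client(Config.loadFromEnv());

// Explicit TLS setup: an 'x509', 'jwt', or 'tls' authMode makes Config prefix https://.
const tlsConfig = new Config(
  'directory.example.com:8888', // hypothetical server address
  'dirctl',
  '',                  // SPIFFE socket only needed for 'x509'/'jwt' modes
  'tls',
  '',                  // jwtAudience only applies to 'jwt' mode
  '/etc/dir/ca.pem',   // hypothetical certificate paths
  '/etc/dir/cert.pem',
  '/etc/dir/key.pem',
);
const tlsClient = new Client(tlsConfig, await Client.createGRPCTransport(tlsConfig));
```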
- * - * @example - * ```typescript - * // Create client with default configuration - * const client = new Client(); - * - * // Create client with custom configuration - * const config = new Config('localhost:8888', '/usr/local/bin/dirctl'); - * const client = new Client(config); - * - * // Use client for operations - * const records = await client.push([record]); - * ``` - */ -export class Client { - config: Config; - storeClient: GrpcClient; - routingClient: GrpcClient; - publicationClient: GrpcClient; - searchClient: GrpcClient; - signClient: GrpcClient; - syncClient: GrpcClient; - eventClient: GrpcClient; - - /** - * Initialize the client with the given configuration. - * - * @param config - Optional client configuration. If null, loads from environment - * variables using Config.loadFromEnv() - * @param grpcTransport - Optional transport to use for gRPC communication. - * Can be created with Client.createGRPCTransport(config) - * - * @throws {Error} If unable to establish connection to the server or configuration is invalid - * - * @example - * ```typescript - * // Load config from environment - * const client = new Client(); - * - * // Use custom config - * const config = new Config('localhost:9999'); - * const grpcTransport = await Client.createGRPCTransport(config); - * const client = new Client(config, grpcTransport); - * ``` - */ - constructor(); - constructor(config?: Config); - constructor(config?: Config, grpcTransport?: Transport); - constructor(config?: Config, grpcTransport?: Transport) { - // Load config from environment if not provided - if (!config) { - config = Config.loadFromEnv(); - } - this.config = config; - - // if no transport provided, use insecure transport - if (!grpcTransport) { - grpcTransport = createGrpcTransport({ - baseUrl: config.serverAddress, - }); - } - - // Set clients for all services - this.storeClient = createClient(models.store_v1.StoreService, grpcTransport); - this.routingClient = createClient( - models.routing_v1.RoutingService, - grpcTransport, - ); - this.publicationClient = createClient(models.routing_v1.PublicationService, grpcTransport); - this.searchClient = createClient(models.search_v1.SearchService, grpcTransport); - this.signClient = createClient(models.sign_v1.SignService, grpcTransport); - this.syncClient = createClient(models.store_v1.SyncService, grpcTransport); - this.eventClient = createClient(models.events_v1.EventService, grpcTransport); - } - - private static convertToPEM(bytes: Uint8Array, label: string): string { - // Convert Uint8Array to base64 string - let binary = ''; - const len = bytes.byteLength; - for (let i = 0; i < len; i++) { - binary += String.fromCharCode(bytes[i]); - } - const base64String = btoa(binary); - - // Split base64 string into 64-character lines - const lines = base64String.match(/.{1,64}/g) || []; - - // Build PEM formatted string with headers and footers - const pem = [ - `-----BEGIN ${label}-----`, - ...lines, - `-----END ${label}-----` - ].join('\n'); - - return pem; - } - - static async createGRPCTransport(config: Config): Promise { - // Handle different authentication modes - switch (config.authMode) { - case '': - return createGrpcTransport({ - baseUrl: config.serverAddress, - }); - - case 'jwt': - return await this.createJWTTransport(config); - - case 'x509': - return await this.createX509Transport(config); - - case 'tls': - return await this.createTLSTransport(config); - - default: - throw new Error(`Unsupported auth mode: ${config.authMode}`); - } - } - - private static async 
createX509Transport(config: Config): Promise { - if (config.spiffeEndpointSocket === '') { - throw new Error('SPIFFE socket path is required for X.509 authentication'); - } - - // Create secure transport with SPIFFE X.509 - const client = createClientSpiffe(config.spiffeEndpointSocket); - - let svid: X509SVID = { - spiffeId: '', - hint: '', - x509Svid: new Uint8Array(), - x509SvidKey: new Uint8Array(), - bundle: new Uint8Array(), - }; - - const svidStream = client.fetchX509SVID({}); - for await (const message of svidStream.responses) { - message.svids.forEach((_svid) => { - svid = _svid; - }) - - if (message.svids.length > 0) { - break - } - } - - // Create transport settings for gRPC client - const transport = createGrpcTransport({ - baseUrl: config.serverAddress, - nodeOptions: { - ca: this.convertToPEM(svid.bundle, "TRUSTED CERTIFICATE"), - cert: this.convertToPEM(svid.x509Svid, "CERTIFICATE"), - key: this.convertToPEM(svid.x509SvidKey, "PRIVATE KEY"), - }, - }); - - return transport; - } - - private static async createJWTTransport(config: Config): Promise { - if (config.spiffeEndpointSocket === '') { - throw new Error('SPIFFE socket path is required for JWT authentication'); - } - - if (config.jwtAudience === '') { - throw new Error('JWT audience is required for JWT authentication'); - } - - // Create SPIFFE client - const client = createClientSpiffe(config.spiffeEndpointSocket); - - // Fetch X.509 bundle for verifying server's TLS certificate - // In JWT mode, the server presents its X.509-SVID via TLS for transport security - let bundle: Uint8Array | null = null; - const bundleStream = client.fetchX509Bundles({}); - for await (const message of bundleStream.responses) { - // Get the first bundle from the bundles map - // bundles is a map where bytes is ASN.1 DER encoded - for (const [_, bundleData] of Object.entries(message.bundles)) { - // Convert to a new Uint8Array to ensure type compatibility - bundle = new Uint8Array(bundleData); - break; - } - if (bundle !== null) { - break; - } - } - - if (bundle === null || bundle.length === 0) { - throw new Error('Failed to fetch X.509 bundle from SPIRE: no bundles returned'); - } - - // Create JWT interceptor that fetches and injects JWT tokens - const jwtInterceptor: Interceptor = (next) => async (req) => { - // Fetch JWT-SVID from SPIRE - // Note: spiffeId is empty string to use the workload's default identity - const jwtCall = client.fetchJWTSVID({ - spiffeId: '', - audience: [config.jwtAudience] - }); - - const response = await jwtCall.response; - - if (!response.svids || response.svids.length === 0) { - throw new Error('Failed to fetch JWT-SVID from SPIRE: no SVIDs returned'); - } - - const jwtToken = response.svids[0].svid; - - // Add JWT token to request headers - req.header.set('authorization', `Bearer ${jwtToken}`); - - return await next(req); - }; - - // Create transport with JWT interceptor and TLS using SPIFFE bundle - // For JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID - const transport = createGrpcTransport({ - baseUrl: config.serverAddress, - interceptors: [jwtInterceptor], - nodeOptions: { - ca: this.convertToPEM(bundle, "CERTIFICATE"), - }, - }); - - return transport; - } - - private static async createTLSTransport(config: Config): Promise { - if (config.tlsCaFile === '') { - throw new Error('TLS CA file is required for TLS authentication'); - } - if (config.tlsCertFile === '') { - throw new Error('TLS certificate file is required for TLS authentication'); - } - if (config.tlsKeyFile === 
'') { - throw new Error('TLS key file is required for TLS authentication'); - } - - let root_ca: string; - let cert_chain: string; - let private_key: string; - - try { - root_ca = readFileSync(config.tlsCaFile).toString(); - cert_chain = readFileSync(config.tlsCertFile).toString(); - private_key = readFileSync(config.tlsKeyFile).toString(); - } catch (e) { - console.error('Error reading file:', (e as Error).message); - throw e; - } - - const transport = createGrpcTransport({ - baseUrl: config.serverAddress, - nodeOptions: { - ca: root_ca, - cert: cert_chain, - key: private_key, - }, - }); - - return transport; - } - /** - * Request generator helper function for streaming requests. - */ - private async *requestGenerator(reqs: T[]): AsyncIterable { - for (const req of reqs) { - yield req; - } - } - - /** - * Push records to the Store API. - * - * Uploads one or more records to the content store, making them available - * for retrieval and reference. Each record is assigned a unique content - * identifier (CID) based on its content hash. - * - * @param records - Array of Record objects to push to the store - * @returns Promise that resolves to an array of RecordRef objects containing the CIDs of the pushed records - * - * @throws {Error} If the gRPC call fails or the push operation fails - * - * @example - * ```typescript - * const records = [createRecord("example")]; - * const refs = await client.push(records); - * console.log(`Pushed with CID: ${refs[0].cid}`); - * ``` - */ - async push( - records: models.core_v1.Record[], - ): Promise { - const responses: models.core_v1.RecordRef[] = []; - - for await (const response of this.storeClient.push( - this.requestGenerator(records), - )) { - responses.push(response); - } - - return responses; - } - - /** - * Push records with referrer metadata to the Store API. - * - * Uploads records along with optional artifacts and referrer information. - * This is useful for pushing complex objects that include additional - * metadata or associated artifacts. - * - * @param requests - Array of PushReferrerRequest objects containing records and optional artifacts - * @returns Promise that resolves to an array of PushReferrerResponse objects containing the details of pushed artifacts - * - * @throws {Error} If the gRPC call fails or the push operation fails - * - * @example - * ```typescript - * const requests = [new models.store_v1.PushReferrerRequest({record: record})]; - * const responses = await client.push_referrer(requests); - * ``` - */ - async push_referrer( - requests: models.store_v1.PushReferrerRequest[], - ): Promise { - const responses: models.store_v1.PushReferrerResponse[] = []; - - for await (const response of this.storeClient.pushReferrer( - this.requestGenerator(requests), - )) { - responses.push(response); - } - - return responses; - } - - /** - * Pull records from the Store API by their references. - * - * Retrieves one or more records from the content store using their - * content identifiers (CIDs). 
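The streaming push/pull pair above round-trips naturally. A minimal sketch, assuming a constructed `client` and a prebuilt `record` (models.core_v1.Record):

```typescript
// Push assigns a content identifier (CID) derived from the record's hash.
const refs = await client.push([record]);
console.log(`Stored as CID: ${refs[0].cid}`);

// Pull the same record back using the reference that push returned.
const [pulled] = await client.pull(refs);
console.log(`Round-tripped record: ${pulled}`);
```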
- * - * @param refs - Array of RecordRef objects containing the CIDs to retrieve - * @returns Promise that resolves to an array of Record objects retrieved from the store - * - * @throws {Error} If the gRPC call fails or the pull operation fails - * - * @example - * ```typescript - * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; - * const records = await client.pull(refs); - * for (const record of records) { - * console.log(`Retrieved record: ${record}`); - * } - * ``` - */ - async pull( - refs: models.core_v1.RecordRef[], - ): Promise { - const records: models.core_v1.Record[] = []; - - for await (const response of this.storeClient.pull( - this.requestGenerator(refs), - )) { - records.push(response); - } - - return records; - } - - /** - * Pull records with referrer metadata from the Store API. - * - * Retrieves records along with their associated artifacts and referrer - * information. This provides access to complex objects that include - * additional metadata or associated artifacts. - * - * @param requests - Array of PullReferrerRequest objects containing records and optional artifacts for pull operations - * @returns Promise that resolves to an array of PullReferrerResponse objects containing the retrieved records - * - * @throws {Error} If the gRPC call fails or the pull operation fails - * - * @example - * ```typescript - * const requests = [new models.store_v1.PullReferrerRequest({ref: ref})]; - * const responses = await client.pull_referrer(requests); - * for (const response of responses) { - * console.log(`Retrieved: ${response}`); - * } - * ``` - */ - async pull_referrer( - requests: models.store_v1.PullReferrerRequest[], - ): Promise { - const responses: models.store_v1.PullReferrerResponse[] = []; - - for await (const response of this.storeClient.pullReferrer( - this.requestGenerator(requests), - )) { - responses.push(response); - } - - return responses; - } - - /** - * Search objects from the Store API matching the specified queries. - * - * Performs a search across the storage using the provided search queries - * and returns a list of matching CIDs. This is efficient for lookups - * where only the CIDs are needed. - * - * @param request - SearchCIDsRequest containing queries, filters, and search options - * @returns Promise that resolves to an array of SearchCIDsResponse objects matching the queries - * - * @throws {Error} If the gRPC call fails or the search operation fails - * - * @example - * ```typescript - * const request = create(models.search_v1.SearchCIDsRequestSchema, {queries: [query], limit: 10}); - * const responses = await client.searchCIDs(request); - * for (const response of responses) { - * console.log(`Found CID: ${response.recordCid}`); - * } - * ``` - */ - async searchCIDs( - request: models.search_v1.SearchCIDsRequest, - ): Promise { - const responses: models.search_v1.SearchCIDsResponse[] = []; - - for await (const response of this.searchClient.searchCIDs(request)) { - responses.push(response); - } - - return responses; - } - - /** - * Search for full records from the Store API matching the specified queries. - * - * Performs a search across the storage using the provided search queries - * and returns a list of full records with all metadata. 
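searchCIDs and searchRecords differ only in payload weight: the former streams bare CIDs, the latter full records with metadata. A sketch in the request-construction style of the doc comments above, assuming a prebuilt `query` object (its shape is not shown here):

```typescript
import { create } from '@bufbuild/protobuf';

// Cheap lookup: only matching CIDs come back.
const cidRequest = create(models.search_v1.SearchCIDsRequestSchema, {
  queries: [query],
  limit: 10,
});
for (const hit of await client.searchCIDs(cidRequest)) {
  console.log(`Matched CID: ${hit.recordCid}`);
}

// Same query, but each response carries the full record.
const recordRequest = create(models.search_v1.SearchRecordsRequestSchema, {
  queries: [query],
  limit: 10,
});
const matches = await client.searchRecords(recordRequest);
```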
- * - * @param request - SearchRecordsRequest containing queries, filters, and search options - * @returns Promise that resolves to an array of SearchRecordsResponse objects matching the queries - * - * @throws {Error} If the gRPC call fails or the search operation fails - * - * @example - * ```typescript - * const request = create(models.search_v1.SearchRecordsRequestSchema, {queries: [query], limit: 10}); - * const responses = await client.searchRecords(request); - * for (const response of responses) { - * console.log(`Found: ${response.record?.name}`); - * } - * ``` - */ - async searchRecords( - request: models.search_v1.SearchRecordsRequest, - ): Promise { - const responses: models.search_v1.SearchRecordsResponse[] = []; - - for await (const response of this.searchClient.searchRecords(request)) { - responses.push(response); - } - - return responses; - } - - /** - * Look up metadata for records in the Store API. - * - * Retrieves metadata information for one or more records without - * downloading the full record content. This is useful for checking - * if records exist and getting basic information about them. - * - * @param refs - Array of RecordRef objects containing the CIDs to look up - * @returns Promise that resolves to an array of RecordMeta objects containing metadata for the records - * - * @throws {Error} If the gRPC call fails or the lookup operation fails - * - * @example - * ```typescript - * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; - * const metadatas = await client.lookup(refs); - * for (const meta of metadatas) { - * console.log(`Record size: ${meta.size}`); - * } - * ``` - */ - async lookup( - refs: models.core_v1.RecordRef[], - ): Promise { - const recordMetas: models.core_v1.RecordMeta[] = []; - - for await (const response of this.storeClient.lookup( - this.requestGenerator(refs), - )) { - recordMetas.push(response); - } - - return recordMetas; - } - - /** - * List objects from the Routing API matching the specified criteria. - * - * Returns a list of objects that match the filtering and - * query criteria specified in the request. - * - * @param request - ListRequest specifying filtering criteria, pagination, etc. - * @returns Promise that resolves to an array of ListResponse objects matching the criteria - * - * @throws {Error} If the gRPC call fails or the list operation fails - * - * @example - * ```typescript - * const request = new models.routing_v1.ListRequest({limit: 10}); - * const responses = await client.list(request); - * for (const response of responses) { - * console.log(`Found object: ${response.cid}`); - * } - * ``` - */ - async list( - request: models.routing_v1.ListRequest, - ): Promise { - const results: models.routing_v1.ListResponse[] = []; - - for await (const response of this.routingClient.list(request)) { - results.push(response); - } - - return results; - } - - /** - * Publish objects to the Routing API matching the specified criteria. - * - * Makes the specified objects available for discovery and retrieval by other - * clients in the network. The objects must already exist in the store before - * they can be published. 
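lookup is the cheap existence and metadata check that typically precedes a list or publish call. A sketch reusing `refs` from an earlier push, with request construction mirroring the doc examples above:

```typescript
// Metadata only: nothing is downloaded beyond size and basic info.
const metas = await client.lookup(refs);
for (const meta of metas) {
  console.log(`Record exists, size: ${meta.size}`);
}

// Routing-side listing with simple pagination.
const items = await client.list(new models.routing_v1.ListRequest({ limit: 10 }));
for (const item of items) {
  console.log(`Listed object: ${item.cid}`);
}
```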
- * - * @param request - PublishRequest containing the query for the objects to publish - * @returns Promise that resolves when the publish operation is complete - * - * @throws {Error} If the gRPC call fails or the object cannot be published - * - * @example - * ```typescript - * const ref = new models.routing_v1.RecordRef({cid: "QmExample123"}); - * const request = new models.routing_v1.PublishRequest({recordRefs: [ref]}); - * await client.publish(request); - * ``` - */ - async publish(request: models.routing_v1.PublishRequest): Promise { - await this.routingClient.publish(request); - } - - /** - * Unpublish objects from the Routing API matching the specified criteria. - * - * Removes the specified objects from the public network, making them no - * longer discoverable by other clients. The objects remain in the local - * store but are not available for network discovery. - * - * @param request - UnpublishRequest containing the query for the objects to unpublish - * @returns Promise that resolves when the unpublish operation is complete - * - * @throws {Error} If the gRPC call fails or the objects cannot be unpublished - * - * @example - * ```typescript - * const ref = new models.routing_v1.RecordRef({cid: "QmExample123"}); - * const request = new models.routing_v1.UnpublishRequest({recordRefs: [ref]}); - * await client.unpublish(request); - * ``` - */ - async unpublish(request: models.routing_v1.UnpublishRequest): Promise { - await this.routingClient.unpublish(request); - } - - /** - * Delete records from the Store API. - * - * Permanently removes one or more records from the content store using - * their content identifiers (CIDs). This operation cannot be undone. - * - * @param refs - Array of RecordRef objects containing the CIDs to delete - * @returns Promise that resolves when the deletion is complete - * - * @throws {Error} If the gRPC call fails or the delete operation fails - * - * @example - * ```typescript - * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; - * await client.delete(refs); - * ``` - */ - async delete(refs: models.core_v1.RecordRef[]): Promise { - await this.storeClient.delete(this.requestGenerator(refs)); - } - - /** - * Sign a record with a cryptographic signature. - * - * Creates a cryptographic signature for a record using either a private - * key or OIDC-based signing. The signing process uses the external dirctl - * command-line tool to perform the actual cryptographic operations. - * - * @param req - SignRequest containing the record reference and signing provider - * configuration. The provider can specify either key-based signing - * (with a private key) or OIDC-based signing - * @param oidc_client_id - OIDC client identifier for OIDC-based signing. 
Defaults to "sigstore" - * @returns SignResponse containing the signature - * - * @throws {Error} If the signing operation fails or unsupported provider is supplied - * - * @example - * ```typescript - * const req = new models.sign_v1.SignRequest({ - * recordRef: new models.core_v1.RecordRef({cid: "QmExample123"}), - * provider: new models.sign_v1.SignProvider({key: keyConfig}) - * }); - * const response = client.sign(req); - * console.log(`Signature: ${response.signature}`); - * ``` - */ - sign(req: models.sign_v1.SignRequest, oidc_client_id = 'sigstore'): void { - - var output; - - switch (req.provider?.request.case) { - case 'oidc': - output = this.__sign_with_oidc( - req.recordRef?.cid || '', - req.provider.request.value, - oidc_client_id, - ); - break; - - case 'key': - output = this.__sign_with_key( - req.recordRef?.cid || '', - req.provider.request.value, - ); - break; - - default: - throw new Error('unsupported provider was supplied'); - } - - if (output.status !== 0) { - throw output.error; - } - } - - /** - * Verify a cryptographic signature on a record. - * - * Validates the cryptographic signature of a previously signed record - * to ensure its authenticity and integrity. This operation verifies - * that the record has not been tampered with since signing. - * - * @param request - VerifyRequest containing the record reference and verification parameters - * @returns Promise that resolves to a VerifyResponse containing the verification result and details - * - * @throws {Error} If the gRPC call fails or the verification operation fails - * - * @example - * ```typescript - * const request = new models.sign_v1.VerifyRequest({ - * recordRef: new models.core_v1.RecordRef({cid: "QmExample123"}) - * }); - * const response = await client.verify(request); - * console.log(`Signature valid: ${response.valid}`); - * ``` - */ - async verify( - request: models.sign_v1.VerifyRequest, - ): Promise { - return await this.signClient.verify(request); - } - - /** - * Create a new synchronization configuration. - * - * Creates a new sync configuration that defines how data should be - * synchronized between different Directory servers. This allows for - * automated data replication and consistency across multiple locations. - * - * @param request - CreateSyncRequest containing the sync configuration details - * including source, target, and synchronization parameters - * @returns Promise that resolves to a CreateSyncResponse containing the created sync details - * including the sync ID and configuration - * - * @throws {Error} If the gRPC call fails or the sync creation fails - * - * @example - * ```typescript - * const request = new models.store_v1.CreateSyncRequest(); - * const response = await client.create_sync(request); - * console.log(`Created sync with ID: ${response.syncId}`); - * ``` - */ - async create_sync( - request: models.store_v1.CreateSyncRequest, - ): Promise { - return await this.syncClient.createSync(request); - } - - /** - * List existing synchronization configurations. - * - * Retrieves a list of all sync configurations that have been created, - * with optional filtering and pagination support. This allows you to - * monitor and manage multiple synchronization processes. 
- * - * @param request - ListSyncsRequest containing filtering criteria, pagination options, - * and other query parameters - * @returns Promise that resolves to an array of ListSyncsItem objects with - * their details including ID, name, status, and configuration parameters - * - * @throws {Error} If the gRPC call fails or the list operation fails - * - * @example - * ```typescript - * const request = new models.store_v1.ListSyncsRequest({limit: 10}); - * const syncs = await client.list_syncs(request); - * for (const sync of syncs) { - * console.log(`Sync: ${sync}`); - * } - * ``` - */ - async list_syncs( - request: models.store_v1.ListSyncsRequest, - ): Promise { - const results: models.store_v1.ListSyncsItem[] = []; - - for await (const response of this.syncClient.listSyncs(request)) { - results.push(response); - } - - return results; - } - - /** - * Retrieve detailed information about a specific synchronization configuration. - * - * Gets comprehensive details about a specific sync configuration including - * its current status, configuration parameters, performance metrics, - * and any recent errors or warnings. - * - * @param request - GetSyncRequest containing the sync ID or identifier to retrieve - * @returns Promise that resolves to a GetSyncResponse with detailed information about the sync configuration - * including status, metrics, configuration, and logs - * - * @throws {Error} If the gRPC call fails or the get operation fails - * - * @example - * ```typescript - * const request = new models.store_v1.GetSyncRequest({syncId: "sync-123"}); - * const response = await client.get_sync(request); - * console.log(`Sync status: ${response.status}`); - * console.log(`Last update: ${response.lastUpdateTime}`); - * ``` - */ - async get_sync( - request: models.store_v1.GetSyncRequest, - ): Promise { - return await this.syncClient.getSync(request); - } - - /** - * Delete a synchronization configuration. - * - * Permanently removes a sync configuration and stops any ongoing - * synchronization processes. This operation cannot be undone and - * will halt all data synchronization for the specified configuration. - * - * @param request - DeleteSyncRequest containing the sync ID or identifier to delete - * @returns Promise that resolves to a DeleteSyncResponse when the deletion is complete - * - * @throws {Error} If the gRPC call fails or the delete operation fails - * - * @example - * ```typescript - * const request = new models.store_v1.DeleteSyncRequest({syncId: "sync-123"}); - * await client.delete_sync(request); - * console.log("Sync deleted"); - * ``` - */ - async delete_sync( - request: models.store_v1.DeleteSyncRequest, - ): Promise { - return await this.syncClient.deleteSync(request); - } - - /** - * Get events from the Event API matching the specified criteria. - * - * Retrieves a list of events that match the filtering and query criteria - * specified in the request. - * - * @param request - ListenRequest specifying filtering criteria, pagination, etc. - * @returns Promise that resolves to an array of ListenResponse objects matching the criteria - * - * @throws {Error} If the gRPC call fails or the get events operation fails - */ - listen( - request: models.events_v1.ListenRequest - ): AsyncIterable { - return this.eventClient.listen(request); - } - - /** - * CreatePublication creates a new publication request that will be processed by the PublicationWorker. - * The publication request can specify either a query, a list of specific CIDs, - * or all records to be announced to the DHT. 
- * - * @param request - PublishRequest containing record references and queries options. - * - * @returns CreatePublicationResponse returns the result of creating a publication request. - * This includes the publication ID and any relevant metadata. - * - * @throws {Error} If the gRPC call fails or the list operation fails - */ - async create_publication( - request: models.routing_v1.PublishRequest, - ): Promise { - return await this.publicationClient.createPublication(request); - } - - /** - * ListPublications returns a stream of all publication requests in the system. - * This allows monitoring of pending, processing, and completed publication requests. - * - * @param request - ListPublicationsRequest contains optional filters for listing publication requests. - * - * @returns Promise that resolves to an array of ListPublicationsItem represents - * a single publication request in the list response. - * Contains publication details including ID, status, and creation timestamp. - * - * @throws {Error} If the gRPC call fails or the list operation fails - */ - async list_publication( - request: models.routing_v1.ListPublicationsRequest, - ): Promise { - const results: models.routing_v1.ListPublicationsItem[] = []; - - for await (const response of this.publicationClient.listPublications(request)) { - results.push(response); - } - - return results; - } - - /** - * GetPublication retrieves details of a specific publication request by its identifier. - * This includes the current status and any associated metadata. - * - * @param request - GetPublicationRequest specifies which publication to retrieve by its identifier. - * - * @returns GetPublicationResponse contains the full details of a specific publication request. - * Includes status, progress information, and any error details if applicable. - * - * @throws {Error} If the gRPC call fails or the get operation fails - */ - async get_publication( - request: models.routing_v1.GetPublicationRequest, - ): Promise { - return await this.publicationClient.getPublication(request); - } - - - /** - * Sign a record using a private key. - * - * This private method handles key-based signing by writing the private key - * to a temporary file and executing the dirctl command with the key file. - * - * @param cid - Content identifier of the record to sign - * @param req - SignWithKey request containing the private key - * @returns SignResponse containing the signature - * - * @throws {Error} If any error occurs during signing - * - * @private - */ - private __sign_with_key(cid: string, req: models.sign_v1.SignWithKey): SpawnSyncReturns { - // Write private key to a temporary file - const tmp_key_filename = join(tmpdir(), '.p.key'); - writeFileSync(tmp_key_filename, String(req.privateKey)); - - // Prepare environment for command - const shell_env = env; - shell_env['COSIGN_PASSWORD'] = String(req.password); - - let commandArgs = ["sign", cid, "--key", tmp_key_filename]; - - if (this.config.spiffeEndpointSocket !== '') { - commandArgs.push(...["--spiffe-socket-path", this.config.spiffeEndpointSocket]); - } - - // Execute command - let output = spawnSync( - `${this.config.dirctlPath}`, commandArgs, - { env: { ...shell_env }, encoding: 'utf8', stdio: 'pipe' }, - ); - - return output; - } - - /** - * Sign a record using OIDC-based authentication. - * - * This private method handles OIDC-based signing by building the appropriate - * dirctl command with OIDC parameters and executing it. 
- * - * @param cid - Content identifier of the record to sign - * @param req - SignWithOIDC request containing the OIDC configuration - * @param oidc_client_id - OIDC client identifier for authentication - * @returns SignResponse containing the signature - * - * @throws {Error} If any error occurs during signing - * - * @private - */ - private __sign_with_oidc( - cid: string, - req: models.sign_v1.SignWithOIDC, - oidc_client_id: string, - ): SpawnSyncReturns { - // Prepare command - let commandArgs = ["sign", cid]; - if (req.idToken !== '') { - commandArgs.push(...["--oidc-token", req.idToken]); - } - if ( - req.options?.oidcProviderUrl !== undefined && - req.options.oidcProviderUrl !== '' - ) { - commandArgs.push(...["--oidc-provider-url", req.options.oidcProviderUrl]); - } - if (req.options?.fulcioUrl !== undefined && req.options.fulcioUrl !== '') { - commandArgs.push(...["--fulcio-url", req.options.fulcioUrl]); - } - if (req.options?.rekorUrl !== undefined && req.options.rekorUrl !== '') { - commandArgs.push(...["--rekor-url", req.options.rekorUrl]); - } - if ( - req.options?.timestampUrl !== undefined && - req.options.timestampUrl !== '' - ) { - commandArgs.push(...["--timestamp-url", req.options.timestampUrl]); - } - - if (this.config.spiffeEndpointSocket !== '') { - commandArgs.push(...["--spiffe-socket-path", this.config.spiffeEndpointSocket]); - } - - // Execute command - let output = spawnSync(`${this.config.dirctlPath}`, commandArgs, { - env: { ...env }, - encoding: 'utf8', - stdio: 'pipe', - }); - - return output; - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +import { tmpdir } from 'node:os'; +import { join } from 'node:path'; +import { env } from 'node:process'; +import { readFileSync, writeFileSync } from 'node:fs'; +import { spawnSync, SpawnSyncReturns } from 'node:child_process'; + +import { + Client as GrpcClient, + createClient, + Interceptor, + Transport, +} from '@connectrpc/connect'; +import { createGrpcTransport } from '@connectrpc/connect-node'; +import { createClient as createClientSpiffe, X509SVID } from 'spiffe'; +import * as models from '../models'; + +/** + * Configuration class for the AGNTCY Directory client. + * + * This class manages configuration settings for connecting to the Directory service + * and provides default values and environment-based configuration loading. + */ +export class Config { + static DEFAULT_SERVER_ADDRESS = '127.0.0.1:8888'; + static DEFAULT_DIRCTL_PATH = 'dirctl'; + static DEFAULT_SPIFFE_ENDPOINT_SOCKET = ''; + static DEFAULT_AUTH_MODE = ''; + static DEFAULT_JWT_AUDIENCE = ''; + static DEFAULT_TLS_CA_FILE = ''; + static DEFAULT_TLS_CERT_FILE = ''; + static DEFAULT_TLS_KEY_FILE = ''; + + serverAddress: string; + dirctlPath: string; + spiffeEndpointSocket: string; + authMode: '' | 'x509' | 'jwt' | 'tls'; + jwtAudience: string; + tlsCaFile: string; + tlsCertFile: string + tlsKeyFile: string; + + /** + * Creates a new Config instance. + * + * @param serverAddress - The server address to connect to. Defaults to '127.0.0.1:8888' + * @param dirctlPath - Path to the dirctl executable. Defaults to 'dirctl' + * @param spiffeEndpointSocket - Path to the spire server socket. Defaults to empty string. + * @param authMode - Authentication mode: '' for insecure, 'x509', 'jwt' or 'tls'. Defaults to '' + * @param jwtAudience - JWT audience for JWT authentication. 
Required when authMode is 'jwt'
+   * @param tlsCaFile - Path to the CA certificate file used to verify the server. Required when authMode is 'tls'
+   * @param tlsCertFile - Path to the client certificate file. Required when authMode is 'tls'
+   * @param tlsKeyFile - Path to the client private key file. Required when authMode is 'tls'
+   */
+  constructor(
+    serverAddress = Config.DEFAULT_SERVER_ADDRESS,
+    dirctlPath = Config.DEFAULT_DIRCTL_PATH,
+    spiffeEndpointSocket = Config.DEFAULT_SPIFFE_ENDPOINT_SOCKET,
+    authMode: '' | 'x509' | 'jwt' | 'tls' = Config.DEFAULT_AUTH_MODE as '' | 'x509' | 'jwt' | 'tls',
+    jwtAudience = Config.DEFAULT_JWT_AUDIENCE,
+    tlsCaFile = Config.DEFAULT_TLS_CA_FILE,
+    tlsCertFile = Config.DEFAULT_TLS_CERT_FILE,
+    tlsKeyFile = Config.DEFAULT_TLS_KEY_FILE
+  ) {
+    // add protocol prefix if not set
+    // use plain http unless spire/auth is used
+    if (
+      !serverAddress.startsWith('http://') &&
+      !serverAddress.startsWith('https://')
+    ) {
+      // use https protocol when X.509, JWT, or TLS auth is used
+      if (authMode === 'x509' || authMode === 'jwt' || authMode === 'tls') {
+        serverAddress = `https://${serverAddress}`;
+      } else {
+        serverAddress = `http://${serverAddress}`;
+      }
+    }
+
+    this.serverAddress = serverAddress;
+    this.dirctlPath = dirctlPath;
+    this.spiffeEndpointSocket = spiffeEndpointSocket;
+    this.authMode = authMode;
+    this.jwtAudience = jwtAudience;
+    this.tlsCaFile = tlsCaFile;
+    this.tlsCertFile = tlsCertFile;
+    this.tlsKeyFile = tlsKeyFile;
+  }
+
+  /**
+   * Load configuration from environment variables.
+   *
+   * @param prefix - Environment variable prefix. Defaults to 'DIRECTORY_CLIENT_'
+   * @returns A new Config instance with values loaded from environment variables
+   *
+   * @example
+   * ```typescript
+   * // Load with default prefix
+   * const config = Config.loadFromEnv();
+   *
+   * // Load with custom prefix
+   * const config = Config.loadFromEnv("MY_APP_");
+   * ```
+   */
+  static loadFromEnv(prefix = 'DIRECTORY_CLIENT_') {
+    // Load dirctl path from env without env prefix
+    const dirctlPath = env['DIRCTL_PATH'] || Config.DEFAULT_DIRCTL_PATH;
+
+    // Load other config values with env prefix
+    const serverAddress =
+      env[`${prefix}SERVER_ADDRESS`] || Config.DEFAULT_SERVER_ADDRESS;
+    const spiffeEndpointSocketPath = env[`${prefix}SPIFFE_SOCKET_PATH`] || Config.DEFAULT_SPIFFE_ENDPOINT_SOCKET;
+    const authMode = (env[`${prefix}AUTH_MODE`] || Config.DEFAULT_AUTH_MODE) as '' | 'x509' | 'jwt' | 'tls';
+    const jwtAudience = env[`${prefix}JWT_AUDIENCE`] || Config.DEFAULT_JWT_AUDIENCE;
+    const tlsCaFile = env[`${prefix}TLS_CA_FILE`] || Config.DEFAULT_TLS_CA_FILE;
+    const tlsCertFile = env[`${prefix}TLS_CERT_FILE`] || Config.DEFAULT_TLS_CERT_FILE;
+    const tlsKeyFile = env[`${prefix}TLS_KEY_FILE`] || Config.DEFAULT_TLS_KEY_FILE;
+
+    return new Config(serverAddress, dirctlPath, spiffeEndpointSocketPath, authMode, jwtAudience, tlsCaFile, tlsCertFile, tlsKeyFile);
+  }
+}
+
+/**
+ * High-level client for interacting with AGNTCY Directory services.
+ *
+ * This client provides a unified interface for operations across the Directory API.
+ * It handles gRPC communication and provides convenient methods for common operations
+ * including storage, routing, search, signing, and synchronization.
+ * + * @example + * ```typescript + * // Create client with default configuration + * const client = new Client(); + * + * // Create client with custom configuration + * const config = new Config('localhost:8888', '/usr/local/bin/dirctl'); + * const client = new Client(config); + * + * // Use client for operations + * const records = await client.push([record]); + * ``` + */ +export class Client { + config: Config; + storeClient: GrpcClient; + routingClient: GrpcClient; + publicationClient: GrpcClient; + searchClient: GrpcClient; + signClient: GrpcClient; + syncClient: GrpcClient; + eventClient: GrpcClient; + + /** + * Initialize the client with the given configuration. + * + * @param config - Optional client configuration. If null, loads from environment + * variables using Config.loadFromEnv() + * @param grpcTransport - Optional transport to use for gRPC communication. + * Can be created with Client.createGRPCTransport(config) + * + * @throws {Error} If unable to establish connection to the server or configuration is invalid + * + * @example + * ```typescript + * // Load config from environment + * const client = new Client(); + * + * // Use custom config + * const config = new Config('localhost:9999'); + * const grpcTransport = await Client.createGRPCTransport(config); + * const client = new Client(config, grpcTransport); + * ``` + */ + constructor(); + constructor(config?: Config); + constructor(config?: Config, grpcTransport?: Transport); + constructor(config?: Config, grpcTransport?: Transport) { + // Load config from environment if not provided + if (!config) { + config = Config.loadFromEnv(); + } + this.config = config; + + // if no transport provided, use insecure transport + if (!grpcTransport) { + grpcTransport = createGrpcTransport({ + baseUrl: config.serverAddress, + }); + } + + // Set clients for all services + this.storeClient = createClient(models.store_v1.StoreService, grpcTransport); + this.routingClient = createClient( + models.routing_v1.RoutingService, + grpcTransport, + ); + this.publicationClient = createClient(models.routing_v1.PublicationService, grpcTransport); + this.searchClient = createClient(models.search_v1.SearchService, grpcTransport); + this.signClient = createClient(models.sign_v1.SignService, grpcTransport); + this.syncClient = createClient(models.store_v1.SyncService, grpcTransport); + this.eventClient = createClient(models.events_v1.EventService, grpcTransport); + } + + private static convertToPEM(bytes: Uint8Array, label: string): string { + // Convert Uint8Array to base64 string + let binary = ''; + const len = bytes.byteLength; + for (let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + const base64String = btoa(binary); + + // Split base64 string into 64-character lines + const lines = base64String.match(/.{1,64}/g) || []; + + // Build PEM formatted string with headers and footers + const pem = [ + `-----BEGIN ${label}-----`, + ...lines, + `-----END ${label}-----` + ].join('\n'); + + return pem; + } + + static async createGRPCTransport(config: Config): Promise { + // Handle different authentication modes + switch (config.authMode) { + case '': + return createGrpcTransport({ + baseUrl: config.serverAddress, + }); + + case 'jwt': + return await this.createJWTTransport(config); + + case 'x509': + return await this.createX509Transport(config); + + case 'tls': + return await this.createTLSTransport(config); + + default: + throw new Error(`Unsupported auth mode: ${config.authMode}`); + } + } + + private static async 
createX509Transport(config: Config): Promise { + if (config.spiffeEndpointSocket === '') { + throw new Error('SPIFFE socket path is required for X.509 authentication'); + } + + // Create secure transport with SPIFFE X.509 + const client = createClientSpiffe(config.spiffeEndpointSocket); + + let svid: X509SVID = { + spiffeId: '', + hint: '', + x509Svid: new Uint8Array(), + x509SvidKey: new Uint8Array(), + bundle: new Uint8Array(), + }; + + const svidStream = client.fetchX509SVID({}); + for await (const message of svidStream.responses) { + message.svids.forEach((_svid) => { + svid = _svid; + }) + + if (message.svids.length > 0) { + break + } + } + + // Create transport settings for gRPC client + const transport = createGrpcTransport({ + baseUrl: config.serverAddress, + nodeOptions: { + ca: this.convertToPEM(svid.bundle, "TRUSTED CERTIFICATE"), + cert: this.convertToPEM(svid.x509Svid, "CERTIFICATE"), + key: this.convertToPEM(svid.x509SvidKey, "PRIVATE KEY"), + }, + }); + + return transport; + } + + private static async createJWTTransport(config: Config): Promise { + if (config.spiffeEndpointSocket === '') { + throw new Error('SPIFFE socket path is required for JWT authentication'); + } + + if (config.jwtAudience === '') { + throw new Error('JWT audience is required for JWT authentication'); + } + + // Create SPIFFE client + const client = createClientSpiffe(config.spiffeEndpointSocket); + + // Fetch X.509 bundle for verifying server's TLS certificate + // In JWT mode, the server presents its X.509-SVID via TLS for transport security + let bundle: Uint8Array | null = null; + const bundleStream = client.fetchX509Bundles({}); + for await (const message of bundleStream.responses) { + // Get the first bundle from the bundles map + // bundles is a map where bytes is ASN.1 DER encoded + for (const [_, bundleData] of Object.entries(message.bundles)) { + // Convert to a new Uint8Array to ensure type compatibility + bundle = new Uint8Array(bundleData); + break; + } + if (bundle !== null) { + break; + } + } + + if (bundle === null || bundle.length === 0) { + throw new Error('Failed to fetch X.509 bundle from SPIRE: no bundles returned'); + } + + // Create JWT interceptor that fetches and injects JWT tokens + const jwtInterceptor: Interceptor = (next) => async (req) => { + // Fetch JWT-SVID from SPIRE + // Note: spiffeId is empty string to use the workload's default identity + const jwtCall = client.fetchJWTSVID({ + spiffeId: '', + audience: [config.jwtAudience] + }); + + const response = await jwtCall.response; + + if (!response.svids || response.svids.length === 0) { + throw new Error('Failed to fetch JWT-SVID from SPIRE: no SVIDs returned'); + } + + const jwtToken = response.svids[0].svid; + + // Add JWT token to request headers + req.header.set('authorization', `Bearer ${jwtToken}`); + + return await next(req); + }; + + // Create transport with JWT interceptor and TLS using SPIFFE bundle + // For JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID + const transport = createGrpcTransport({ + baseUrl: config.serverAddress, + interceptors: [jwtInterceptor], + nodeOptions: { + ca: this.convertToPEM(bundle, "CERTIFICATE"), + }, + }); + + return transport; + } + + private static async createTLSTransport(config: Config): Promise { + if (config.tlsCaFile === '') { + throw new Error('TLS CA file is required for TLS authentication'); + } + if (config.tlsCertFile === '') { + throw new Error('TLS certificate file is required for TLS authentication'); + } + if (config.tlsKeyFile === 
'') { + throw new Error('TLS key file is required for TLS authentication'); + } + + let root_ca: string; + let cert_chain: string; + let private_key: string; + + try { + root_ca = readFileSync(config.tlsCaFile).toString(); + cert_chain = readFileSync(config.tlsCertFile).toString(); + private_key = readFileSync(config.tlsKeyFile).toString(); + } catch (e) { + console.error('Error reading file:', (e as Error).message); + throw e; + } + + const transport = createGrpcTransport({ + baseUrl: config.serverAddress, + nodeOptions: { + ca: root_ca, + cert: cert_chain, + key: private_key, + }, + }); + + return transport; + } + /** + * Request generator helper function for streaming requests. + */ + private async *requestGenerator(reqs: T[]): AsyncIterable { + for (const req of reqs) { + yield req; + } + } + + /** + * Push records to the Store API. + * + * Uploads one or more records to the content store, making them available + * for retrieval and reference. Each record is assigned a unique content + * identifier (CID) based on its content hash. + * + * @param records - Array of Record objects to push to the store + * @returns Promise that resolves to an array of RecordRef objects containing the CIDs of the pushed records + * + * @throws {Error} If the gRPC call fails or the push operation fails + * + * @example + * ```typescript + * const records = [createRecord("example")]; + * const refs = await client.push(records); + * console.log(`Pushed with CID: ${refs[0].cid}`); + * ``` + */ + async push( + records: models.core_v1.Record[], + ): Promise { + const responses: models.core_v1.RecordRef[] = []; + + for await (const response of this.storeClient.push( + this.requestGenerator(records), + )) { + responses.push(response); + } + + return responses; + } + + /** + * Push records with referrer metadata to the Store API. + * + * Uploads records along with optional artifacts and referrer information. + * This is useful for pushing complex objects that include additional + * metadata or associated artifacts. + * + * @param requests - Array of PushReferrerRequest objects containing records and optional artifacts + * @returns Promise that resolves to an array of PushReferrerResponse objects containing the details of pushed artifacts + * + * @throws {Error} If the gRPC call fails or the push operation fails + * + * @example + * ```typescript + * const requests = [new models.store_v1.PushReferrerRequest({record: record})]; + * const responses = await client.push_referrer(requests); + * ``` + */ + async push_referrer( + requests: models.store_v1.PushReferrerRequest[], + ): Promise { + const responses: models.store_v1.PushReferrerResponse[] = []; + + for await (const response of this.storeClient.pushReferrer( + this.requestGenerator(requests), + )) { + responses.push(response); + } + + return responses; + } + + /** + * Pull records from the Store API by their references. + * + * Retrieves one or more records from the content store using their + * content identifiers (CIDs). 
+ * + * @param refs - Array of RecordRef objects containing the CIDs to retrieve + * @returns Promise that resolves to an array of Record objects retrieved from the store + * + * @throws {Error} If the gRPC call fails or the pull operation fails + * + * @example + * ```typescript + * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; + * const records = await client.pull(refs); + * for (const record of records) { + * console.log(`Retrieved record: ${record}`); + * } + * ``` + */ + async pull( + refs: models.core_v1.RecordRef[], + ): Promise { + const records: models.core_v1.Record[] = []; + + for await (const response of this.storeClient.pull( + this.requestGenerator(refs), + )) { + records.push(response); + } + + return records; + } + + /** + * Pull records with referrer metadata from the Store API. + * + * Retrieves records along with their associated artifacts and referrer + * information. This provides access to complex objects that include + * additional metadata or associated artifacts. + * + * @param requests - Array of PullReferrerRequest objects containing records and optional artifacts for pull operations + * @returns Promise that resolves to an array of PullReferrerResponse objects containing the retrieved records + * + * @throws {Error} If the gRPC call fails or the pull operation fails + * + * @example + * ```typescript + * const requests = [new models.store_v1.PullReferrerRequest({ref: ref})]; + * const responses = await client.pull_referrer(requests); + * for (const response of responses) { + * console.log(`Retrieved: ${response}`); + * } + * ``` + */ + async pull_referrer( + requests: models.store_v1.PullReferrerRequest[], + ): Promise { + const responses: models.store_v1.PullReferrerResponse[] = []; + + for await (const response of this.storeClient.pullReferrer( + this.requestGenerator(requests), + )) { + responses.push(response); + } + + return responses; + } + + /** + * Search objects from the Store API matching the specified queries. + * + * Performs a search across the storage using the provided search queries + * and returns a list of matching CIDs. This is efficient for lookups + * where only the CIDs are needed. + * + * @param request - SearchCIDsRequest containing queries, filters, and search options + * @returns Promise that resolves to an array of SearchCIDsResponse objects matching the queries + * + * @throws {Error} If the gRPC call fails or the search operation fails + * + * @example + * ```typescript + * const request = create(models.search_v1.SearchCIDsRequestSchema, {queries: [query], limit: 10}); + * const responses = await client.searchCIDs(request); + * for (const response of responses) { + * console.log(`Found CID: ${response.recordCid}`); + * } + * ``` + */ + async searchCIDs( + request: models.search_v1.SearchCIDsRequest, + ): Promise { + const responses: models.search_v1.SearchCIDsResponse[] = []; + + for await (const response of this.searchClient.searchCIDs(request)) { + responses.push(response); + } + + return responses; + } + + /** + * Search for full records from the Store API matching the specified queries. + * + * Performs a search across the storage using the provided search queries + * and returns a list of full records with all metadata. 
+ * + * @param request - SearchRecordsRequest containing queries, filters, and search options + * @returns Promise that resolves to an array of SearchRecordsResponse objects matching the queries + * + * @throws {Error} If the gRPC call fails or the search operation fails + * + * @example + * ```typescript + * const request = create(models.search_v1.SearchRecordsRequestSchema, {queries: [query], limit: 10}); + * const responses = await client.searchRecords(request); + * for (const response of responses) { + * console.log(`Found: ${response.record?.name}`); + * } + * ``` + */ + async searchRecords( + request: models.search_v1.SearchRecordsRequest, + ): Promise { + const responses: models.search_v1.SearchRecordsResponse[] = []; + + for await (const response of this.searchClient.searchRecords(request)) { + responses.push(response); + } + + return responses; + } + + /** + * Look up metadata for records in the Store API. + * + * Retrieves metadata information for one or more records without + * downloading the full record content. This is useful for checking + * if records exist and getting basic information about them. + * + * @param refs - Array of RecordRef objects containing the CIDs to look up + * @returns Promise that resolves to an array of RecordMeta objects containing metadata for the records + * + * @throws {Error} If the gRPC call fails or the lookup operation fails + * + * @example + * ```typescript + * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; + * const metadatas = await client.lookup(refs); + * for (const meta of metadatas) { + * console.log(`Record size: ${meta.size}`); + * } + * ``` + */ + async lookup( + refs: models.core_v1.RecordRef[], + ): Promise { + const recordMetas: models.core_v1.RecordMeta[] = []; + + for await (const response of this.storeClient.lookup( + this.requestGenerator(refs), + )) { + recordMetas.push(response); + } + + return recordMetas; + } + + /** + * List objects from the Routing API matching the specified criteria. + * + * Returns a list of objects that match the filtering and + * query criteria specified in the request. + * + * @param request - ListRequest specifying filtering criteria, pagination, etc. + * @returns Promise that resolves to an array of ListResponse objects matching the criteria + * + * @throws {Error} If the gRPC call fails or the list operation fails + * + * @example + * ```typescript + * const request = new models.routing_v1.ListRequest({limit: 10}); + * const responses = await client.list(request); + * for (const response of responses) { + * console.log(`Found object: ${response.cid}`); + * } + * ``` + */ + async list( + request: models.routing_v1.ListRequest, + ): Promise { + const results: models.routing_v1.ListResponse[] = []; + + for await (const response of this.routingClient.list(request)) { + results.push(response); + } + + return results; + } + + /** + * Publish objects to the Routing API matching the specified criteria. + * + * Makes the specified objects available for discovery and retrieval by other + * clients in the network. The objects must already exist in the store before + * they can be published. 
+ * + * @param request - PublishRequest containing the query for the objects to publish + * @returns Promise that resolves when the publish operation is complete + * + * @throws {Error} If the gRPC call fails or the object cannot be published + * + * @example + * ```typescript + * const ref = new models.routing_v1.RecordRef({cid: "QmExample123"}); + * const request = new models.routing_v1.PublishRequest({recordRefs: [ref]}); + * await client.publish(request); + * ``` + */ + async publish(request: models.routing_v1.PublishRequest): Promise { + await this.routingClient.publish(request); + } + + /** + * Unpublish objects from the Routing API matching the specified criteria. + * + * Removes the specified objects from the public network, making them no + * longer discoverable by other clients. The objects remain in the local + * store but are not available for network discovery. + * + * @param request - UnpublishRequest containing the query for the objects to unpublish + * @returns Promise that resolves when the unpublish operation is complete + * + * @throws {Error} If the gRPC call fails or the objects cannot be unpublished + * + * @example + * ```typescript + * const ref = new models.routing_v1.RecordRef({cid: "QmExample123"}); + * const request = new models.routing_v1.UnpublishRequest({recordRefs: [ref]}); + * await client.unpublish(request); + * ``` + */ + async unpublish(request: models.routing_v1.UnpublishRequest): Promise { + await this.routingClient.unpublish(request); + } + + /** + * Delete records from the Store API. + * + * Permanently removes one or more records from the content store using + * their content identifiers (CIDs). This operation cannot be undone. + * + * @param refs - Array of RecordRef objects containing the CIDs to delete + * @returns Promise that resolves when the deletion is complete + * + * @throws {Error} If the gRPC call fails or the delete operation fails + * + * @example + * ```typescript + * const refs = [new models.core_v1.RecordRef({cid: "QmExample123"})]; + * await client.delete(refs); + * ``` + */ + async delete(refs: models.core_v1.RecordRef[]): Promise { + await this.storeClient.delete(this.requestGenerator(refs)); + } + + /** + * Sign a record with a cryptographic signature. + * + * Creates a cryptographic signature for a record using either a private + * key or OIDC-based signing. The signing process uses the external dirctl + * command-line tool to perform the actual cryptographic operations. + * + * @param req - SignRequest containing the record reference and signing provider + * configuration. The provider can specify either key-based signing + * (with a private key) or OIDC-based signing + * @param oidc_client_id - OIDC client identifier for OIDC-based signing. 
Defaults to "sigstore"
+   * @returns void; the method throws if the signing command fails
+   *
+   * @throws {Error} If the signing operation fails or an unsupported provider is supplied
+   *
+   * @example
+   * ```typescript
+   * const req = new models.sign_v1.SignRequest({
+   *   recordRef: new models.core_v1.RecordRef({cid: "QmExample123"}),
+   *   provider: new models.sign_v1.SignProvider({key: keyConfig})
+   * });
+   * client.sign(req);
+   * ```
+   */
+  sign(req: models.sign_v1.SignRequest, oidc_client_id = 'sigstore'): void {
+    let output;
+
+    switch (req.provider?.request.case) {
+      case 'oidc':
+        output = this.__sign_with_oidc(
+          req.recordRef?.cid || '',
+          req.provider.request.value,
+          oidc_client_id,
+        );
+        break;
+
+      case 'key':
+        output = this.__sign_with_key(
+          req.recordRef?.cid || '',
+          req.provider.request.value,
+        );
+        break;
+
+      default:
+        throw new Error('unsupported provider was supplied');
+    }
+
+    if (output.status !== 0) {
+      // Prefer the spawn error if present; otherwise surface the command's stderr
+      throw output.error ?? new Error(output.stderr);
+    }
+  }
+
+  /**
+   * Verify a cryptographic signature on a record.
+   *
+   * Validates the cryptographic signature of a previously signed record
+   * to ensure its authenticity and integrity. This operation verifies
+   * that the record has not been tampered with since signing.
+   *
+   * @param request - VerifyRequest containing the record reference and verification parameters
+   * @returns Promise that resolves to a VerifyResponse containing the verification result and details
+   *
+   * @throws {Error} If the gRPC call fails or the verification operation fails
+   *
+   * @example
+   * ```typescript
+   * const request = new models.sign_v1.VerifyRequest({
+   *   recordRef: new models.core_v1.RecordRef({cid: "QmExample123"})
+   * });
+   * const response = await client.verify(request);
+   * console.log(`Signature valid: ${response.valid}`);
+   * ```
+   */
+  async verify(
+    request: models.sign_v1.VerifyRequest,
+  ): Promise<models.sign_v1.VerifyResponse> {
+    return await this.signClient.verify(request);
+  }
+
+  /**
+   * Create a new synchronization configuration.
+   *
+   * Creates a new sync configuration that defines how data should be
+   * synchronized between different Directory servers. This allows for
+   * automated data replication and consistency across multiple locations.
+   *
+   * @param request - CreateSyncRequest containing the sync configuration details
+   *                  including source, target, and synchronization parameters
+   * @returns Promise that resolves to a CreateSyncResponse containing the created sync details
+   *          including the sync ID and configuration
+   *
+   * @throws {Error} If the gRPC call fails or the sync creation fails
+   *
+   * @example
+   * ```typescript
+   * const request = new models.store_v1.CreateSyncRequest();
+   * const response = await client.create_sync(request);
+   * console.log(`Created sync with ID: ${response.syncId}`);
+   * ```
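+   *
+   * A sketch of the full create/inspect/delete lifecycle. This assumes the
+   * protobuf-es create() helper and request Schema names that follow the
+   * generated-code pattern used elsewhere in this SDK:
+   * ```typescript
+   * const created = await client.create_sync(
+   *   create(models.store_v1.CreateSyncRequestSchema, {}),
+   * );
+   * const info = await client.get_sync(
+   *   create(models.store_v1.GetSyncRequestSchema, {syncId: created.syncId}),
+   * );
+   * await client.delete_sync(
+   *   create(models.store_v1.DeleteSyncRequestSchema, {syncId: created.syncId}),
+   * );
+   * ```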
+   */
+  async create_sync(
+    request: models.store_v1.CreateSyncRequest,
+  ): Promise<models.store_v1.CreateSyncResponse> {
+    return await this.syncClient.createSync(request);
+  }
+
+  /**
+   * List existing synchronization configurations.
+   *
+   * Retrieves a list of all sync configurations that have been created,
+   * with optional filtering and pagination support. This allows you to
+   * monitor and manage multiple synchronization processes.
+   *
+   * @param request - ListSyncsRequest containing filtering criteria, pagination options,
+   *                  and other query parameters
+   * @returns Promise that resolves to an array of ListSyncsItem objects with
+   *          their details including ID, name, status, and configuration parameters
+   *
+   * @throws {Error} If the gRPC call fails or the list operation fails
+   *
+   * @example
+   * ```typescript
+   * const request = new models.store_v1.ListSyncsRequest({limit: 10});
+   * const syncs = await client.list_syncs(request);
+   * for (const sync of syncs) {
+   *   console.log(`Sync: ${sync}`);
+   * }
+   * ```
+   */
+  async list_syncs(
+    request: models.store_v1.ListSyncsRequest,
+  ): Promise<models.store_v1.ListSyncsItem[]> {
+    const results: models.store_v1.ListSyncsItem[] = [];
+
+    for await (const response of this.syncClient.listSyncs(request)) {
+      results.push(response);
+    }
+
+    return results;
+  }
+
+  /**
+   * Retrieve detailed information about a specific synchronization configuration.
+   *
+   * Gets comprehensive details about a specific sync configuration including
+   * its current status, configuration parameters, performance metrics,
+   * and any recent errors or warnings.
+   *
+   * @param request - GetSyncRequest containing the sync ID or identifier to retrieve
+   * @returns Promise that resolves to a GetSyncResponse with detailed information about the sync configuration
+   *          including status, metrics, configuration, and logs
+   *
+   * @throws {Error} If the gRPC call fails or the get operation fails
+   *
+   * @example
+   * ```typescript
+   * const request = new models.store_v1.GetSyncRequest({syncId: "sync-123"});
+   * const response = await client.get_sync(request);
+   * console.log(`Sync status: ${response.status}`);
+   * console.log(`Last update: ${response.lastUpdateTime}`);
+   * ```
+   */
+  async get_sync(
+    request: models.store_v1.GetSyncRequest,
+  ): Promise<models.store_v1.GetSyncResponse> {
+    return await this.syncClient.getSync(request);
+  }
+
+  /**
+   * Delete a synchronization configuration.
+   *
+   * Permanently removes a sync configuration and stops any ongoing
+   * synchronization processes. This operation cannot be undone and
+   * will halt all data synchronization for the specified configuration.
+   *
+   * @param request - DeleteSyncRequest containing the sync ID or identifier to delete
+   * @returns Promise that resolves to a DeleteSyncResponse when the deletion is complete
+   *
+   * @throws {Error} If the gRPC call fails or the delete operation fails
+   *
+   * @example
+   * ```typescript
+   * const request = new models.store_v1.DeleteSyncRequest({syncId: "sync-123"});
+   * await client.delete_sync(request);
+   * console.log("Sync deleted");
+   * ```
+   */
+  async delete_sync(
+    request: models.store_v1.DeleteSyncRequest,
+  ): Promise<models.store_v1.DeleteSyncResponse> {
+    return await this.syncClient.deleteSync(request);
+  }
+
+  /**
+   * Listen for events from the Event API matching the specified criteria.
+   *
+   * Subscribes to the server's event stream and yields events that match
+   * the filtering criteria specified in the request.
+   *
+   * @param request - ListenRequest specifying event types and optional label or CID filters
+   * @returns AsyncIterable of ListenResponse objects matching the criteria
+   *
+   * @throws {Error} If the gRPC call fails or the subscription fails
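+   *
+   * @example
+   * A minimal sketch of consuming the stream. The label filter value is
+   * illustrative, and ListenRequestSchema follows the generated-code
+   * pattern used elsewhere in this SDK:
+   * ```typescript
+   * const request = create(models.events_v1.ListenRequestSchema, {
+   *   labelFilters: ["/skills/AI"],
+   * });
+   * for await (const response of client.listen(request)) {
+   *   console.log(`Event: ${response.event?.type}`);
+   * }
+   * ```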
+   */
+  listen(
+    request: models.events_v1.ListenRequest
+  ): AsyncIterable<models.events_v1.ListenResponse> {
+    return this.eventClient.listen(request);
+  }
+
+  /**
+   * CreatePublication creates a new publication request that will be processed by the PublicationWorker.
+   * The publication request can specify either a query, a list of specific CIDs,
+   * or all records to be announced to the DHT.
+   *
+   * @param request - PublishRequest containing record references and query options
+   *
+   * @returns Promise that resolves to a CreatePublicationResponse with the result of
+   *          creating the publication request, including the publication ID and any
+   *          relevant metadata
+   *
+   * @throws {Error} If the gRPC call fails or the create operation fails
+   */
+  async create_publication(
+    request: models.routing_v1.PublishRequest,
+  ): Promise<models.routing_v1.CreatePublicationResponse> {
+    return await this.publicationClient.createPublication(request);
+  }
+
+  /**
+   * ListPublications returns a stream of all publication requests in the system.
+   * This allows monitoring of pending, processing, and completed publication requests.
+   *
+   * @param request - ListPublicationsRequest containing optional filters for listing publication requests
+   *
+   * @returns Promise that resolves to an array of ListPublicationsItem objects, each
+   *          representing a single publication request with details including ID,
+   *          status, and creation timestamp
+   *
+   * @throws {Error} If the gRPC call fails or the list operation fails
+   */
+  async list_publication(
+    request: models.routing_v1.ListPublicationsRequest,
+  ): Promise<models.routing_v1.ListPublicationsItem[]> {
+    const results: models.routing_v1.ListPublicationsItem[] = [];
+
+    for await (const response of this.publicationClient.listPublications(request)) {
+      results.push(response);
+    }
+
+    return results;
+  }
+
+  /**
+   * GetPublication retrieves details of a specific publication request by its identifier.
+   * This includes the current status and any associated metadata.
+   *
+   * @param request - GetPublicationRequest specifying which publication to retrieve by its identifier
+   *
+   * @returns Promise that resolves to a GetPublicationResponse containing the full details
+   *          of the publication request, including status, progress information, and any
+   *          error details if applicable
+   *
+   * @throws {Error} If the gRPC call fails or the get operation fails
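+   *
+   * @example
+   * A minimal sketch. The publication ID value and the request field name are
+   * assumptions for illustration, and GetPublicationRequestSchema follows the
+   * generated-code pattern used elsewhere in this SDK:
+   * ```typescript
+   * const request = create(models.routing_v1.GetPublicationRequestSchema, {
+   *   publicationId: "pub-123", // field name assumed for illustration
+   * });
+   * const response = await client.get_publication(request);
+   * ```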
+   */
+  async get_publication(
+    request: models.routing_v1.GetPublicationRequest,
+  ): Promise<models.routing_v1.GetPublicationResponse> {
+    return await this.publicationClient.getPublication(request);
+  }
+
+  /**
+   * Sign a record using a private key.
+   *
+   * This private method handles key-based signing by writing the private key
+   * to a temporary file and executing the dirctl command with the key file.
+   *
+   * @param cid - Content identifier of the record to sign
+   * @param req - SignWithKey request containing the private key
+   * @returns SpawnSyncReturns with the output of the dirctl sign command
+   *
+   * @throws {Error} If any error occurs during signing
+   *
+   * @private
+   */
+  private __sign_with_key(cid: string, req: models.sign_v1.SignWithKey): SpawnSyncReturns<string> {
+    // Write private key to a temporary file
+    const tmp_key_filename = join(tmpdir(), '.p.key');
+    writeFileSync(tmp_key_filename, String(req.privateKey));
+
+    // Prepare environment for the command without mutating process.env
+    const shell_env = { ...env, COSIGN_PASSWORD: String(req.password) };
+
+    const commandArgs = ["sign", cid, "--key", tmp_key_filename];
+
+    if (this.config.spiffeEndpointSocket !== '') {
+      commandArgs.push(...["--spiffe-socket-path", this.config.spiffeEndpointSocket]);
+    }
+
+    // Execute command
+    const output = spawnSync(
+      `${this.config.dirctlPath}`, commandArgs,
+      { env: shell_env, encoding: 'utf8', stdio: 'pipe' },
+    );
+
+    return output;
+  }
+
+  /**
+   * Sign a record using OIDC-based authentication.
+   *
+   * This private method handles OIDC-based signing by building the appropriate
+   * dirctl command with OIDC parameters and executing it.
+   *
+   * @param cid - Content identifier of the record to sign
+   * @param req - SignWithOIDC request containing the OIDC configuration
+   * @param oidc_client_id - OIDC client identifier for authentication
+   * @returns SpawnSyncReturns with the output of the dirctl sign command
+   *
+   * @throws {Error} If any error occurs during signing
+   *
+   * @private
+   */
+  private __sign_with_oidc(
+    cid: string,
+    req: models.sign_v1.SignWithOIDC,
+    oidc_client_id: string,
+  ): SpawnSyncReturns<string> {
+    // Prepare command
+    const commandArgs = ["sign", cid];
+    if (req.idToken !== '') {
+      commandArgs.push(...["--oidc-token", req.idToken]);
+    }
+    if (
+      req.options?.oidcProviderUrl !== undefined &&
+      req.options.oidcProviderUrl !== ''
+    ) {
+      commandArgs.push(...["--oidc-provider-url", req.options.oidcProviderUrl]);
+    }
+    if (req.options?.fulcioUrl !== undefined && req.options.fulcioUrl !== '') {
+      commandArgs.push(...["--fulcio-url", req.options.fulcioUrl]);
+    }
+    if (req.options?.rekorUrl !== undefined && req.options.rekorUrl !== '') {
+      commandArgs.push(...["--rekor-url", req.options.rekorUrl]);
+    }
+    if (
+      req.options?.timestampUrl !== undefined &&
+      req.options.timestampUrl !== ''
+    ) {
+      commandArgs.push(...["--timestamp-url", req.options.timestampUrl]);
+    }
+
+    if (this.config.spiffeEndpointSocket !== '') {
+      commandArgs.push(...["--spiffe-socket-path", this.config.spiffeEndpointSocket]);
+    }
+
+    // Execute command
+    const output = spawnSync(`${this.config.dirctlPath}`, commandArgs, {
+      env: { ...env },
+      encoding: 'utf8',
+      stdio: 'pipe',
+    });
+
+    return output;
+  }
+}
diff --git a/sdk/dir-js/src/client/index.ts b/sdk/dir-js/src/client/index.ts
index e85cba111..a955f059f 100644
--- a/sdk/dir-js/src/client/index.ts
+++ b/sdk/dir-js/src/client/index.ts
@@ -1,4 +1,4 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-export * from './client';
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+export * from './client';
diff --git a/sdk/dir-js/src/index.ts b/sdk/dir-js/src/index.ts
index 41226f92c..e7f0a070a 100644
--- a/sdk/dir-js/src/index.ts
+++ b/sdk/dir-js/src/index.ts
@@ -1,5 +1,5 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-export * from './client';
-export * as models from './models';
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+export * from './client';
+export * as models from './models';
diff --git a/sdk/dir-js/src/models/README.md b/sdk/dir-js/src/models/README.md
index fcf29be52..1172a2f70 100644
--- a/sdk/dir-js/src/models/README.md
+++ b/sdk/dir-js/src/models/README.md
@@ -1,12 +1,12 @@
-# Directory SDK Models
-
-Directory models are distributed via `buf.build` and generated from Protocol Buffers definitions,
-which can become cumbersome to import and use.
-This module simplifies the imports and usage of data models needed by Directory APIs.
-It re-exports all the models from the generated code into dedicated namespaces so that they can be imported directly from this module.
-
-For example, instead of importing `RecordMeta` from the generated code, use:
-
-```js
-import { RecordMeta } from 'agntcy-dir/models/core_v1';
-```
+# Directory SDK Models
+
+Directory models are distributed via `buf.build` and generated from Protocol Buffers definitions,
+which can become cumbersome to import and use.
+This module simplifies the imports and usage of data models needed by Directory APIs.
+It re-exports all the models from the generated code into dedicated namespaces so that they can be imported directly from this module. + +For example, instead of importing `RecordMeta` from the generated code, use: + +```js +import { RecordMeta } from 'agntcy-dir/models/core_v1'; +``` diff --git a/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.d.ts index f43e64f29..0b69eb06f 100644 --- a/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.d.ts @@ -1,156 +1,156 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/core/v1/record.proto (package agntcy.dir.core.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { JsonObject, Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/core/v1/record.proto. - */ -export declare const file_agntcy_dir_core_v1_record: GenFile; - -/** - * Defines a reference or a globally unique content identifier of a record. - * - * @generated from message agntcy.dir.core.v1.RecordRef - */ -export declare type RecordRef = Message<"agntcy.dir.core.v1.RecordRef"> & { - /** - * Globally-unique content identifier (CID) of the record. - * Specs: https://github.com/multiformats/cid - * - * @generated from field: string cid = 1; - */ - cid: string; -}; - -/** - * Describes the message agntcy.dir.core.v1.RecordRef. - * Use `create(RecordRefSchema)` to create a new message. - */ -export declare const RecordRefSchema: GenMessage; - -/** - * Defines metadata about a record. - * - * @generated from message agntcy.dir.core.v1.RecordMeta - */ -export declare type RecordMeta = Message<"agntcy.dir.core.v1.RecordMeta"> & { - /** - * CID of the record. - * - * @generated from field: string cid = 1; - */ - cid: string; - - /** - * Annotations attached to the record. - * - * @generated from field: map annotations = 2; - */ - annotations: { [key: string]: string }; - - /** - * Schema version of the record. - * - * @generated from field: string schema_version = 3; - */ - schemaVersion: string; - - /** - * Creation timestamp of the record in the RFC3339 format. - * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string created_at = 4; - */ - createdAt: string; -}; - -/** - * Describes the message agntcy.dir.core.v1.RecordMeta. - * Use `create(RecordMetaSchema)` to create a new message. - */ -export declare const RecordMetaSchema: GenMessage; - -/** - * Record is a generic object that encapsulates data of different Record types. - * - * Supported schemas: - * - * v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent - * v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record - * - * @generated from message agntcy.dir.core.v1.Record - */ -export declare type Record = Message<"agntcy.dir.core.v1.Record"> & { - /** - * @generated from field: google.protobuf.Struct data = 1; - */ - data?: JsonObject; -}; - -/** - * Describes the message agntcy.dir.core.v1.Record. - * Use `create(RecordSchema)` to create a new message. - */ -export declare const RecordSchema: GenMessage; - -/** - * RecordReferrer represents a referrer object or an association - * to a record. 
The actual structure of the referrer object can vary - * depending on the type of referrer (e.g., signature, public key, etc.). - * - * RecordReferrer types in the `agntcy.dir.` namespace are reserved for - * Directory-specific schemas and will be validated across Dir services. - * - * @generated from message agntcy.dir.core.v1.RecordReferrer - */ -export declare type RecordReferrer = Message<"agntcy.dir.core.v1.RecordReferrer"> & { - /** - * The type of the referrer. - * For example, "agntcy.dir.sign.v1.Signature" for signatures. - * - * @generated from field: string type = 1; - */ - type: string; - - /** - * Record reference to which this referrer is associated. - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 2; - */ - recordRef?: RecordRef; - - /** - * Annotations attached to the referrer object. - * - * @generated from field: map annotations = 3; - */ - annotations: { [key: string]: string }; - - /** - * Creation timestamp of the record in the RFC3339 format. - * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string created_at = 4; - */ - createdAt: string; - - /** - * The actual data of the referrer. - * - * @generated from field: google.protobuf.Struct data = 5; - */ - data?: JsonObject; -}; - -/** - * Describes the message agntcy.dir.core.v1.RecordReferrer. - * Use `create(RecordReferrerSchema)` to create a new message. - */ -export declare const RecordReferrerSchema: GenMessage; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/core/v1/record.proto (package agntcy.dir.core.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { JsonObject, Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/core/v1/record.proto. + */ +export declare const file_agntcy_dir_core_v1_record: GenFile; + +/** + * Defines a reference or a globally unique content identifier of a record. + * + * @generated from message agntcy.dir.core.v1.RecordRef + */ +export declare type RecordRef = Message<"agntcy.dir.core.v1.RecordRef"> & { + /** + * Globally-unique content identifier (CID) of the record. + * Specs: https://github.com/multiformats/cid + * + * @generated from field: string cid = 1; + */ + cid: string; +}; + +/** + * Describes the message agntcy.dir.core.v1.RecordRef. + * Use `create(RecordRefSchema)` to create a new message. + */ +export declare const RecordRefSchema: GenMessage; + +/** + * Defines metadata about a record. + * + * @generated from message agntcy.dir.core.v1.RecordMeta + */ +export declare type RecordMeta = Message<"agntcy.dir.core.v1.RecordMeta"> & { + /** + * CID of the record. + * + * @generated from field: string cid = 1; + */ + cid: string; + + /** + * Annotations attached to the record. + * + * @generated from field: map annotations = 2; + */ + annotations: { [key: string]: string }; + + /** + * Schema version of the record. + * + * @generated from field: string schema_version = 3; + */ + schemaVersion: string; + + /** + * Creation timestamp of the record in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string created_at = 4; + */ + createdAt: string; +}; + +/** + * Describes the message agntcy.dir.core.v1.RecordMeta. + * Use `create(RecordMetaSchema)` to create a new message. 
+ */ +export declare const RecordMetaSchema: GenMessage; + +/** + * Record is a generic object that encapsulates data of different Record types. + * + * Supported schemas: + * + * v0.3.1: https://schema.oasf.outshift.com/0.3.1/objects/agent + * v0.7.0: https://schema.oasf.outshift.com/0.7.0/objects/record + * + * @generated from message agntcy.dir.core.v1.Record + */ +export declare type Record = Message<"agntcy.dir.core.v1.Record"> & { + /** + * @generated from field: google.protobuf.Struct data = 1; + */ + data?: JsonObject; +}; + +/** + * Describes the message agntcy.dir.core.v1.Record. + * Use `create(RecordSchema)` to create a new message. + */ +export declare const RecordSchema: GenMessage; + +/** + * RecordReferrer represents a referrer object or an association + * to a record. The actual structure of the referrer object can vary + * depending on the type of referrer (e.g., signature, public key, etc.). + * + * RecordReferrer types in the `agntcy.dir.` namespace are reserved for + * Directory-specific schemas and will be validated across Dir services. + * + * @generated from message agntcy.dir.core.v1.RecordReferrer + */ +export declare type RecordReferrer = Message<"agntcy.dir.core.v1.RecordReferrer"> & { + /** + * The type of the referrer. + * For example, "agntcy.dir.sign.v1.Signature" for signatures. + * + * @generated from field: string type = 1; + */ + type: string; + + /** + * Record reference to which this referrer is associated. + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 2; + */ + recordRef?: RecordRef; + + /** + * Annotations attached to the referrer object. + * + * @generated from field: map annotations = 3; + */ + annotations: { [key: string]: string }; + + /** + * Creation timestamp of the record in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string created_at = 4; + */ + createdAt: string; + + /** + * The actual data of the referrer. + * + * @generated from field: google.protobuf.Struct data = 5; + */ + data?: JsonObject; +}; + +/** + * Describes the message agntcy.dir.core.v1.RecordReferrer. + * Use `create(RecordReferrerSchema)` to create a new message. + */ +export declare const RecordReferrerSchema: GenMessage; + diff --git a/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.js b/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.js index 3ba174eab..49dbd2e65 100644 --- a/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/core/v1/record_pb.js @@ -1,44 +1,44 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/core/v1/record.proto (package agntcy.dir.core.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; -import { file_google_protobuf_struct } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/core/v1/record.proto. 
- */ -export const file_agntcy_dir_core_v1_record = /*@__PURE__*/ - fileDesc("Ch9hZ250Y3kvZGlyL2NvcmUvdjEvcmVjb3JkLnByb3RvEhJhZ250Y3kuZGlyLmNvcmUudjEiGAoJUmVjb3JkUmVmEgsKA2NpZBgBIAEoCSK/AQoKUmVjb3JkTWV0YRILCgNjaWQYASABKAkSRAoLYW5ub3RhdGlvbnMYAiADKAsyLy5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkTWV0YS5Bbm5vdGF0aW9uc0VudHJ5EhYKDnNjaGVtYV92ZXJzaW9uGAMgASgJEhIKCmNyZWF0ZWRfYXQYBCABKAkaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBIi8KBlJlY29yZBIlCgRkYXRhGAEgASgLMhcuZ29vZ2xlLnByb3RvYnVmLlN0cnVjdCKKAgoOUmVjb3JkUmVmZXJyZXISDAoEdHlwZRgBIAEoCRIxCgpyZWNvcmRfcmVmGAIgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhJICgthbm5vdGF0aW9ucxgDIAMoCzIzLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlci5Bbm5vdGF0aW9uc0VudHJ5EhIKCmNyZWF0ZWRfYXQYBCABKAkSJQoEZGF0YRgFIAEoCzIXLmdvb2dsZS5wcm90b2J1Zi5TdHJ1Y3QaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBQrMBChZjb20uYWdudGN5LmRpci5jb3JlLnYxQgtSZWNvcmRQcm90b1ABWiFnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL2NvcmUvdjGiAgNBREOqAhJBZ250Y3kuRGlyLkNvcmUuVjHKAhJBZ250Y3lcRGlyXENvcmVcVjHiAh5BZ250Y3lcRGlyXENvcmVcVjFcR1BCTWV0YWRhdGHqAhVBZ250Y3k6OkRpcjo6Q29yZTo6VjFiBnByb3RvMw", [file_google_protobuf_struct]); - -/** - * Describes the message agntcy.dir.core.v1.RecordRef. - * Use `create(RecordRefSchema)` to create a new message. - */ -export const RecordRefSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_core_v1_record, 0); - -/** - * Describes the message agntcy.dir.core.v1.RecordMeta. - * Use `create(RecordMetaSchema)` to create a new message. - */ -export const RecordMetaSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_core_v1_record, 1); - -/** - * Describes the message agntcy.dir.core.v1.Record. - * Use `create(RecordSchema)` to create a new message. - */ -export const RecordSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_core_v1_record, 2); - -/** - * Describes the message agntcy.dir.core.v1.RecordReferrer. - * Use `create(RecordReferrerSchema)` to create a new message. - */ -export const RecordReferrerSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_core_v1_record, 3); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/core/v1/record.proto (package agntcy.dir.core.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_google_protobuf_struct } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/core/v1/record.proto. 
+ */ +export const file_agntcy_dir_core_v1_record = /*@__PURE__*/ + fileDesc("Ch9hZ250Y3kvZGlyL2NvcmUvdjEvcmVjb3JkLnByb3RvEhJhZ250Y3kuZGlyLmNvcmUudjEiGAoJUmVjb3JkUmVmEgsKA2NpZBgBIAEoCSK/AQoKUmVjb3JkTWV0YRILCgNjaWQYASABKAkSRAoLYW5ub3RhdGlvbnMYAiADKAsyLy5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkTWV0YS5Bbm5vdGF0aW9uc0VudHJ5EhYKDnNjaGVtYV92ZXJzaW9uGAMgASgJEhIKCmNyZWF0ZWRfYXQYBCABKAkaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBIi8KBlJlY29yZBIlCgRkYXRhGAEgASgLMhcuZ29vZ2xlLnByb3RvYnVmLlN0cnVjdCKKAgoOUmVjb3JkUmVmZXJyZXISDAoEdHlwZRgBIAEoCRIxCgpyZWNvcmRfcmVmGAIgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhJICgthbm5vdGF0aW9ucxgDIAMoCzIzLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlci5Bbm5vdGF0aW9uc0VudHJ5EhIKCmNyZWF0ZWRfYXQYBCABKAkSJQoEZGF0YRgFIAEoCzIXLmdvb2dsZS5wcm90b2J1Zi5TdHJ1Y3QaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBQrMBChZjb20uYWdudGN5LmRpci5jb3JlLnYxQgtSZWNvcmRQcm90b1ABWiFnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL2NvcmUvdjGiAgNBREOqAhJBZ250Y3kuRGlyLkNvcmUuVjHKAhJBZ250Y3lcRGlyXENvcmVcVjHiAh5BZ250Y3lcRGlyXENvcmVcVjFcR1BCTWV0YWRhdGHqAhVBZ250Y3k6OkRpcjo6Q29yZTo6VjFiBnByb3RvMw", [file_google_protobuf_struct]); + +/** + * Describes the message agntcy.dir.core.v1.RecordRef. + * Use `create(RecordRefSchema)` to create a new message. + */ +export const RecordRefSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_core_v1_record, 0); + +/** + * Describes the message agntcy.dir.core.v1.RecordMeta. + * Use `create(RecordMetaSchema)` to create a new message. + */ +export const RecordMetaSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_core_v1_record, 1); + +/** + * Describes the message agntcy.dir.core.v1.Record. + * Use `create(RecordSchema)` to create a new message. + */ +export const RecordSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_core_v1_record, 2); + +/** + * Describes the message agntcy.dir.core.v1.RecordReferrer. + * Use `create(RecordReferrerSchema)` to create a new message. + */ +export const RecordReferrerSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_core_v1_record, 3); + diff --git a/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.d.ts index 9e4e20987..db83805b1 100644 --- a/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.d.ts @@ -1,242 +1,242 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/events/v1/event_service.proto (package agntcy.dir.events.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { Timestamp } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/events/v1/event_service.proto. - */ -export declare const file_agntcy_dir_events_v1_event_service: GenFile; - -/** - * ListenRequest specifies filters for event subscription. - * - * @generated from message agntcy.dir.events.v1.ListenRequest - */ -export declare type ListenRequest = Message<"agntcy.dir.events.v1.ListenRequest"> & { - /** - * Event types to subscribe to. - * If empty, subscribes to all event types. 
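With the runtime descriptors in record_pb.js, the same schemas also drive wire-format serialization. A sketch of a binary round-trip using the standard protobuf-es helpers (import path illustrative):

import { create, toBinary, fromBinary } from "@bufbuild/protobuf";
import { RecordSchema } from "./models/agntcy/dir/core/v1/record_pb.js";

const original = create(RecordSchema, { data: { name: "example-agent" } });
const bytes = toBinary(RecordSchema, original);  // Uint8Array in protobuf wire format
const decoded = fromBinary(RecordSchema, bytes); // back to a typed Record
console.log(decoded.data?.name);                 // Struct fields read as plain JSON values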
- * - * @generated from field: repeated agntcy.dir.events.v1.EventType event_types = 1; - */ - eventTypes: EventType[]; - - /** - * Optional label filters (e.g., "/skills/AI", "/domains/research"). - * Only events for records matching these labels are delivered. - * Uses substring matching. - * - * @generated from field: repeated string label_filters = 2; - */ - labelFilters: string[]; - - /** - * Optional CID filters. - * Only events for specific CIDs are delivered. - * - * @generated from field: repeated string cid_filters = 3; - */ - cidFilters: string[]; -}; - -/** - * Describes the message agntcy.dir.events.v1.ListenRequest. - * Use `create(ListenRequestSchema)` to create a new message. - */ -export declare const ListenRequestSchema: GenMessage; - -/** - * ListenResponse is the response message for the Listen RPC. - * Wraps the Event message to allow for future extensions without breaking the Event structure. - * - * @generated from message agntcy.dir.events.v1.ListenResponse - */ -export declare type ListenResponse = Message<"agntcy.dir.events.v1.ListenResponse"> & { - /** - * The event that occurred. - * - * @generated from field: agntcy.dir.events.v1.Event event = 1; - */ - event?: Event; -}; - -/** - * Describes the message agntcy.dir.events.v1.ListenResponse. - * Use `create(ListenResponseSchema)` to create a new message. - */ -export declare const ListenResponseSchema: GenMessage; - -/** - * Event represents a system event that occurred. - * - * @generated from message agntcy.dir.events.v1.Event - */ -export declare type Event = Message<"agntcy.dir.events.v1.Event"> & { - /** - * Unique event identifier (generated by the system). - * - * @generated from field: string id = 1; - */ - id: string; - - /** - * Type of event that occurred. - * - * @generated from field: agntcy.dir.events.v1.EventType type = 2; - */ - type: EventType; - - /** - * When the event occurred. - * - * @generated from field: google.protobuf.Timestamp timestamp = 3; - */ - timestamp?: Timestamp; - - /** - * Resource identifier (CID for records, sync_id for syncs, etc.). - * - * @generated from field: string resource_id = 4; - */ - resourceId: string; - - /** - * Optional labels associated with the record (for record events). - * - * @generated from field: repeated string labels = 5; - */ - labels: string[]; - - /** - * Optional metadata for additional context. - * Used for flexible event-specific data that doesn't fit standard fields. - * - * @generated from field: map metadata = 7; - */ - metadata: { [key: string]: string }; -}; - -/** - * Describes the message agntcy.dir.events.v1.Event. - * Use `create(EventSchema)` to create a new message. - */ -export declare const EventSchema: GenMessage; - -/** - * EventType represents all valid event types in the system. - * Each value represents a specific operation that can occur. - * - * Supported Events: - * - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED - * - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED - * - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED - * - Sign: RECORD_SIGNED - * - * @generated from enum agntcy.dir.events.v1.EventType - */ -export enum EventType { - /** - * Unknown/unspecified event type. - * - * @generated from enum value: EVENT_TYPE_UNSPECIFIED = 0; - */ - UNSPECIFIED = 0, - - /** - * A record was pushed to local storage. - * - * @generated from enum value: EVENT_TYPE_RECORD_PUSHED = 1; - */ - RECORD_PUSHED = 1, - - /** - * A record was pulled from storage. 
- * - * @generated from enum value: EVENT_TYPE_RECORD_PULLED = 2; - */ - RECORD_PULLED = 2, - - /** - * A record was deleted from storage. - * - * @generated from enum value: EVENT_TYPE_RECORD_DELETED = 3; - */ - RECORD_DELETED = 3, - - /** - * A record was published/announced to the network. - * - * @generated from enum value: EVENT_TYPE_RECORD_PUBLISHED = 4; - */ - RECORD_PUBLISHED = 4, - - /** - * A record was unpublished from the network. - * - * @generated from enum value: EVENT_TYPE_RECORD_UNPUBLISHED = 5; - */ - RECORD_UNPUBLISHED = 5, - - /** - * A sync operation was created/initiated. - * - * @generated from enum value: EVENT_TYPE_SYNC_CREATED = 6; - */ - SYNC_CREATED = 6, - - /** - * A sync operation completed successfully. - * - * @generated from enum value: EVENT_TYPE_SYNC_COMPLETED = 7; - */ - SYNC_COMPLETED = 7, - - /** - * A sync operation failed. - * - * @generated from enum value: EVENT_TYPE_SYNC_FAILED = 8; - */ - SYNC_FAILED = 8, - - /** - * A record was signed. - * - * @generated from enum value: EVENT_TYPE_RECORD_SIGNED = 9; - */ - RECORD_SIGNED = 9, -} - -/** - * Describes the enum agntcy.dir.events.v1.EventType. - */ -export declare const EventTypeSchema: GenEnum; - -/** - * EventService provides real-time event streaming for all system operations. - * Events are delivered from subscription time forward with no history or replay. - * This service enables external applications to react to system changes in real-time. - * - * @generated from service agntcy.dir.events.v1.EventService - */ -export declare const EventService: GenService<{ - /** - * Listen establishes a streaming connection to receive events. - * Events are only delivered while the stream is active. - * On disconnect, missed events are not recoverable. - * - * @generated from rpc agntcy.dir.events.v1.EventService.Listen - */ - listen: { - methodKind: "server_streaming"; - input: typeof ListenRequestSchema; - output: typeof ListenResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/events/v1/event_service.proto (package agntcy.dir.events.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { Timestamp } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/events/v1/event_service.proto. + */ +export declare const file_agntcy_dir_events_v1_event_service: GenFile; + +/** + * ListenRequest specifies filters for event subscription. + * + * @generated from message agntcy.dir.events.v1.ListenRequest + */ +export declare type ListenRequest = Message<"agntcy.dir.events.v1.ListenRequest"> & { + /** + * Event types to subscribe to. + * If empty, subscribes to all event types. + * + * @generated from field: repeated agntcy.dir.events.v1.EventType event_types = 1; + */ + eventTypes: EventType[]; + + /** + * Optional label filters (e.g., "/skills/AI", "/domains/research"). + * Only events for records matching these labels are delivered. + * Uses substring matching. + * + * @generated from field: repeated string label_filters = 2; + */ + labelFilters: string[]; + + /** + * Optional CID filters. + * Only events for specific CIDs are delivered. 
+ * + * @generated from field: repeated string cid_filters = 3; + */ + cidFilters: string[]; +}; + +/** + * Describes the message agntcy.dir.events.v1.ListenRequest. + * Use `create(ListenRequestSchema)` to create a new message. + */ +export declare const ListenRequestSchema: GenMessage; + +/** + * ListenResponse is the response message for the Listen RPC. + * Wraps the Event message to allow for future extensions without breaking the Event structure. + * + * @generated from message agntcy.dir.events.v1.ListenResponse + */ +export declare type ListenResponse = Message<"agntcy.dir.events.v1.ListenResponse"> & { + /** + * The event that occurred. + * + * @generated from field: agntcy.dir.events.v1.Event event = 1; + */ + event?: Event; +}; + +/** + * Describes the message agntcy.dir.events.v1.ListenResponse. + * Use `create(ListenResponseSchema)` to create a new message. + */ +export declare const ListenResponseSchema: GenMessage; + +/** + * Event represents a system event that occurred. + * + * @generated from message agntcy.dir.events.v1.Event + */ +export declare type Event = Message<"agntcy.dir.events.v1.Event"> & { + /** + * Unique event identifier (generated by the system). + * + * @generated from field: string id = 1; + */ + id: string; + + /** + * Type of event that occurred. + * + * @generated from field: agntcy.dir.events.v1.EventType type = 2; + */ + type: EventType; + + /** + * When the event occurred. + * + * @generated from field: google.protobuf.Timestamp timestamp = 3; + */ + timestamp?: Timestamp; + + /** + * Resource identifier (CID for records, sync_id for syncs, etc.). + * + * @generated from field: string resource_id = 4; + */ + resourceId: string; + + /** + * Optional labels associated with the record (for record events). + * + * @generated from field: repeated string labels = 5; + */ + labels: string[]; + + /** + * Optional metadata for additional context. + * Used for flexible event-specific data that doesn't fit standard fields. + * + * @generated from field: map metadata = 7; + */ + metadata: { [key: string]: string }; +}; + +/** + * Describes the message agntcy.dir.events.v1.Event. + * Use `create(EventSchema)` to create a new message. + */ +export declare const EventSchema: GenMessage; + +/** + * EventType represents all valid event types in the system. + * Each value represents a specific operation that can occur. + * + * Supported Events: + * - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED + * - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED + * - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED + * - Sign: RECORD_SIGNED + * + * @generated from enum agntcy.dir.events.v1.EventType + */ +export enum EventType { + /** + * Unknown/unspecified event type. + * + * @generated from enum value: EVENT_TYPE_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * A record was pushed to local storage. + * + * @generated from enum value: EVENT_TYPE_RECORD_PUSHED = 1; + */ + RECORD_PUSHED = 1, + + /** + * A record was pulled from storage. + * + * @generated from enum value: EVENT_TYPE_RECORD_PULLED = 2; + */ + RECORD_PULLED = 2, + + /** + * A record was deleted from storage. + * + * @generated from enum value: EVENT_TYPE_RECORD_DELETED = 3; + */ + RECORD_DELETED = 3, + + /** + * A record was published/announced to the network. + * + * @generated from enum value: EVENT_TYPE_RECORD_PUBLISHED = 4; + */ + RECORD_PUBLISHED = 4, + + /** + * A record was unpublished from the network. 
+ * + * @generated from enum value: EVENT_TYPE_RECORD_UNPUBLISHED = 5; + */ + RECORD_UNPUBLISHED = 5, + + /** + * A sync operation was created/initiated. + * + * @generated from enum value: EVENT_TYPE_SYNC_CREATED = 6; + */ + SYNC_CREATED = 6, + + /** + * A sync operation completed successfully. + * + * @generated from enum value: EVENT_TYPE_SYNC_COMPLETED = 7; + */ + SYNC_COMPLETED = 7, + + /** + * A sync operation failed. + * + * @generated from enum value: EVENT_TYPE_SYNC_FAILED = 8; + */ + SYNC_FAILED = 8, + + /** + * A record was signed. + * + * @generated from enum value: EVENT_TYPE_RECORD_SIGNED = 9; + */ + RECORD_SIGNED = 9, +} + +/** + * Describes the enum agntcy.dir.events.v1.EventType. + */ +export declare const EventTypeSchema: GenEnum; + +/** + * EventService provides real-time event streaming for all system operations. + * Events are delivered from subscription time forward with no history or replay. + * This service enables external applications to react to system changes in real-time. + * + * @generated from service agntcy.dir.events.v1.EventService + */ +export declare const EventService: GenService<{ + /** + * Listen establishes a streaming connection to receive events. + * Events are only delivered while the stream is active. + * On disconnect, missed events are not recoverable. + * + * @generated from rpc agntcy.dir.events.v1.EventService.Listen + */ + listen: { + methodKind: "server_streaming"; + input: typeof ListenRequestSchema; + output: typeof ListenResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.js index 6d48778f7..072aaee05 100644 --- a/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/events/v1/event_service_pb.js @@ -1,68 +1,68 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/events/v1/event_service.proto (package agntcy.dir.events.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; -import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/events/v1/event_service.proto. 
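ListenRequest combines event-type, label, and CID filters, all optional. As a sketch, a subscription narrowed to push and delete events on AI-skill records might be built like this (filter values illustrative):

import { create } from "@bufbuild/protobuf";
import { ListenRequestSchema, EventType } from "./models/agntcy/dir/events/v1/event_service_pb.js";

// An empty eventTypes list would subscribe to all event types; here we narrow the stream.
const request = create(ListenRequestSchema, {
  eventTypes: [EventType.RECORD_PUSHED, EventType.RECORD_DELETED],
  labelFilters: ["/skills/AI"], // substring match against record labels
  cidFilters: [],               // empty = no CID restriction
});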
- */ -export const file_agntcy_dir_events_v1_event_service = /*@__PURE__*/ - fileDesc("CihhZ250Y3kvZGlyL2V2ZW50cy92MS9ldmVudF9zZXJ2aWNlLnByb3RvEhRhZ250Y3kuZGlyLmV2ZW50cy52MSJxCg1MaXN0ZW5SZXF1ZXN0EjQKC2V2ZW50X3R5cGVzGAEgAygOMh8uYWdudGN5LmRpci5ldmVudHMudjEuRXZlbnRUeXBlEhUKDWxhYmVsX2ZpbHRlcnMYAiADKAkSEwoLY2lkX2ZpbHRlcnMYAyADKAkiPAoOTGlzdGVuUmVzcG9uc2USKgoFZXZlbnQYASABKAsyGy5hZ250Y3kuZGlyLmV2ZW50cy52MS5FdmVudCKEAgoFRXZlbnQSCgoCaWQYASABKAkSLQoEdHlwZRgCIAEoDjIfLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkV2ZW50VHlwZRItCgl0aW1lc3RhbXAYAyABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wEhMKC3Jlc291cmNlX2lkGAQgASgJEg4KBmxhYmVscxgFIAMoCRI7CghtZXRhZGF0YRgHIAMoCzIpLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkV2ZW50Lk1ldGFkYXRhRW50cnkaLwoNTWV0YWRhdGFFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBKrwCCglFdmVudFR5cGUSGgoWRVZFTlRfVFlQRV9VTlNQRUNJRklFRBAAEhwKGEVWRU5UX1RZUEVfUkVDT1JEX1BVU0hFRBABEhwKGEVWRU5UX1RZUEVfUkVDT1JEX1BVTExFRBACEh0KGUVWRU5UX1RZUEVfUkVDT1JEX0RFTEVURUQQAxIfChtFVkVOVF9UWVBFX1JFQ09SRF9QVUJMSVNIRUQQBBIhCh1FVkVOVF9UWVBFX1JFQ09SRF9VTlBVQkxJU0hFRBAFEhsKF0VWRU5UX1RZUEVfU1lOQ19DUkVBVEVEEAYSHQoZRVZFTlRfVFlQRV9TWU5DX0NPTVBMRVRFRBAHEhoKFkVWRU5UX1RZUEVfU1lOQ19GQUlMRUQQCBIcChhFVkVOVF9UWVBFX1JFQ09SRF9TSUdORUQQCTJlCgxFdmVudFNlcnZpY2USVQoGTGlzdGVuEiMuYWdudGN5LmRpci5ldmVudHMudjEuTGlzdGVuUmVxdWVzdBokLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkxpc3RlblJlc3BvbnNlMAFCxQEKGGNvbS5hZ250Y3kuZGlyLmV2ZW50cy52MUIRRXZlbnRTZXJ2aWNlUHJvdG9QAVojZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9ldmVudHMvdjGiAgNBREWqAhRBZ250Y3kuRGlyLkV2ZW50cy5WMcoCFEFnbnRjeVxEaXJcRXZlbnRzXFYx4gIgQWdudGN5XERpclxFdmVudHNcVjFcR1BCTWV0YWRhdGHqAhdBZ250Y3k6OkRpcjo6RXZlbnRzOjpWMWIGcHJvdG8z", [file_google_protobuf_timestamp]); - -/** - * Describes the message agntcy.dir.events.v1.ListenRequest. - * Use `create(ListenRequestSchema)` to create a new message. - */ -export const ListenRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_events_v1_event_service, 0); - -/** - * Describes the message agntcy.dir.events.v1.ListenResponse. - * Use `create(ListenResponseSchema)` to create a new message. - */ -export const ListenResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_events_v1_event_service, 1); - -/** - * Describes the message agntcy.dir.events.v1.Event. - * Use `create(EventSchema)` to create a new message. - */ -export const EventSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_events_v1_event_service, 2); - -/** - * Describes the enum agntcy.dir.events.v1.EventType. - */ -export const EventTypeSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_events_v1_event_service, 0); - -/** - * EventType represents all valid event types in the system. - * Each value represents a specific operation that can occur. - * - * Supported Events: - * - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED - * - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED - * - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED - * - Sign: RECORD_SIGNED - * - * @generated from enum agntcy.dir.events.v1.EventType - */ -export const EventType = /*@__PURE__*/ - tsEnum(EventTypeSchema); - -/** - * EventService provides real-time event streaming for all system operations. - * Events are delivered from subscription time forward with no history or replay. - * This service enables external applications to react to system changes in real-time. 
- * - * @generated from service agntcy.dir.events.v1.EventService - */ -export const EventService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_events_v1_event_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/events/v1/event_service.proto (package agntcy.dir.events.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; +import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/events/v1/event_service.proto. + */ +export const file_agntcy_dir_events_v1_event_service = /*@__PURE__*/ + fileDesc("CihhZ250Y3kvZGlyL2V2ZW50cy92MS9ldmVudF9zZXJ2aWNlLnByb3RvEhRhZ250Y3kuZGlyLmV2ZW50cy52MSJxCg1MaXN0ZW5SZXF1ZXN0EjQKC2V2ZW50X3R5cGVzGAEgAygOMh8uYWdudGN5LmRpci5ldmVudHMudjEuRXZlbnRUeXBlEhUKDWxhYmVsX2ZpbHRlcnMYAiADKAkSEwoLY2lkX2ZpbHRlcnMYAyADKAkiPAoOTGlzdGVuUmVzcG9uc2USKgoFZXZlbnQYASABKAsyGy5hZ250Y3kuZGlyLmV2ZW50cy52MS5FdmVudCKEAgoFRXZlbnQSCgoCaWQYASABKAkSLQoEdHlwZRgCIAEoDjIfLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkV2ZW50VHlwZRItCgl0aW1lc3RhbXAYAyABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wEhMKC3Jlc291cmNlX2lkGAQgASgJEg4KBmxhYmVscxgFIAMoCRI7CghtZXRhZGF0YRgHIAMoCzIpLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkV2ZW50Lk1ldGFkYXRhRW50cnkaLwoNTWV0YWRhdGFFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBKrwCCglFdmVudFR5cGUSGgoWRVZFTlRfVFlQRV9VTlNQRUNJRklFRBAAEhwKGEVWRU5UX1RZUEVfUkVDT1JEX1BVU0hFRBABEhwKGEVWRU5UX1RZUEVfUkVDT1JEX1BVTExFRBACEh0KGUVWRU5UX1RZUEVfUkVDT1JEX0RFTEVURUQQAxIfChtFVkVOVF9UWVBFX1JFQ09SRF9QVUJMSVNIRUQQBBIhCh1FVkVOVF9UWVBFX1JFQ09SRF9VTlBVQkxJU0hFRBAFEhsKF0VWRU5UX1RZUEVfU1lOQ19DUkVBVEVEEAYSHQoZRVZFTlRfVFlQRV9TWU5DX0NPTVBMRVRFRBAHEhoKFkVWRU5UX1RZUEVfU1lOQ19GQUlMRUQQCBIcChhFVkVOVF9UWVBFX1JFQ09SRF9TSUdORUQQCTJlCgxFdmVudFNlcnZpY2USVQoGTGlzdGVuEiMuYWdudGN5LmRpci5ldmVudHMudjEuTGlzdGVuUmVxdWVzdBokLmFnbnRjeS5kaXIuZXZlbnRzLnYxLkxpc3RlblJlc3BvbnNlMAFCxQEKGGNvbS5hZ250Y3kuZGlyLmV2ZW50cy52MUIRRXZlbnRTZXJ2aWNlUHJvdG9QAVojZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9ldmVudHMvdjGiAgNBREWqAhRBZ250Y3kuRGlyLkV2ZW50cy5WMcoCFEFnbnRjeVxEaXJcRXZlbnRzXFYx4gIgQWdudGN5XERpclxFdmVudHNcVjFcR1BCTWV0YWRhdGHqAhdBZ250Y3k6OkRpcjo6RXZlbnRzOjpWMWIGcHJvdG8z", [file_google_protobuf_timestamp]); + +/** + * Describes the message agntcy.dir.events.v1.ListenRequest. + * Use `create(ListenRequestSchema)` to create a new message. + */ +export const ListenRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_events_v1_event_service, 0); + +/** + * Describes the message agntcy.dir.events.v1.ListenResponse. + * Use `create(ListenResponseSchema)` to create a new message. + */ +export const ListenResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_events_v1_event_service, 1); + +/** + * Describes the message agntcy.dir.events.v1.Event. + * Use `create(EventSchema)` to create a new message. + */ +export const EventSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_events_v1_event_service, 2); + +/** + * Describes the enum agntcy.dir.events.v1.EventType. + */ +export const EventTypeSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_events_v1_event_service, 0); + +/** + * EventType represents all valid event types in the system. + * Each value represents a specific operation that can occur. 
+ * + * Supported Events: + * - Store: RECORD_PUSHED, RECORD_PULLED, RECORD_DELETED + * - Routing: RECORD_PUBLISHED, RECORD_UNPUBLISHED + * - Sync: SYNC_CREATED, SYNC_COMPLETED, SYNC_FAILED + * - Sign: RECORD_SIGNED + * + * @generated from enum agntcy.dir.events.v1.EventType + */ +export const EventType = /*@__PURE__*/ + tsEnum(EventTypeSchema); + +/** + * EventService provides real-time event streaming for all system operations. + * Events are delivered from subscription time forward with no history or replay. + * This service enables external applications to react to system changes in real-time. + * + * @generated from service agntcy.dir.events.v1.EventService + */ +export const EventService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_events_v1_event_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.d.ts index 80da9e888..b4d5dbea6 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.d.ts @@ -1,102 +1,102 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/peer.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/routing/v1/peer.proto. - */ -export declare const file_agntcy_dir_routing_v1_peer: GenFile; - -/** - * @generated from message agntcy.dir.routing.v1.Peer - */ -export declare type Peer = Message<"agntcy.dir.routing.v1.Peer"> & { - /** - * ID of a given peer, typically described by a protocol. - * For example: - * - SPIFFE: "spiffe://example.org/service/foo" - * - JWT: "jwt:sub=alice,iss=https://issuer.example.com" - * - Tor: "onion:abcdefghijklmno.onion" - * - DID: "did:example:123456789abcdefghi" - * - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" - * - * @generated from field: string id = 1; - */ - id: string; - - /** - * Multiaddrs for a given peer. - * For example: - * - "/ip4/127.0.0.1/tcp/4001" - * - "/ip6/::1/tcp/4001" - * - "/dns4/example.com/tcp/443/https" - * - * @generated from field: repeated string addrs = 2; - */ - addrs: string[]; - - /** - * Additional metadata about the peer. - * - * @generated from field: map annotations = 3; - */ - annotations: { [key: string]: string }; - - /** - * Used to signal the sender's connection capabilities to the peer. - * - * @generated from field: agntcy.dir.routing.v1.PeerConnectionType connection = 4; - */ - connection: PeerConnectionType; -}; - -/** - * Describes the message agntcy.dir.routing.v1.Peer. - * Use `create(PeerSchema)` to create a new message. - */ -export declare const PeerSchema: GenMessage; - -/** - * @generated from enum agntcy.dir.routing.v1.PeerConnectionType - */ -export enum PeerConnectionType { - /** - * Sender does not have a connection to peer, and no extra information (default) - * - * @generated from enum value: PEER_CONNECTION_TYPE_NOT_CONNECTED = 0; - */ - NOT_CONNECTED = 0, - - /** - * Sender has a live connection to peer. - * - * @generated from enum value: PEER_CONNECTION_TYPE_CONNECTED = 1; - */ - CONNECTED = 1, - - /** - * Sender recently connected to peer. 
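Since EventType spans store, routing, sync, and sign operations, a consumer draining the Listen stream will usually branch on it. A hedged handler sketch in plain TypeScript; the Event values would arrive via the streaming RPC:

import { EventType, type Event as DirEvent } from "./models/agntcy/dir/events/v1/event_service_pb.js";

function handleEvent(event: DirEvent): void {
  switch (event.type) {
    case EventType.RECORD_PUSHED:
      console.log(`record ${event.resourceId} pushed`, event.labels);
      break;
    case EventType.SYNC_FAILED:
      console.error(`sync ${event.resourceId} failed`, event.metadata);
      break;
    default:
      // Missed events are not replayed, so unhandled types are just logged.
      console.log(`event ${event.id}: type ${event.type}`);
  }
}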
- * - * @generated from enum value: PEER_CONNECTION_TYPE_CAN_CONNECT = 2; - */ - CAN_CONNECT = 2, - - /** - * Sender made strong effort to connect to peer repeatedly but failed. - * - * @generated from enum value: PEER_CONNECTION_TYPE_CANNOT_CONNECT = 3; - */ - CANNOT_CONNECT = 3, -} - -/** - * Describes the enum agntcy.dir.routing.v1.PeerConnectionType. - */ -export declare const PeerConnectionTypeSchema: GenEnum; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/peer.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/routing/v1/peer.proto. + */ +export declare const file_agntcy_dir_routing_v1_peer: GenFile; + +/** + * @generated from message agntcy.dir.routing.v1.Peer + */ +export declare type Peer = Message<"agntcy.dir.routing.v1.Peer"> & { + /** + * ID of a given peer, typically described by a protocol. + * For example: + * - SPIFFE: "spiffe://example.org/service/foo" + * - JWT: "jwt:sub=alice,iss=https://issuer.example.com" + * - Tor: "onion:abcdefghijklmno.onion" + * - DID: "did:example:123456789abcdefghi" + * - IPFS: "ipfs:QmYwAPJzv5CZsnAzt8auVZRn2E6sD1c4x8pN5o6d5cW4D5" + * + * @generated from field: string id = 1; + */ + id: string; + + /** + * Multiaddrs for a given peer. + * For example: + * - "/ip4/127.0.0.1/tcp/4001" + * - "/ip6/::1/tcp/4001" + * - "/dns4/example.com/tcp/443/https" + * + * @generated from field: repeated string addrs = 2; + */ + addrs: string[]; + + /** + * Additional metadata about the peer. + * + * @generated from field: map annotations = 3; + */ + annotations: { [key: string]: string }; + + /** + * Used to signal the sender's connection capabilities to the peer. + * + * @generated from field: agntcy.dir.routing.v1.PeerConnectionType connection = 4; + */ + connection: PeerConnectionType; +}; + +/** + * Describes the message agntcy.dir.routing.v1.Peer. + * Use `create(PeerSchema)` to create a new message. + */ +export declare const PeerSchema: GenMessage; + +/** + * @generated from enum agntcy.dir.routing.v1.PeerConnectionType + */ +export enum PeerConnectionType { + /** + * Sender does not have a connection to peer, and no extra information (default) + * + * @generated from enum value: PEER_CONNECTION_TYPE_NOT_CONNECTED = 0; + */ + NOT_CONNECTED = 0, + + /** + * Sender has a live connection to peer. + * + * @generated from enum value: PEER_CONNECTION_TYPE_CONNECTED = 1; + */ + CONNECTED = 1, + + /** + * Sender recently connected to peer. + * + * @generated from enum value: PEER_CONNECTION_TYPE_CAN_CONNECT = 2; + */ + CAN_CONNECT = 2, + + /** + * Sender made strong effort to connect to peer repeatedly but failed. + * + * @generated from enum value: PEER_CONNECTION_TYPE_CANNOT_CONNECT = 3; + */ + CANNOT_CONNECT = 3, +} + +/** + * Describes the enum agntcy.dir.routing.v1.PeerConnectionType. 
+ */ +export declare const PeerConnectionTypeSchema: GenEnum; + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.js b/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.js index 917fe097f..c31dfea02 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/peer_pb.js @@ -1,34 +1,34 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/peer.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/routing/v1/peer.proto. - */ -export const file_agntcy_dir_routing_v1_peer = /*@__PURE__*/ - fileDesc("CiBhZ250Y3kvZGlyL3JvdXRpbmcvdjEvcGVlci5wcm90bxIVYWdudGN5LmRpci5yb3V0aW5nLnYxItcBCgRQZWVyEgoKAmlkGAEgASgJEg0KBWFkZHJzGAIgAygJEkEKC2Fubm90YXRpb25zGAMgAygLMiwuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXIuQW5ub3RhdGlvbnNFbnRyeRI9Cgpjb25uZWN0aW9uGAQgASgOMikuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXJDb25uZWN0aW9uVHlwZRoyChBBbm5vdGF0aW9uc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEqrwEKElBlZXJDb25uZWN0aW9uVHlwZRImCiJQRUVSX0NPTk5FQ1RJT05fVFlQRV9OT1RfQ09OTkVDVEVEEAASIgoeUEVFUl9DT05ORUNUSU9OX1RZUEVfQ09OTkVDVEVEEAESJAogUEVFUl9DT05ORUNUSU9OX1RZUEVfQ0FOX0NPTk5FQ1QQAhInCiNQRUVSX0NPTk5FQ1RJT05fVFlQRV9DQU5OT1RfQ09OTkVDVBADQsMBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQglQZWVyUHJvdG9QAVokZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9yb3V0aW5nL3YxogIDQURSqgIVQWdudGN5LkRpci5Sb3V0aW5nLlYxygIVQWdudGN5XERpclxSb3V0aW5nXFYx4gIhQWdudGN5XERpclxSb3V0aW5nXFYxXEdQQk1ldGFkYXRh6gIYQWdudGN5OjpEaXI6OlJvdXRpbmc6OlYxYgZwcm90bzM"); - -/** - * Describes the message agntcy.dir.routing.v1.Peer. - * Use `create(PeerSchema)` to create a new message. - */ -export const PeerSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_peer, 0); - -/** - * Describes the enum agntcy.dir.routing.v1.PeerConnectionType. - */ -export const PeerConnectionTypeSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_routing_v1_peer, 0); - -/** - * @generated from enum agntcy.dir.routing.v1.PeerConnectionType - */ -export const PeerConnectionType = /*@__PURE__*/ - tsEnum(PeerConnectionTypeSchema); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/peer.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/routing/v1/peer.proto. 
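Peer couples a protocol-scoped ID with multiaddrs, matching the formats listed in the comments above. A construction sketch (identifiers and annotation keys illustrative):

import { create } from "@bufbuild/protobuf";
import { PeerSchema, PeerConnectionType } from "./models/agntcy/dir/routing/v1/peer_pb.js";

const peer = create(PeerSchema, {
  id: "spiffe://example.org/service/foo",
  addrs: ["/ip4/127.0.0.1/tcp/4001", "/dns4/example.com/tcp/443/https"],
  annotations: { region: "us-east-1" },     // free-form peer metadata
  connection: PeerConnectionType.CONNECTED, // sender has a live connection
});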
+ */ +export const file_agntcy_dir_routing_v1_peer = /*@__PURE__*/ + fileDesc("CiBhZ250Y3kvZGlyL3JvdXRpbmcvdjEvcGVlci5wcm90bxIVYWdudGN5LmRpci5yb3V0aW5nLnYxItcBCgRQZWVyEgoKAmlkGAEgASgJEg0KBWFkZHJzGAIgAygJEkEKC2Fubm90YXRpb25zGAMgAygLMiwuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXIuQW5ub3RhdGlvbnNFbnRyeRI9Cgpjb25uZWN0aW9uGAQgASgOMikuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXJDb25uZWN0aW9uVHlwZRoyChBBbm5vdGF0aW9uc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEqrwEKElBlZXJDb25uZWN0aW9uVHlwZRImCiJQRUVSX0NPTk5FQ1RJT05fVFlQRV9OT1RfQ09OTkVDVEVEEAASIgoeUEVFUl9DT05ORUNUSU9OX1RZUEVfQ09OTkVDVEVEEAESJAogUEVFUl9DT05ORUNUSU9OX1RZUEVfQ0FOX0NPTk5FQ1QQAhInCiNQRUVSX0NPTk5FQ1RJT05fVFlQRV9DQU5OT1RfQ09OTkVDVBADQsMBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQglQZWVyUHJvdG9QAVokZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9yb3V0aW5nL3YxogIDQURSqgIVQWdudGN5LkRpci5Sb3V0aW5nLlYxygIVQWdudGN5XERpclxSb3V0aW5nXFYx4gIhQWdudGN5XERpclxSb3V0aW5nXFYxXEdQQk1ldGFkYXRh6gIYQWdudGN5OjpEaXI6OlJvdXRpbmc6OlYxYgZwcm90bzM"); + +/** + * Describes the message agntcy.dir.routing.v1.Peer. + * Use `create(PeerSchema)` to create a new message. + */ +export const PeerSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_peer, 0); + +/** + * Describes the enum agntcy.dir.routing.v1.PeerConnectionType. + */ +export const PeerConnectionTypeSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_routing_v1_peer, 0); + +/** + * @generated from enum agntcy.dir.routing.v1.PeerConnectionType + */ +export const PeerConnectionType = /*@__PURE__*/ + tsEnum(PeerConnectionTypeSchema); + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.d.ts index a5f3727bf..685ee5d37 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.d.ts @@ -1,266 +1,266 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/publication_service.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { PublishRequestSchema } from "./routing_service_pb.js"; - -/** - * Describes the file agntcy/dir/routing/v1/publication_service.proto. - */ -export declare const file_agntcy_dir_routing_v1_publication_service: GenFile; - -/** - * CreatePublicationResponse returns the result of creating a publication request. - * This includes the publication ID and any relevant metadata. - * - * @generated from message agntcy.dir.routing.v1.CreatePublicationResponse - */ -export declare type CreatePublicationResponse = Message<"agntcy.dir.routing.v1.CreatePublicationResponse"> & { - /** - * Unique identifier of the publication operation. - * - * @generated from field: string publication_id = 1; - */ - publicationId: string; -}; - -/** - * Describes the message agntcy.dir.routing.v1.CreatePublicationResponse. - * Use `create(CreatePublicationResponseSchema)` to create a new message. - */ -export declare const CreatePublicationResponseSchema: GenMessage; - -/** - * ListPublicationsRequest contains optional filters for listing publication requests. 
- * - * @generated from message agntcy.dir.routing.v1.ListPublicationsRequest - */ -export declare type ListPublicationsRequest = Message<"agntcy.dir.routing.v1.ListPublicationsRequest"> & { - /** - * Optional limit on the number of results to return. - * - * @generated from field: optional uint32 limit = 2; - */ - limit?: number; - - /** - * Optional offset for pagination of results. - * - * @generated from field: optional uint32 offset = 3; - */ - offset?: number; -}; - -/** - * Describes the message agntcy.dir.routing.v1.ListPublicationsRequest. - * Use `create(ListPublicationsRequestSchema)` to create a new message. - */ -export declare const ListPublicationsRequestSchema: GenMessage; - -/** - * ListPublicationsItem represents a single publication request in the list response. - * Contains publication details including ID, status, and creation timestamp. - * - * @generated from message agntcy.dir.routing.v1.ListPublicationsItem - */ -export declare type ListPublicationsItem = Message<"agntcy.dir.routing.v1.ListPublicationsItem"> & { - /** - * Unique identifier of the publication operation. - * - * @generated from field: string publication_id = 1; - */ - publicationId: string; - - /** - * Current status of the publication operation. - * - * @generated from field: agntcy.dir.routing.v1.PublicationStatus status = 2; - */ - status: PublicationStatus; - - /** - * Timestamp when the publication operation was created in the RFC3339 format. - * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string created_time = 3; - */ - createdTime: string; - - /** - * Timestamp of the most recent status update for this publication in the RFC3339 format. - * - * @generated from field: string last_update_time = 4; - */ - lastUpdateTime: string; -}; - -/** - * Describes the message agntcy.dir.routing.v1.ListPublicationsItem. - * Use `create(ListPublicationsItemSchema)` to create a new message. - */ -export declare const ListPublicationsItemSchema: GenMessage; - -/** - * GetPublicationRequest specifies which publication to retrieve by its identifier. - * - * @generated from message agntcy.dir.routing.v1.GetPublicationRequest - */ -export declare type GetPublicationRequest = Message<"agntcy.dir.routing.v1.GetPublicationRequest"> & { - /** - * Unique identifier of the publication operation to query. - * - * @generated from field: string publication_id = 1; - */ - publicationId: string; -}; - -/** - * Describes the message agntcy.dir.routing.v1.GetPublicationRequest. - * Use `create(GetPublicationRequestSchema)` to create a new message. - */ -export declare const GetPublicationRequestSchema: GenMessage; - -/** - * GetPublicationResponse contains the full details of a specific publication request. - * Includes status, progress information, and any error details if applicable. - * - * @generated from message agntcy.dir.routing.v1.GetPublicationResponse - */ -export declare type GetPublicationResponse = Message<"agntcy.dir.routing.v1.GetPublicationResponse"> & { - /** - * Unique identifier of the publication operation. - * - * @generated from field: string publication_id = 1; - */ - publicationId: string; - - /** - * Current status of the publication operation. - * - * @generated from field: agntcy.dir.routing.v1.PublicationStatus status = 2; - */ - status: PublicationStatus; - - /** - * Timestamp when the publication operation was created in the RFC3339 format. 
- * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string created_time = 3; - */ - createdTime: string; - - /** - * Timestamp of the most recent status update for this publication in the RFC3339 format. - * - * @generated from field: string last_update_time = 4; - */ - lastUpdateTime: string; -}; - -/** - * Describes the message agntcy.dir.routing.v1.GetPublicationResponse. - * Use `create(GetPublicationResponseSchema)` to create a new message. - */ -export declare const GetPublicationResponseSchema: GenMessage; - -/** - * PublicationStatus represents the current state of a publication request. - * Publications progress from pending to processing to completed or failed states. - * - * @generated from enum agntcy.dir.routing.v1.PublicationStatus - */ -export enum PublicationStatus { - /** - * Default/unset status - should not be used in practice - * - * @generated from enum value: PUBLICATION_STATUS_UNSPECIFIED = 0; - */ - UNSPECIFIED = 0, - - /** - * Sync operation has been created but not yet started - * - * @generated from enum value: PUBLICATION_STATUS_PENDING = 1; - */ - PENDING = 1, - - /** - * Sync operation is actively discovering and transferring objects - * - * @generated from enum value: PUBLICATION_STATUS_IN_PROGRESS = 2; - */ - IN_PROGRESS = 2, - - /** - * Sync operation has been successfully completed - * - * @generated from enum value: PUBLICATION_STATUS_COMPLETED = 3; - */ - COMPLETED = 3, - - /** - * Sync operation encountered an error and stopped - * - * @generated from enum value: PUBLICATION_STATUS_FAILED = 4; - */ - FAILED = 4, -} - -/** - * Describes the enum agntcy.dir.routing.v1.PublicationStatus. - */ -export declare const PublicationStatusSchema: GenEnum; - -/** - * PublicationService manages publication requests for announcing records to the DHT. - * - * Publications are stored in the database and processed by a worker that runs every hour. - * The publication workflow: - * 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records - * 2. Publication requests are added to the database - * 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published - * 4. PublicationWorker announces the records with these CIDs to the DHT - * - * @generated from service agntcy.dir.routing.v1.PublicationService - */ -export declare const PublicationService: GenService<{ - /** - * CreatePublication creates a new publication request that will be processed by the PublicationWorker. - * The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. - * - * @generated from rpc agntcy.dir.routing.v1.PublicationService.CreatePublication - */ - createPublication: { - methodKind: "unary"; - input: typeof PublishRequestSchema; - output: typeof CreatePublicationResponseSchema; - }, - /** - * ListPublications returns a stream of all publication requests in the system. - * This allows monitoring of pending, processing, and completed publication requests. - * - * @generated from rpc agntcy.dir.routing.v1.PublicationService.ListPublications - */ - listPublications: { - methodKind: "server_streaming"; - input: typeof ListPublicationsRequestSchema; - output: typeof ListPublicationsItemSchema; - }, - /** - * GetPublication retrieves details of a specific publication request by its identifier. - * This includes the current status and any associated metadata. 
- * - * @generated from rpc agntcy.dir.routing.v1.PublicationService.GetPublication - */ - getPublication: { - methodKind: "unary"; - input: typeof GetPublicationRequestSchema; - output: typeof GetPublicationResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/publication_service.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { PublishRequestSchema } from "./routing_service_pb.js"; + +/** + * Describes the file agntcy/dir/routing/v1/publication_service.proto. + */ +export declare const file_agntcy_dir_routing_v1_publication_service: GenFile; + +/** + * CreatePublicationResponse returns the result of creating a publication request. + * This includes the publication ID and any relevant metadata. + * + * @generated from message agntcy.dir.routing.v1.CreatePublicationResponse + */ +export declare type CreatePublicationResponse = Message<"agntcy.dir.routing.v1.CreatePublicationResponse"> & { + /** + * Unique identifier of the publication operation. + * + * @generated from field: string publication_id = 1; + */ + publicationId: string; +}; + +/** + * Describes the message agntcy.dir.routing.v1.CreatePublicationResponse. + * Use `create(CreatePublicationResponseSchema)` to create a new message. + */ +export declare const CreatePublicationResponseSchema: GenMessage; + +/** + * ListPublicationsRequest contains optional filters for listing publication requests. + * + * @generated from message agntcy.dir.routing.v1.ListPublicationsRequest + */ +export declare type ListPublicationsRequest = Message<"agntcy.dir.routing.v1.ListPublicationsRequest"> & { + /** + * Optional limit on the number of results to return. + * + * @generated from field: optional uint32 limit = 2; + */ + limit?: number; + + /** + * Optional offset for pagination of results. + * + * @generated from field: optional uint32 offset = 3; + */ + offset?: number; +}; + +/** + * Describes the message agntcy.dir.routing.v1.ListPublicationsRequest. + * Use `create(ListPublicationsRequestSchema)` to create a new message. + */ +export declare const ListPublicationsRequestSchema: GenMessage; + +/** + * ListPublicationsItem represents a single publication request in the list response. + * Contains publication details including ID, status, and creation timestamp. + * + * @generated from message agntcy.dir.routing.v1.ListPublicationsItem + */ +export declare type ListPublicationsItem = Message<"agntcy.dir.routing.v1.ListPublicationsItem"> & { + /** + * Unique identifier of the publication operation. + * + * @generated from field: string publication_id = 1; + */ + publicationId: string; + + /** + * Current status of the publication operation. + * + * @generated from field: agntcy.dir.routing.v1.PublicationStatus status = 2; + */ + status: PublicationStatus; + + /** + * Timestamp when the publication operation was created in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string created_time = 3; + */ + createdTime: string; + + /** + * Timestamp of the most recent status update for this publication in the RFC3339 format. 
+ * + * @generated from field: string last_update_time = 4; + */ + lastUpdateTime: string; +}; + +/** + * Describes the message agntcy.dir.routing.v1.ListPublicationsItem. + * Use `create(ListPublicationsItemSchema)` to create a new message. + */ +export declare const ListPublicationsItemSchema: GenMessage; + +/** + * GetPublicationRequest specifies which publication to retrieve by its identifier. + * + * @generated from message agntcy.dir.routing.v1.GetPublicationRequest + */ +export declare type GetPublicationRequest = Message<"agntcy.dir.routing.v1.GetPublicationRequest"> & { + /** + * Unique identifier of the publication operation to query. + * + * @generated from field: string publication_id = 1; + */ + publicationId: string; +}; + +/** + * Describes the message agntcy.dir.routing.v1.GetPublicationRequest. + * Use `create(GetPublicationRequestSchema)` to create a new message. + */ +export declare const GetPublicationRequestSchema: GenMessage; + +/** + * GetPublicationResponse contains the full details of a specific publication request. + * Includes status, progress information, and any error details if applicable. + * + * @generated from message agntcy.dir.routing.v1.GetPublicationResponse + */ +export declare type GetPublicationResponse = Message<"agntcy.dir.routing.v1.GetPublicationResponse"> & { + /** + * Unique identifier of the publication operation. + * + * @generated from field: string publication_id = 1; + */ + publicationId: string; + + /** + * Current status of the publication operation. + * + * @generated from field: agntcy.dir.routing.v1.PublicationStatus status = 2; + */ + status: PublicationStatus; + + /** + * Timestamp when the publication operation was created in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string created_time = 3; + */ + createdTime: string; + + /** + * Timestamp of the most recent status update for this publication in the RFC3339 format. + * + * @generated from field: string last_update_time = 4; + */ + lastUpdateTime: string; +}; + +/** + * Describes the message agntcy.dir.routing.v1.GetPublicationResponse. + * Use `create(GetPublicationResponseSchema)` to create a new message. + */ +export declare const GetPublicationResponseSchema: GenMessage; + +/** + * PublicationStatus represents the current state of a publication request. + * Publications progress from pending to processing to completed or failed states. + * + * @generated from enum agntcy.dir.routing.v1.PublicationStatus + */ +export enum PublicationStatus { + /** + * Default/unset status - should not be used in practice + * + * @generated from enum value: PUBLICATION_STATUS_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Sync operation has been created but not yet started + * + * @generated from enum value: PUBLICATION_STATUS_PENDING = 1; + */ + PENDING = 1, + + /** + * Sync operation is actively discovering and transferring objects + * + * @generated from enum value: PUBLICATION_STATUS_IN_PROGRESS = 2; + */ + IN_PROGRESS = 2, + + /** + * Sync operation has been successfully completed + * + * @generated from enum value: PUBLICATION_STATUS_COMPLETED = 3; + */ + COMPLETED = 3, + + /** + * Sync operation encountered an error and stopped + * + * @generated from enum value: PUBLICATION_STATUS_FAILED = 4; + */ + FAILED = 4, +} + +/** + * Describes the enum agntcy.dir.routing.v1.PublicationStatus. 
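ListPublications streams results and honors the optional limit/offset pair, so pagination lives entirely in the request message. A sketch (page size illustrative):

import { create } from "@bufbuild/protobuf";
import { ListPublicationsRequestSchema } from "./models/agntcy/dir/routing/v1/publication_service_pb.js";

// Both fields are proto3 optional; omit them to list every publication.
const page = create(ListPublicationsRequestSchema, {
  limit: 50,
  offset: 100, // skip the first two pages of 50
});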
+ */ +export declare const PublicationStatusSchema: GenEnum; + +/** + * PublicationService manages publication requests for announcing records to the DHT. + * + * Publications are stored in the database and processed by a worker that runs every hour. + * The publication workflow: + * 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records + * 2. Publication requests are added to the database + * 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published + * 4. PublicationWorker announces the records with these CIDs to the DHT + * + * @generated from service agntcy.dir.routing.v1.PublicationService + */ +export declare const PublicationService: GenService<{ + /** + * CreatePublication creates a new publication request that will be processed by the PublicationWorker. + * The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. + * + * @generated from rpc agntcy.dir.routing.v1.PublicationService.CreatePublication + */ + createPublication: { + methodKind: "unary"; + input: typeof PublishRequestSchema; + output: typeof CreatePublicationResponseSchema; + }, + /** + * ListPublications returns a stream of all publication requests in the system. + * This allows monitoring of pending, processing, and completed publication requests. + * + * @generated from rpc agntcy.dir.routing.v1.PublicationService.ListPublications + */ + listPublications: { + methodKind: "server_streaming"; + input: typeof ListPublicationsRequestSchema; + output: typeof ListPublicationsItemSchema; + }, + /** + * GetPublication retrieves details of a specific publication request by its identifier. + * This includes the current status and any associated metadata. + * + * @generated from rpc agntcy.dir.routing.v1.PublicationService.GetPublication + */ + getPublication: { + methodKind: "unary"; + input: typeof GetPublicationRequestSchema; + output: typeof GetPublicationResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.js index 23ef7af66..8c1c0ae0f 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/publication_service_pb.js @@ -1,81 +1,81 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/publication_service.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; -import { file_agntcy_dir_routing_v1_routing_service } from "./routing_service_pb.js"; - -/** - * Describes the file agntcy/dir/routing/v1/publication_service.proto. 
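Given the lifecycle above (pending, in progress, then completed or failed, advanced by a periodic worker), a caller typically polls GetPublication until a terminal status. A sketch assuming a Connect-style client built from the service descriptor; createClient and the transport wiring are assumptions about how this SDK is consumed, not something the diff confirms:

import { createClient, type Transport } from "@connectrpc/connect";
import { PublicationService, PublicationStatus } from "./models/agntcy/dir/routing/v1/publication_service_pb.js";

// Hypothetical helper: poll a publication until the worker reaches a terminal state.
async function waitForPublication(transport: Transport, publicationId: string) {
  const client = createClient(PublicationService, transport);
  for (;;) {
    const res = await client.getPublication({ publicationId });
    if (res.status === PublicationStatus.COMPLETED) return res;
    if (res.status === PublicationStatus.FAILED) {
      throw new Error(`publication ${publicationId} failed`);
    }
    await new Promise((r) => setTimeout(r, 60_000)); // worker runs periodically
  }
}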
- */ -export const file_agntcy_dir_routing_v1_publication_service = /*@__PURE__*/ - fileDesc("Ci9hZ250Y3kvZGlyL3JvdXRpbmcvdjEvcHVibGljYXRpb25fc2VydmljZS5wcm90bxIVYWdudGN5LmRpci5yb3V0aW5nLnYxIjMKGUNyZWF0ZVB1YmxpY2F0aW9uUmVzcG9uc2USFgoOcHVibGljYXRpb25faWQYASABKAkiVwoXTGlzdFB1YmxpY2F0aW9uc1JlcXVlc3QSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCKYAQoUTGlzdFB1YmxpY2F0aW9uc0l0ZW0SFgoOcHVibGljYXRpb25faWQYASABKAkSOAoGc3RhdHVzGAIgASgOMiguYWdudGN5LmRpci5yb3V0aW5nLnYxLlB1YmxpY2F0aW9uU3RhdHVzEhQKDGNyZWF0ZWRfdGltZRgDIAEoCRIYChBsYXN0X3VwZGF0ZV90aW1lGAQgASgJIi8KFUdldFB1YmxpY2F0aW9uUmVxdWVzdBIWCg5wdWJsaWNhdGlvbl9pZBgBIAEoCSKaAQoWR2V0UHVibGljYXRpb25SZXNwb25zZRIWCg5wdWJsaWNhdGlvbl9pZBgBIAEoCRI4CgZzdGF0dXMYAiABKA4yKC5hZ250Y3kuZGlyLnJvdXRpbmcudjEuUHVibGljYXRpb25TdGF0dXMSFAoMY3JlYXRlZF90aW1lGAMgASgJEhgKEGxhc3RfdXBkYXRlX3RpbWUYBCABKAkqvAEKEVB1YmxpY2F0aW9uU3RhdHVzEiIKHlBVQkxJQ0FUSU9OX1NUQVRVU19VTlNQRUNJRklFRBAAEh4KGlBVQkxJQ0FUSU9OX1NUQVRVU19QRU5ESU5HEAESIgoeUFVCTElDQVRJT05fU1RBVFVTX0lOX1BST0dSRVNTEAISIAocUFVCTElDQVRJT05fU1RBVFVTX0NPTVBMRVRFRBADEh0KGVBVQkxJQ0FUSU9OX1NUQVRVU19GQUlMRUQQBDLkAgoSUHVibGljYXRpb25TZXJ2aWNlEmwKEUNyZWF0ZVB1YmxpY2F0aW9uEiUuYWdudGN5LmRpci5yb3V0aW5nLnYxLlB1Ymxpc2hSZXF1ZXN0GjAuYWdudGN5LmRpci5yb3V0aW5nLnYxLkNyZWF0ZVB1YmxpY2F0aW9uUmVzcG9uc2UScQoQTGlzdFB1YmxpY2F0aW9ucxIuLmFnbnRjeS5kaXIucm91dGluZy52MS5MaXN0UHVibGljYXRpb25zUmVxdWVzdBorLmFnbnRjeS5kaXIucm91dGluZy52MS5MaXN0UHVibGljYXRpb25zSXRlbTABEm0KDkdldFB1YmxpY2F0aW9uEiwuYWdudGN5LmRpci5yb3V0aW5nLnYxLkdldFB1YmxpY2F0aW9uUmVxdWVzdBotLmFnbnRjeS5kaXIucm91dGluZy52MS5HZXRQdWJsaWNhdGlvblJlc3BvbnNlQtEBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhdQdWJsaWNhdGlvblNlcnZpY2VQcm90b1ABWiRnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL3JvdXRpbmcvdjGiAgNBRFKqAhVBZ250Y3kuRGlyLlJvdXRpbmcuVjHKAhVBZ250Y3lcRGlyXFJvdXRpbmdcVjHiAiFBZ250Y3lcRGlyXFJvdXRpbmdcVjFcR1BCTWV0YWRhdGHqAhhBZ250Y3k6OkRpcjo6Um91dGluZzo6VjFiBnByb3RvMw", [file_agntcy_dir_routing_v1_routing_service]); - -/** - * Describes the message agntcy.dir.routing.v1.CreatePublicationResponse. - * Use `create(CreatePublicationResponseSchema)` to create a new message. - */ -export const CreatePublicationResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_publication_service, 0); - -/** - * Describes the message agntcy.dir.routing.v1.ListPublicationsRequest. - * Use `create(ListPublicationsRequestSchema)` to create a new message. - */ -export const ListPublicationsRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_publication_service, 1); - -/** - * Describes the message agntcy.dir.routing.v1.ListPublicationsItem. - * Use `create(ListPublicationsItemSchema)` to create a new message. - */ -export const ListPublicationsItemSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_publication_service, 2); - -/** - * Describes the message agntcy.dir.routing.v1.GetPublicationRequest. - * Use `create(GetPublicationRequestSchema)` to create a new message. - */ -export const GetPublicationRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_publication_service, 3); - -/** - * Describes the message agntcy.dir.routing.v1.GetPublicationResponse. - * Use `create(GetPublicationResponseSchema)` to create a new message. - */ -export const GetPublicationResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_publication_service, 4); - -/** - * Describes the enum agntcy.dir.routing.v1.PublicationStatus. 
- */ -export const PublicationStatusSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_routing_v1_publication_service, 0); - -/** - * PublicationStatus represents the current state of a publication request. - * Publications progress from pending to processing to completed or failed states. - * - * @generated from enum agntcy.dir.routing.v1.PublicationStatus - */ -export const PublicationStatus = /*@__PURE__*/ - tsEnum(PublicationStatusSchema); - -/** - * PublicationService manages publication requests for announcing records to the DHT. - * - * Publications are stored in the database and processed by a worker that runs every hour. - * The publication workflow: - * 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records - * 2. Publication requests are added to the database - * 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published - * 4. PublicationWorker announces the records with these CIDs to the DHT - * - * @generated from service agntcy.dir.routing.v1.PublicationService - */ -export const PublicationService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_routing_v1_publication_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/publication_service.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; +import { file_agntcy_dir_routing_v1_routing_service } from "./routing_service_pb.js"; + +/** + * Describes the file agntcy/dir/routing/v1/publication_service.proto. 
+ */ +export const file_agntcy_dir_routing_v1_publication_service = /*@__PURE__*/ + fileDesc("Ci9hZ250Y3kvZGlyL3JvdXRpbmcvdjEvcHVibGljYXRpb25fc2VydmljZS5wcm90bxIVYWdudGN5LmRpci5yb3V0aW5nLnYxIjMKGUNyZWF0ZVB1YmxpY2F0aW9uUmVzcG9uc2USFgoOcHVibGljYXRpb25faWQYASABKAkiVwoXTGlzdFB1YmxpY2F0aW9uc1JlcXVlc3QSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCKYAQoUTGlzdFB1YmxpY2F0aW9uc0l0ZW0SFgoOcHVibGljYXRpb25faWQYASABKAkSOAoGc3RhdHVzGAIgASgOMiguYWdudGN5LmRpci5yb3V0aW5nLnYxLlB1YmxpY2F0aW9uU3RhdHVzEhQKDGNyZWF0ZWRfdGltZRgDIAEoCRIYChBsYXN0X3VwZGF0ZV90aW1lGAQgASgJIi8KFUdldFB1YmxpY2F0aW9uUmVxdWVzdBIWCg5wdWJsaWNhdGlvbl9pZBgBIAEoCSKaAQoWR2V0UHVibGljYXRpb25SZXNwb25zZRIWCg5wdWJsaWNhdGlvbl9pZBgBIAEoCRI4CgZzdGF0dXMYAiABKA4yKC5hZ250Y3kuZGlyLnJvdXRpbmcudjEuUHVibGljYXRpb25TdGF0dXMSFAoMY3JlYXRlZF90aW1lGAMgASgJEhgKEGxhc3RfdXBkYXRlX3RpbWUYBCABKAkqvAEKEVB1YmxpY2F0aW9uU3RhdHVzEiIKHlBVQkxJQ0FUSU9OX1NUQVRVU19VTlNQRUNJRklFRBAAEh4KGlBVQkxJQ0FUSU9OX1NUQVRVU19QRU5ESU5HEAESIgoeUFVCTElDQVRJT05fU1RBVFVTX0lOX1BST0dSRVNTEAISIAocUFVCTElDQVRJT05fU1RBVFVTX0NPTVBMRVRFRBADEh0KGVBVQkxJQ0FUSU9OX1NUQVRVU19GQUlMRUQQBDLkAgoSUHVibGljYXRpb25TZXJ2aWNlEmwKEUNyZWF0ZVB1YmxpY2F0aW9uEiUuYWdudGN5LmRpci5yb3V0aW5nLnYxLlB1Ymxpc2hSZXF1ZXN0GjAuYWdudGN5LmRpci5yb3V0aW5nLnYxLkNyZWF0ZVB1YmxpY2F0aW9uUmVzcG9uc2UScQoQTGlzdFB1YmxpY2F0aW9ucxIuLmFnbnRjeS5kaXIucm91dGluZy52MS5MaXN0UHVibGljYXRpb25zUmVxdWVzdBorLmFnbnRjeS5kaXIucm91dGluZy52MS5MaXN0UHVibGljYXRpb25zSXRlbTABEm0KDkdldFB1YmxpY2F0aW9uEiwuYWdudGN5LmRpci5yb3V0aW5nLnYxLkdldFB1YmxpY2F0aW9uUmVxdWVzdBotLmFnbnRjeS5kaXIucm91dGluZy52MS5HZXRQdWJsaWNhdGlvblJlc3BvbnNlQtEBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhdQdWJsaWNhdGlvblNlcnZpY2VQcm90b1ABWiRnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL3JvdXRpbmcvdjGiAgNBRFKqAhVBZ250Y3kuRGlyLlJvdXRpbmcuVjHKAhVBZ250Y3lcRGlyXFJvdXRpbmdcVjHiAiFBZ250Y3lcRGlyXFJvdXRpbmdcVjFcR1BCTWV0YWRhdGHqAhhBZ250Y3k6OkRpcjo6Um91dGluZzo6VjFiBnByb3RvMw", [file_agntcy_dir_routing_v1_routing_service]); + +/** + * Describes the message agntcy.dir.routing.v1.CreatePublicationResponse. + * Use `create(CreatePublicationResponseSchema)` to create a new message. + */ +export const CreatePublicationResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_publication_service, 0); + +/** + * Describes the message agntcy.dir.routing.v1.ListPublicationsRequest. + * Use `create(ListPublicationsRequestSchema)` to create a new message. + */ +export const ListPublicationsRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_publication_service, 1); + +/** + * Describes the message agntcy.dir.routing.v1.ListPublicationsItem. + * Use `create(ListPublicationsItemSchema)` to create a new message. + */ +export const ListPublicationsItemSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_publication_service, 2); + +/** + * Describes the message agntcy.dir.routing.v1.GetPublicationRequest. + * Use `create(GetPublicationRequestSchema)` to create a new message. + */ +export const GetPublicationRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_publication_service, 3); + +/** + * Describes the message agntcy.dir.routing.v1.GetPublicationResponse. + * Use `create(GetPublicationResponseSchema)` to create a new message. + */ +export const GetPublicationResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_publication_service, 4); + +/** + * Describes the enum agntcy.dir.routing.v1.PublicationStatus. 
+ */ +export const PublicationStatusSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_routing_v1_publication_service, 0); + +/** + * PublicationStatus represents the current state of a publication request. + * Publications progress from pending to processing to completed or failed states. + * + * @generated from enum agntcy.dir.routing.v1.PublicationStatus + */ +export const PublicationStatus = /*@__PURE__*/ + tsEnum(PublicationStatusSchema); + +/** + * PublicationService manages publication requests for announcing records to the DHT. + * + * Publications are stored in the database and processed by a worker that runs every hour. + * The publication workflow: + * 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records + * 2. Publication requests are added to the database + * 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published + * 4. PublicationWorker announces the records with these CIDs to the DHT + * + * @generated from service agntcy.dir.routing.v1.PublicationService + */ +export const PublicationService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_routing_v1_publication_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.d.ts index ce45040ea..8f637fe19 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.d.ts @@ -1,94 +1,94 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/record_query.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/routing/v1/record_query.proto. - */ -export declare const file_agntcy_dir_routing_v1_record_query: GenFile; - -/** - * A query to match the record against during discovery. - * For example: - * { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } - * { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } - * { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } - * { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } - * - * @generated from message agntcy.dir.routing.v1.RecordQuery - */ -export declare type RecordQuery = Message<"agntcy.dir.routing.v1.RecordQuery"> & { - /** - * The type of the query to match against. - * - * @generated from field: agntcy.dir.routing.v1.RecordQueryType type = 1; - */ - type: RecordQueryType; - - /** - * The query value to match against. - * - * @generated from field: string value = 2; - */ - value: string; -}; - -/** - * Describes the message agntcy.dir.routing.v1.RecordQuery. - * Use `create(RecordQuerySchema)` to create a new message. - */ -export declare const RecordQuerySchema: GenMessage; - -/** - * Defines a list of supported record query types. - * - * @generated from enum agntcy.dir.routing.v1.RecordQueryType - */ -export enum RecordQueryType { - /** - * Unspecified query type. - * - * @generated from enum value: RECORD_QUERY_TYPE_UNSPECIFIED = 0; - */ - UNSPECIFIED = 0, - - /** - * Query for a skill name. 
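The generated `PublicationService` descriptor above can be wired into a Connect client to drive the workflow described in its comment: create a publication, then poll its status while the background worker announces the CIDs to the DHT. A minimal sketch, assuming a connect-es v2 gRPC transport from `@connectrpc/connect-node` and a hypothetical server address; transport options may differ across connect-es versions:

```ts
import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { PublicationService, PublicationStatus } from "./publication_service_pb.js";

// Hypothetical endpoint; substitute your Directory server address.
const transport = createGrpcTransport({ baseUrl: "http://localhost:8888" });
const client = createClient(PublicationService, transport);

// Step 1 of the workflow: CreatePublication takes a routing PublishRequest
// (left empty here purely for illustration).
const { publicationId } = await client.createPublication({});

// Steps 3-4 run in the background; poll GetPublication until a terminal state.
let status = PublicationStatus.PENDING;
while (status !== PublicationStatus.COMPLETED && status !== PublicationStatus.FAILED) {
  await new Promise((resolve) => setTimeout(resolve, 5_000));
  ({ status } = await client.getPublication({ publicationId }));
}
```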
- * - * @generated from enum value: RECORD_QUERY_TYPE_SKILL = 1; - */ - SKILL = 1, - - /** - * Query for a locator type. - * - * @generated from enum value: RECORD_QUERY_TYPE_LOCATOR = 2; - */ - LOCATOR = 2, - - /** - * Query for a domain name. - * - * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN = 3; - */ - DOMAIN = 3, - - /** - * Query for a module name. - * - * @generated from enum value: RECORD_QUERY_TYPE_MODULE = 4; - */ - MODULE = 4, -} - -/** - * Describes the enum agntcy.dir.routing.v1.RecordQueryType. - */ -export declare const RecordQueryTypeSchema: GenEnum; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/record_query.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/routing/v1/record_query.proto. + */ +export declare const file_agntcy_dir_routing_v1_record_query: GenFile; + +/** + * A query to match the record against during discovery. + * For example: + * { type: RECORD_QUERY_TYPE_SKILL, value: "Natural Language Processing" } + * { type: RECORD_QUERY_TYPE_LOCATOR, value: "helm-chart" } + * { type: RECORD_QUERY_TYPE_DOMAIN, value: "research" } + * { type: RECORD_QUERY_TYPE_MODULE, value: "core/llm/model" } + * + * @generated from message agntcy.dir.routing.v1.RecordQuery + */ +export declare type RecordQuery = Message<"agntcy.dir.routing.v1.RecordQuery"> & { + /** + * The type of the query to match against. + * + * @generated from field: agntcy.dir.routing.v1.RecordQueryType type = 1; + */ + type: RecordQueryType; + + /** + * The query value to match against. + * + * @generated from field: string value = 2; + */ + value: string; +}; + +/** + * Describes the message agntcy.dir.routing.v1.RecordQuery. + * Use `create(RecordQuerySchema)` to create a new message. + */ +export declare const RecordQuerySchema: GenMessage; + +/** + * Defines a list of supported record query types. + * + * @generated from enum agntcy.dir.routing.v1.RecordQueryType + */ +export enum RecordQueryType { + /** + * Unspecified query type. + * + * @generated from enum value: RECORD_QUERY_TYPE_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Query for a skill name. + * + * @generated from enum value: RECORD_QUERY_TYPE_SKILL = 1; + */ + SKILL = 1, + + /** + * Query for a locator type. + * + * @generated from enum value: RECORD_QUERY_TYPE_LOCATOR = 2; + */ + LOCATOR = 2, + + /** + * Query for a domain name. + * + * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN = 3; + */ + DOMAIN = 3, + + /** + * Query for a module name. + * + * @generated from enum value: RECORD_QUERY_TYPE_MODULE = 4; + */ + MODULE = 4, +} + +/** + * Describes the enum agntcy.dir.routing.v1.RecordQueryType. 
+ */ +export declare const RecordQueryTypeSchema: GenEnum; + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.js b/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.js index 4c04054ef..eac5c060e 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/record_query_pb.js @@ -1,36 +1,36 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/record_query.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/routing/v1/record_query.proto. - */ -export const file_agntcy_dir_routing_v1_record_query = /*@__PURE__*/ - fileDesc("CihhZ250Y3kvZGlyL3JvdXRpbmcvdjEvcmVjb3JkX3F1ZXJ5LnByb3RvEhVhZ250Y3kuZGlyLnJvdXRpbmcudjEiUgoLUmVjb3JkUXVlcnkSNAoEdHlwZRgBIAEoDjImLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeVR5cGUSDQoFdmFsdWUYAiABKAkqrAEKD1JlY29yZFF1ZXJ5VHlwZRIhCh1SRUNPUkRfUVVFUllfVFlQRV9VTlNQRUNJRklFRBAAEhsKF1JFQ09SRF9RVUVSWV9UWVBFX1NLSUxMEAESHQoZUkVDT1JEX1FVRVJZX1RZUEVfTE9DQVRPUhACEhwKGFJFQ09SRF9RVUVSWV9UWVBFX0RPTUFJThADEhwKGFJFQ09SRF9RVUVSWV9UWVBFX01PRFVMRRAEQsoBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhBSZWNvcmRRdWVyeVByb3RvUAFaJGdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvcm91dGluZy92MaICA0FEUqoCFUFnbnRjeS5EaXIuUm91dGluZy5WMcoCFUFnbnRjeVxEaXJcUm91dGluZ1xWMeICIUFnbnRjeVxEaXJcUm91dGluZ1xWMVxHUEJNZXRhZGF0YeoCGEFnbnRjeTo6RGlyOjpSb3V0aW5nOjpWMWIGcHJvdG8z"); - -/** - * Describes the message agntcy.dir.routing.v1.RecordQuery. - * Use `create(RecordQuerySchema)` to create a new message. - */ -export const RecordQuerySchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_record_query, 0); - -/** - * Describes the enum agntcy.dir.routing.v1.RecordQueryType. - */ -export const RecordQueryTypeSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_routing_v1_record_query, 0); - -/** - * Defines a list of supported record query types. - * - * @generated from enum agntcy.dir.routing.v1.RecordQueryType - */ -export const RecordQueryType = /*@__PURE__*/ - tsEnum(RecordQueryTypeSchema); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/record_query.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/routing/v1/record_query.proto. 
+ */ +export const file_agntcy_dir_routing_v1_record_query = /*@__PURE__*/ + fileDesc("CihhZ250Y3kvZGlyL3JvdXRpbmcvdjEvcmVjb3JkX3F1ZXJ5LnByb3RvEhVhZ250Y3kuZGlyLnJvdXRpbmcudjEiUgoLUmVjb3JkUXVlcnkSNAoEdHlwZRgBIAEoDjImLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeVR5cGUSDQoFdmFsdWUYAiABKAkqrAEKD1JlY29yZFF1ZXJ5VHlwZRIhCh1SRUNPUkRfUVVFUllfVFlQRV9VTlNQRUNJRklFRBAAEhsKF1JFQ09SRF9RVUVSWV9UWVBFX1NLSUxMEAESHQoZUkVDT1JEX1FVRVJZX1RZUEVfTE9DQVRPUhACEhwKGFJFQ09SRF9RVUVSWV9UWVBFX0RPTUFJThADEhwKGFJFQ09SRF9RVUVSWV9UWVBFX01PRFVMRRAEQsoBChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhBSZWNvcmRRdWVyeVByb3RvUAFaJGdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvcm91dGluZy92MaICA0FEUqoCFUFnbnRjeS5EaXIuUm91dGluZy5WMcoCFUFnbnRjeVxEaXJcUm91dGluZ1xWMeICIUFnbnRjeVxEaXJcUm91dGluZ1xWMVxHUEJNZXRhZGF0YeoCGEFnbnRjeTo6RGlyOjpSb3V0aW5nOjpWMWIGcHJvdG8z"); + +/** + * Describes the message agntcy.dir.routing.v1.RecordQuery. + * Use `create(RecordQuerySchema)` to create a new message. + */ +export const RecordQuerySchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_record_query, 0); + +/** + * Describes the enum agntcy.dir.routing.v1.RecordQueryType. + */ +export const RecordQueryTypeSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_routing_v1_record_query, 0); + +/** + * Defines a list of supported record query types. + * + * @generated from enum agntcy.dir.routing.v1.RecordQueryType + */ +export const RecordQueryType = /*@__PURE__*/ + tsEnum(RecordQueryTypeSchema); + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.d.ts index ddf99249d..04feca44d 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.d.ts @@ -1,314 +1,314 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/routing_service.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { RecordRef } from "../../core/v1/record_pb.js"; -import type { RecordQuery } from "../../search/v1/record_query_pb.js"; -import type { RecordQuery as RecordQuery$1 } from "./record_query_pb.js"; -import type { Peer } from "./peer_pb.js"; -import type { EmptySchema } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/routing/v1/routing_service.proto. - */ -export declare const file_agntcy_dir_routing_v1_routing_service: GenFile; - -/** - * @generated from message agntcy.dir.routing.v1.PublishRequest - */ -export declare type PublishRequest = Message<"agntcy.dir.routing.v1.PublishRequest"> & { - /** - * @generated from oneof agntcy.dir.routing.v1.PublishRequest.request - */ - request: { - /** - * References to the records to be published. - * - * @generated from field: agntcy.dir.routing.v1.RecordRefs record_refs = 1; - */ - value: RecordRefs; - case: "recordRefs"; - } | { - /** - * Queries to match against the records to be published. - * - * @generated from field: agntcy.dir.routing.v1.RecordQueries queries = 2; - */ - value: RecordQueries; - case: "queries"; - } | { case: undefined; value?: undefined }; -}; - -/** - * Describes the message agntcy.dir.routing.v1.PublishRequest. - * Use `create(PublishRequestSchema)` to create a new message. 
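The routing-level `RecordQuery` above matches exact label values (the search-level variant later in this diff adds wildcard support). A small sketch building one query per supported type with protobuf-es' `create()`, mirroring the examples in the generated comment:

```ts
import { create } from "@bufbuild/protobuf";
import { RecordQuerySchema, RecordQueryType } from "./record_query_pb.js";

// One query per supported routing query type.
const bySkill = create(RecordQuerySchema, { type: RecordQueryType.SKILL, value: "Natural Language Processing" });
const byLocator = create(RecordQuerySchema, { type: RecordQueryType.LOCATOR, value: "helm-chart" });
const byDomain = create(RecordQuerySchema, { type: RecordQueryType.DOMAIN, value: "research" });
const byModule = create(RecordQuerySchema, { type: RecordQueryType.MODULE, value: "core/llm/model" });
```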
- */ -export declare const PublishRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.UnpublishRequest - */ -export declare type UnpublishRequest = Message<"agntcy.dir.routing.v1.UnpublishRequest"> & { - /** - * @generated from oneof agntcy.dir.routing.v1.UnpublishRequest.request - */ - request: { - /** - * References to the records to be unpublished. - * - * @generated from field: agntcy.dir.routing.v1.RecordRefs record_refs = 1; - */ - value: RecordRefs; - case: "recordRefs"; - } | { - /** - * Queries to match against the records to be unpublished. - * - * @generated from field: agntcy.dir.routing.v1.RecordQueries queries = 2; - */ - value: RecordQueries; - case: "queries"; - } | { case: undefined; value?: undefined }; -}; - -/** - * Describes the message agntcy.dir.routing.v1.UnpublishRequest. - * Use `create(UnpublishRequestSchema)` to create a new message. - */ -export declare const UnpublishRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.RecordRefs - */ -export declare type RecordRefs = Message<"agntcy.dir.routing.v1.RecordRefs"> & { - /** - * @generated from field: repeated agntcy.dir.core.v1.RecordRef refs = 1; - */ - refs: RecordRef[]; -}; - -/** - * Describes the message agntcy.dir.routing.v1.RecordRefs. - * Use `create(RecordRefsSchema)` to create a new message. - */ -export declare const RecordRefsSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.RecordQueries - */ -export declare type RecordQueries = Message<"agntcy.dir.routing.v1.RecordQueries"> & { - /** - * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; - */ - queries: RecordQuery[]; -}; - -/** - * Describes the message agntcy.dir.routing.v1.RecordQueries. - * Use `create(RecordQueriesSchema)` to create a new message. - */ -export declare const RecordQueriesSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.SearchRequest - */ -export declare type SearchRequest = Message<"agntcy.dir.routing.v1.SearchRequest"> & { - /** - * List of queries to match against the records. - * - * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery queries = 1; - */ - queries: RecordQuery$1[]; - - /** - * Minimal target query match score. - * For example, if min_match_score=2, it will return records that match - * at least two of the queries. - * If not set, it will return records that match at least one query. - * - * @generated from field: optional uint32 min_match_score = 2; - */ - minMatchScore?: number; - - /** - * Limit the number of results returned. - * If not set, it will return all discovered records. - * Note that this is a soft limit, as the search may return more results - * than the limit if there are multiple peers providing the same record. - * - * @generated from field: optional uint32 limit = 3; - */ - limit?: number; -}; - -/** - * Describes the message agntcy.dir.routing.v1.SearchRequest. - * Use `create(SearchRequestSchema)` to create a new message. - */ -export declare const SearchRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.SearchResponse - */ -export declare type SearchResponse = Message<"agntcy.dir.routing.v1.SearchResponse"> & { - /** - * The record that matches the search query. - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; - - /** - * The peer that provided the record. 
- * - * @generated from field: agntcy.dir.routing.v1.Peer peer = 2; - */ - peer?: Peer; - - /** - * The queries that were matched. - * - * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery match_queries = 3; - */ - matchQueries: RecordQuery$1[]; - - /** - * The score of the search match. - * - * @generated from field: uint32 match_score = 4; - */ - matchScore: number; -}; - -/** - * Describes the message agntcy.dir.routing.v1.SearchResponse. - * Use `create(SearchResponseSchema)` to create a new message. - */ -export declare const SearchResponseSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.ListRequest - */ -export declare type ListRequest = Message<"agntcy.dir.routing.v1.ListRequest"> & { - /** - * List of queries to match against the records. - * If set, all queries must match for the record to be returned. - * - * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery queries = 1; - */ - queries: RecordQuery$1[]; - - /** - * Limit the number of results returned. - * If not set, it will return all records that this peer is providing. - * - * @generated from field: optional uint32 limit = 2; - */ - limit?: number; -}; - -/** - * Describes the message agntcy.dir.routing.v1.ListRequest. - * Use `create(ListRequestSchema)` to create a new message. - */ -export declare const ListRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.routing.v1.ListResponse - */ -export declare type ListResponse = Message<"agntcy.dir.routing.v1.ListResponse"> & { - /** - * The record that matches the list queries. - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; - - /** - * Labels associated with this record (skills, domains, modules) - * Derived from the record content for CLI display purposes - * - * @generated from field: repeated string labels = 2; - */ - labels: string[]; -}; - -/** - * Describes the message agntcy.dir.routing.v1.ListResponse. - * Use `create(ListResponseSchema)` to create a new message. - */ -export declare const ListResponseSchema: GenMessage; - -/** - * Defines an interface for announcement and discovery - * of records across interconnected network. - * - * Middleware should be used to control who can perform these RPCs. - * Policies for the middleware can be handled via separate service. - * - * @generated from service agntcy.dir.routing.v1.RoutingService - */ -export declare const RoutingService: GenService<{ - /** - * Announce to the network that this peer is providing a given record. - * This enables other peers to discover this record and retrieve it - * from this peer. Listeners can use this event to perform custom operations, - * for example by cloning the record. - * - * Items need to be periodically republished (eg. 24h) to the network - * to avoid stale data. Republication should be done in the background. - * - * @generated from rpc agntcy.dir.routing.v1.RoutingService.Publish - */ - publish: { - methodKind: "unary"; - input: typeof PublishRequestSchema; - output: typeof EmptySchema; - }, - /** - * Stop serving this record to the network. If other peers try - * to retrieve this record, the peer will refuse the request. - * - * @generated from rpc agntcy.dir.routing.v1.RoutingService.Unpublish - */ - unpublish: { - methodKind: "unary"; - input: typeof UnpublishRequestSchema; - output: typeof EmptySchema; - }, - /** - * Search records based on the request across the network. 
- * This will search the network for the record with the given parameters. - * - * It is possible that the records are stale or that they do not exist. - * Some records may be provided by multiple peers. - * - * Results from the search can be used as an input - * to Pull operation to retrieve the records. - * - * @generated from rpc agntcy.dir.routing.v1.RoutingService.Search - */ - search: { - methodKind: "server_streaming"; - input: typeof SearchRequestSchema; - output: typeof SearchResponseSchema; - }, - /** - * List all records that this peer is currently providing - * that match the given parameters. - * This operation does not interact with the network. - * - * @generated from rpc agntcy.dir.routing.v1.RoutingService.List - */ - list: { - methodKind: "server_streaming"; - input: typeof ListRequestSchema; - output: typeof ListResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/routing_service.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { RecordRef } from "../../core/v1/record_pb.js"; +import type { RecordQuery } from "../../search/v1/record_query_pb.js"; +import type { RecordQuery as RecordQuery$1 } from "./record_query_pb.js"; +import type { Peer } from "./peer_pb.js"; +import type { EmptySchema } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/routing/v1/routing_service.proto. + */ +export declare const file_agntcy_dir_routing_v1_routing_service: GenFile; + +/** + * @generated from message agntcy.dir.routing.v1.PublishRequest + */ +export declare type PublishRequest = Message<"agntcy.dir.routing.v1.PublishRequest"> & { + /** + * @generated from oneof agntcy.dir.routing.v1.PublishRequest.request + */ + request: { + /** + * References to the records to be published. + * + * @generated from field: agntcy.dir.routing.v1.RecordRefs record_refs = 1; + */ + value: RecordRefs; + case: "recordRefs"; + } | { + /** + * Queries to match against the records to be published. + * + * @generated from field: agntcy.dir.routing.v1.RecordQueries queries = 2; + */ + value: RecordQueries; + case: "queries"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message agntcy.dir.routing.v1.PublishRequest. + * Use `create(PublishRequestSchema)` to create a new message. + */ +export declare const PublishRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.UnpublishRequest + */ +export declare type UnpublishRequest = Message<"agntcy.dir.routing.v1.UnpublishRequest"> & { + /** + * @generated from oneof agntcy.dir.routing.v1.UnpublishRequest.request + */ + request: { + /** + * References to the records to be unpublished. + * + * @generated from field: agntcy.dir.routing.v1.RecordRefs record_refs = 1; + */ + value: RecordRefs; + case: "recordRefs"; + } | { + /** + * Queries to match against the records to be unpublished. + * + * @generated from field: agntcy.dir.routing.v1.RecordQueries queries = 2; + */ + value: RecordQueries; + case: "queries"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message agntcy.dir.routing.v1.UnpublishRequest. + * Use `create(UnpublishRequestSchema)` to create a new message. 
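`PublishRequest` and `UnpublishRequest` share the same `oneof request` shape, so selecting records by explicit references versus by stored-search queries is just a different `case`. A sketch of both variants; it assumes `core/v1/record_pb.js` exports a `RecordRefSchema` whose message carries a string `cid` field, which is not shown in this diff:

```ts
import { create } from "@bufbuild/protobuf";
import { RecordRefSchema } from "../../core/v1/record_pb.js";
import { RecordQuerySchema, RecordQueryType } from "../../search/v1/record_query_pb.js";
import { PublishRequestSchema, RecordQueriesSchema, RecordRefsSchema } from "./routing_service_pb.js";

// Variant 1: publish specific records by reference (assumed `cid` field).
const byRefs = create(PublishRequestSchema, {
  request: {
    case: "recordRefs",
    value: create(RecordRefsSchema, {
      refs: [create(RecordRefSchema, { cid: "<record CID>" })],
    }),
  },
});

// Variant 2: publish whatever matches a search-level query; note that
// RecordQueries embeds agntcy.dir.search.v1.RecordQuery, not the routing one.
const byQueries = create(PublishRequestSchema, {
  request: {
    case: "queries",
    value: create(RecordQueriesSchema, {
      queries: [create(RecordQuerySchema, { type: RecordQueryType.SKILL_NAME, value: "python*" })],
    }),
  },
});
```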
+ */ +export declare const UnpublishRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.RecordRefs + */ +export declare type RecordRefs = Message<"agntcy.dir.routing.v1.RecordRefs"> & { + /** + * @generated from field: repeated agntcy.dir.core.v1.RecordRef refs = 1; + */ + refs: RecordRef[]; +}; + +/** + * Describes the message agntcy.dir.routing.v1.RecordRefs. + * Use `create(RecordRefsSchema)` to create a new message. + */ +export declare const RecordRefsSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.RecordQueries + */ +export declare type RecordQueries = Message<"agntcy.dir.routing.v1.RecordQueries"> & { + /** + * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; + */ + queries: RecordQuery[]; +}; + +/** + * Describes the message agntcy.dir.routing.v1.RecordQueries. + * Use `create(RecordQueriesSchema)` to create a new message. + */ +export declare const RecordQueriesSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.SearchRequest + */ +export declare type SearchRequest = Message<"agntcy.dir.routing.v1.SearchRequest"> & { + /** + * List of queries to match against the records. + * + * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery queries = 1; + */ + queries: RecordQuery$1[]; + + /** + * Minimal target query match score. + * For example, if min_match_score=2, it will return records that match + * at least two of the queries. + * If not set, it will return records that match at least one query. + * + * @generated from field: optional uint32 min_match_score = 2; + */ + minMatchScore?: number; + + /** + * Limit the number of results returned. + * If not set, it will return all discovered records. + * Note that this is a soft limit, as the search may return more results + * than the limit if there are multiple peers providing the same record. + * + * @generated from field: optional uint32 limit = 3; + */ + limit?: number; +}; + +/** + * Describes the message agntcy.dir.routing.v1.SearchRequest. + * Use `create(SearchRequestSchema)` to create a new message. + */ +export declare const SearchRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.SearchResponse + */ +export declare type SearchResponse = Message<"agntcy.dir.routing.v1.SearchResponse"> & { + /** + * The record that matches the search query. + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; + + /** + * The peer that provided the record. + * + * @generated from field: agntcy.dir.routing.v1.Peer peer = 2; + */ + peer?: Peer; + + /** + * The queries that were matched. + * + * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery match_queries = 3; + */ + matchQueries: RecordQuery$1[]; + + /** + * The score of the search match. + * + * @generated from field: uint32 match_score = 4; + */ + matchScore: number; +}; + +/** + * Describes the message agntcy.dir.routing.v1.SearchResponse. + * Use `create(SearchResponseSchema)` to create a new message. + */ +export declare const SearchResponseSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.ListRequest + */ +export declare type ListRequest = Message<"agntcy.dir.routing.v1.ListRequest"> & { + /** + * List of queries to match against the records. + * If set, all queries must match for the record to be returned. 
+ * + * @generated from field: repeated agntcy.dir.routing.v1.RecordQuery queries = 1; + */ + queries: RecordQuery$1[]; + + /** + * Limit the number of results returned. + * If not set, it will return all records that this peer is providing. + * + * @generated from field: optional uint32 limit = 2; + */ + limit?: number; +}; + +/** + * Describes the message agntcy.dir.routing.v1.ListRequest. + * Use `create(ListRequestSchema)` to create a new message. + */ +export declare const ListRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.routing.v1.ListResponse + */ +export declare type ListResponse = Message<"agntcy.dir.routing.v1.ListResponse"> & { + /** + * The record that matches the list queries. + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; + + /** + * Labels associated with this record (skills, domains, modules) + * Derived from the record content for CLI display purposes + * + * @generated from field: repeated string labels = 2; + */ + labels: string[]; +}; + +/** + * Describes the message agntcy.dir.routing.v1.ListResponse. + * Use `create(ListResponseSchema)` to create a new message. + */ +export declare const ListResponseSchema: GenMessage; + +/** + * Defines an interface for announcement and discovery + * of records across interconnected network. + * + * Middleware should be used to control who can perform these RPCs. + * Policies for the middleware can be handled via separate service. + * + * @generated from service agntcy.dir.routing.v1.RoutingService + */ +export declare const RoutingService: GenService<{ + /** + * Announce to the network that this peer is providing a given record. + * This enables other peers to discover this record and retrieve it + * from this peer. Listeners can use this event to perform custom operations, + * for example by cloning the record. + * + * Items need to be periodically republished (eg. 24h) to the network + * to avoid stale data. Republication should be done in the background. + * + * @generated from rpc agntcy.dir.routing.v1.RoutingService.Publish + */ + publish: { + methodKind: "unary"; + input: typeof PublishRequestSchema; + output: typeof EmptySchema; + }, + /** + * Stop serving this record to the network. If other peers try + * to retrieve this record, the peer will refuse the request. + * + * @generated from rpc agntcy.dir.routing.v1.RoutingService.Unpublish + */ + unpublish: { + methodKind: "unary"; + input: typeof UnpublishRequestSchema; + output: typeof EmptySchema; + }, + /** + * Search records based on the request across the network. + * This will search the network for the record with the given parameters. + * + * It is possible that the records are stale or that they do not exist. + * Some records may be provided by multiple peers. + * + * Results from the search can be used as an input + * to Pull operation to retrieve the records. + * + * @generated from rpc agntcy.dir.routing.v1.RoutingService.Search + */ + search: { + methodKind: "server_streaming"; + input: typeof SearchRequestSchema; + output: typeof SearchResponseSchema; + }, + /** + * List all records that this peer is currently providing + * that match the given parameters. + * This operation does not interact with the network. 
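`Search` and `List` are declared as server-streaming RPCs, so a Connect client exposes them as async iterables. A sketch of a network search that requires at least two of three routing queries to match; the endpoint is hypothetical, as in the earlier publication example:

```ts
import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { RecordQueryType } from "./record_query_pb.js";
import { RoutingService } from "./routing_service_pb.js";

const transport = createGrpcTransport({ baseUrl: "http://localhost:8888" });
const routing = createClient(RoutingService, transport);

// min_match_score=2: only records matching at least two queries are streamed
// back; `limit` is a soft cap, since several peers may provide the same record.
for await (const hit of routing.search({
  queries: [
    { type: RecordQueryType.SKILL, value: "Natural Language Processing" },
    { type: RecordQueryType.DOMAIN, value: "research" },
    { type: RecordQueryType.LOCATOR, value: "helm-chart" },
  ],
  minMatchScore: 2,
  limit: 10,
})) {
  // Each response names the providing peer and the matched queries;
  // hit.recordRef can feed a subsequent Pull operation.
  console.log(hit.matchScore, hit.peer, hit.matchQueries.length);
}
```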
+ * + * @generated from rpc agntcy.dir.routing.v1.RoutingService.List + */ + list: { + methodKind: "server_streaming"; + input: typeof ListRequestSchema; + output: typeof ListResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.js index 882a25ac6..ac42075b1 100644 --- a/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/routing/v1/routing_service_pb.js @@ -1,88 +1,88 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/routing/v1/routing_service.proto (package agntcy.dir.routing.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; -import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; -import { file_agntcy_dir_routing_v1_peer } from "./peer_pb.js"; -import { file_agntcy_dir_routing_v1_record_query } from "./record_query_pb.js"; -import { file_agntcy_dir_search_v1_record_query } from "../../search/v1/record_query_pb.js"; -import { file_google_protobuf_empty } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/routing/v1/routing_service.proto. - */ -export const file_agntcy_dir_routing_v1_routing_service = /*@__PURE__*/ - fileDesc("CithZ250Y3kvZGlyL3JvdXRpbmcvdjEvcm91dGluZ19zZXJ2aWNlLnByb3RvEhVhZ250Y3kuZGlyLnJvdXRpbmcudjEijgEKDlB1Ymxpc2hSZXF1ZXN0EjgKC3JlY29yZF9yZWZzGAEgASgLMiEuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFJlZnNIABI3CgdxdWVyaWVzGAIgASgLMiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJpZXNIAEIJCgdyZXF1ZXN0IpABChBVbnB1Ymxpc2hSZXF1ZXN0EjgKC3JlY29yZF9yZWZzGAEgASgLMiEuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFJlZnNIABI3CgdxdWVyaWVzGAIgASgLMiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJpZXNIAEIJCgdyZXF1ZXN0IjkKClJlY29yZFJlZnMSKwoEcmVmcxgBIAMoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYiQwoNUmVjb3JkUXVlcmllcxIyCgdxdWVyaWVzGAEgAygLMiEuYWdudGN5LmRpci5zZWFyY2gudjEuUmVjb3JkUXVlcnkilAEKDVNlYXJjaFJlcXVlc3QSMwoHcXVlcmllcxgBIAMoCzIiLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeRIcCg9taW5fbWF0Y2hfc2NvcmUYAiABKA1IAIgBARISCgVsaW1pdBgDIAEoDUgBiAEBQhIKEF9taW5fbWF0Y2hfc2NvcmVCCAoGX2xpbWl0Ir4BCg5TZWFyY2hSZXNwb25zZRIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhIpCgRwZWVyGAIgASgLMhsuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXISOQoNbWF0Y2hfcXVlcmllcxgDIAMoCzIiLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeRITCgttYXRjaF9zY29yZRgEIAEoDSJgCgtMaXN0UmVxdWVzdBIzCgdxdWVyaWVzGAEgAygLMiIuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJ5EhIKBWxpbWl0GAIgASgNSACIAQFCCAoGX2xpbWl0IlEKDExpc3RSZXNwb25zZRIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhIOCgZsYWJlbHMYAiADKAky1AIKDlJvdXRpbmdTZXJ2aWNlEkgKB1B1Ymxpc2gSJS5hZ250Y3kuZGlyLnJvdXRpbmcudjEuUHVibGlzaFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkSTAoJVW5wdWJsaXNoEicuYWdudGN5LmRpci5yb3V0aW5nLnYxLlVucHVibGlzaFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkSVwoGU2VhcmNoEiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlNlYXJjaFJlcXVlc3QaJS5hZ250Y3kuZGlyLnJvdXRpbmcudjEuU2VhcmNoUmVzcG9uc2UwARJRCgRMaXN0EiIuYWdudGN5LmRpci5yb3V0aW5nLnYxLkxpc3RSZXF1ZXN0GiMuYWdudGN5LmRpci5yb3V0aW5nLnYxLkxpc3RSZXNwb25zZTABQs0BChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhNSb3V0aW5nU2VydmljZVByb3RvUAFaJGdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvcm91dGluZy92MaICA0FEUqoCFUFnbnRjeS5EaXIuUm91dGluZy5WMcoCFUFnbnRjeVxEaXJcUm91dGluZ1xWMeICIUFnbnRjeVxEaXJcUm91dGluZ1xWMVxHUEJNZXRhZGF0YeoC
GEFnbnRjeTo6RGlyOjpSb3V0aW5nOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_routing_v1_peer, file_agntcy_dir_routing_v1_record_query, file_agntcy_dir_search_v1_record_query, file_google_protobuf_empty]); - -/** - * Describes the message agntcy.dir.routing.v1.PublishRequest. - * Use `create(PublishRequestSchema)` to create a new message. - */ -export const PublishRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 0); - -/** - * Describes the message agntcy.dir.routing.v1.UnpublishRequest. - * Use `create(UnpublishRequestSchema)` to create a new message. - */ -export const UnpublishRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 1); - -/** - * Describes the message agntcy.dir.routing.v1.RecordRefs. - * Use `create(RecordRefsSchema)` to create a new message. - */ -export const RecordRefsSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 2); - -/** - * Describes the message agntcy.dir.routing.v1.RecordQueries. - * Use `create(RecordQueriesSchema)` to create a new message. - */ -export const RecordQueriesSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 3); - -/** - * Describes the message agntcy.dir.routing.v1.SearchRequest. - * Use `create(SearchRequestSchema)` to create a new message. - */ -export const SearchRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 4); - -/** - * Describes the message agntcy.dir.routing.v1.SearchResponse. - * Use `create(SearchResponseSchema)` to create a new message. - */ -export const SearchResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 5); - -/** - * Describes the message agntcy.dir.routing.v1.ListRequest. - * Use `create(ListRequestSchema)` to create a new message. - */ -export const ListRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 6); - -/** - * Describes the message agntcy.dir.routing.v1.ListResponse. - * Use `create(ListResponseSchema)` to create a new message. - */ -export const ListResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_routing_v1_routing_service, 7); - -/** - * Defines an interface for announcement and discovery - * of records across interconnected network. - * - * Middleware should be used to control who can perform these RPCs. - * Policies for the middleware can be handled via separate service. - * - * @generated from service agntcy.dir.routing.v1.RoutingService - */ -export const RoutingService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_routing_v1_routing_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/routing/v1/routing_service.proto (package agntcy.dir.routing.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; +import { file_agntcy_dir_routing_v1_peer } from "./peer_pb.js"; +import { file_agntcy_dir_routing_v1_record_query } from "./record_query_pb.js"; +import { file_agntcy_dir_search_v1_record_query } from "../../search/v1/record_query_pb.js"; +import { file_google_protobuf_empty } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/routing/v1/routing_service.proto. 
+ */ +export const file_agntcy_dir_routing_v1_routing_service = /*@__PURE__*/ + fileDesc("CithZ250Y3kvZGlyL3JvdXRpbmcvdjEvcm91dGluZ19zZXJ2aWNlLnByb3RvEhVhZ250Y3kuZGlyLnJvdXRpbmcudjEijgEKDlB1Ymxpc2hSZXF1ZXN0EjgKC3JlY29yZF9yZWZzGAEgASgLMiEuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFJlZnNIABI3CgdxdWVyaWVzGAIgASgLMiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJpZXNIAEIJCgdyZXF1ZXN0IpABChBVbnB1Ymxpc2hSZXF1ZXN0EjgKC3JlY29yZF9yZWZzGAEgASgLMiEuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFJlZnNIABI3CgdxdWVyaWVzGAIgASgLMiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJpZXNIAEIJCgdyZXF1ZXN0IjkKClJlY29yZFJlZnMSKwoEcmVmcxgBIAMoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYiQwoNUmVjb3JkUXVlcmllcxIyCgdxdWVyaWVzGAEgAygLMiEuYWdudGN5LmRpci5zZWFyY2gudjEuUmVjb3JkUXVlcnkilAEKDVNlYXJjaFJlcXVlc3QSMwoHcXVlcmllcxgBIAMoCzIiLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeRIcCg9taW5fbWF0Y2hfc2NvcmUYAiABKA1IAIgBARISCgVsaW1pdBgDIAEoDUgBiAEBQhIKEF9taW5fbWF0Y2hfc2NvcmVCCAoGX2xpbWl0Ir4BCg5TZWFyY2hSZXNwb25zZRIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhIpCgRwZWVyGAIgASgLMhsuYWdudGN5LmRpci5yb3V0aW5nLnYxLlBlZXISOQoNbWF0Y2hfcXVlcmllcxgDIAMoCzIiLmFnbnRjeS5kaXIucm91dGluZy52MS5SZWNvcmRRdWVyeRITCgttYXRjaF9zY29yZRgEIAEoDSJgCgtMaXN0UmVxdWVzdBIzCgdxdWVyaWVzGAEgAygLMiIuYWdudGN5LmRpci5yb3V0aW5nLnYxLlJlY29yZFF1ZXJ5EhIKBWxpbWl0GAIgASgNSACIAQFCCAoGX2xpbWl0IlEKDExpc3RSZXNwb25zZRIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhIOCgZsYWJlbHMYAiADKAky1AIKDlJvdXRpbmdTZXJ2aWNlEkgKB1B1Ymxpc2gSJS5hZ250Y3kuZGlyLnJvdXRpbmcudjEuUHVibGlzaFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkSTAoJVW5wdWJsaXNoEicuYWdudGN5LmRpci5yb3V0aW5nLnYxLlVucHVibGlzaFJlcXVlc3QaFi5nb29nbGUucHJvdG9idWYuRW1wdHkSVwoGU2VhcmNoEiQuYWdudGN5LmRpci5yb3V0aW5nLnYxLlNlYXJjaFJlcXVlc3QaJS5hZ250Y3kuZGlyLnJvdXRpbmcudjEuU2VhcmNoUmVzcG9uc2UwARJRCgRMaXN0EiIuYWdudGN5LmRpci5yb3V0aW5nLnYxLkxpc3RSZXF1ZXN0GiMuYWdudGN5LmRpci5yb3V0aW5nLnYxLkxpc3RSZXNwb25zZTABQs0BChljb20uYWdudGN5LmRpci5yb3V0aW5nLnYxQhNSb3V0aW5nU2VydmljZVByb3RvUAFaJGdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvcm91dGluZy92MaICA0FEUqoCFUFnbnRjeS5EaXIuUm91dGluZy5WMcoCFUFnbnRjeVxEaXJcUm91dGluZ1xWMeICIUFnbnRjeVxEaXJcUm91dGluZ1xWMVxHUEJNZXRhZGF0YeoCGEFnbnRjeTo6RGlyOjpSb3V0aW5nOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_routing_v1_peer, file_agntcy_dir_routing_v1_record_query, file_agntcy_dir_search_v1_record_query, file_google_protobuf_empty]); + +/** + * Describes the message agntcy.dir.routing.v1.PublishRequest. + * Use `create(PublishRequestSchema)` to create a new message. + */ +export const PublishRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 0); + +/** + * Describes the message agntcy.dir.routing.v1.UnpublishRequest. + * Use `create(UnpublishRequestSchema)` to create a new message. + */ +export const UnpublishRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 1); + +/** + * Describes the message agntcy.dir.routing.v1.RecordRefs. + * Use `create(RecordRefsSchema)` to create a new message. + */ +export const RecordRefsSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 2); + +/** + * Describes the message agntcy.dir.routing.v1.RecordQueries. + * Use `create(RecordQueriesSchema)` to create a new message. + */ +export const RecordQueriesSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 3); + +/** + * Describes the message agntcy.dir.routing.v1.SearchRequest. + * Use `create(SearchRequestSchema)` to create a new message. 
+ */ +export const SearchRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 4); + +/** + * Describes the message agntcy.dir.routing.v1.SearchResponse. + * Use `create(SearchResponseSchema)` to create a new message. + */ +export const SearchResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 5); + +/** + * Describes the message agntcy.dir.routing.v1.ListRequest. + * Use `create(ListRequestSchema)` to create a new message. + */ +export const ListRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 6); + +/** + * Describes the message agntcy.dir.routing.v1.ListResponse. + * Use `create(ListResponseSchema)` to create a new message. + */ +export const ListResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_routing_v1_routing_service, 7); + +/** + * Defines an interface for announcement and discovery + * of records across interconnected network. + * + * Middleware should be used to control who can perform these RPCs. + * Policies for the middleware can be handled via separate service. + * + * @generated from service agntcy.dir.routing.v1.RoutingService + */ +export const RoutingService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_routing_v1_routing_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.d.ts index 1a229398c..1c54bd943 100644 --- a/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.d.ts @@ -1,168 +1,168 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/search/v1/record_query.proto (package agntcy.dir.search.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/search/v1/record_query.proto. - */ -export declare const file_agntcy_dir_search_v1_record_query: GenFile; - -/** - * A query to match the record against during discovery. - * For example: - * Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } - * Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } - * Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } - * Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } - * List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } - * Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } - * - * @generated from message agntcy.dir.search.v1.RecordQuery - */ -export declare type RecordQuery = Message<"agntcy.dir.search.v1.RecordQuery"> & { - /** - * The type of the query to match against. - * - * @generated from field: agntcy.dir.search.v1.RecordQueryType type = 1; - */ - type: RecordQueryType; - - /** - * The query value to match against. - * Supports wildcard patterns: - * '*' - matches zero or more characters - * '?' - matches exactly one character - * '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) - * - * @generated from field: string value = 2; - */ - value: string; -}; - -/** - * Describes the message agntcy.dir.search.v1.RecordQuery. - * Use `create(RecordQuerySchema)` to create a new message. 
- */ -export declare const RecordQuerySchema: GenMessage; - -/** - * Defines a list of supported record query types. - * - * @generated from enum agntcy.dir.search.v1.RecordQueryType - */ -export enum RecordQueryType { - /** - * Unspecified query type. - * - * @generated from enum value: RECORD_QUERY_TYPE_UNSPECIFIED = 0; - */ - UNSPECIFIED = 0, - - /** - * Query for a record name. - * Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" - * - * @generated from enum value: RECORD_QUERY_TYPE_NAME = 1; - */ - NAME = 1, - - /** - * Query for a record version. - * Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" - * - * @generated from enum value: RECORD_QUERY_TYPE_VERSION = 2; - */ - VERSION = 2, - - /** - * Query for a skill ID. - * Numeric field - exact match only, no wildcard support. - * - * @generated from enum value: RECORD_QUERY_TYPE_SKILL_ID = 3; - */ - SKILL_ID = 3, - - /** - * Query for a skill name. - * Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" - * - * @generated from enum value: RECORD_QUERY_TYPE_SKILL_NAME = 4; - */ - SKILL_NAME = 4, - - /** - * Query for a locator type. - * Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" - * - * @generated from enum value: RECORD_QUERY_TYPE_LOCATOR = 5; - */ - LOCATOR = 5, - - /** - * Query for a module name. - * Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" - * - * @generated from enum value: RECORD_QUERY_TYPE_MODULE_NAME = 6; - */ - MODULE_NAME = 6, - - /** - * Query for a domain ID. - * Numeric field - exact match only, no wildcard support. - * - * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN_ID = 7; - */ - DOMAIN_ID = 7, - - /** - * Query for a domain name. - * Supports wildcard patterns: "*education*", "healthcare/*", "*technology" - * - * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN_NAME = 8; - */ - DOMAIN_NAME = 8, - - /** - * Query for a record's created_at timestamp. - * Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" - * - * @generated from enum value: RECORD_QUERY_TYPE_CREATED_AT = 9; - */ - CREATED_AT = 9, - - /** - * Query for a record author. - * Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" - * - * @generated from enum value: RECORD_QUERY_TYPE_AUTHOR = 10; - */ - AUTHOR = 10, - - /** - * Query for a schema version. - * Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" - * - * @generated from enum value: RECORD_QUERY_TYPE_SCHEMA_VERSION = 11; - */ - SCHEMA_VERSION = 11, - - /** - * Query for a module ID. - * Numeric field - exact match only, no wildcard support. - * - * @generated from enum value: RECORD_QUERY_TYPE_MODULE_ID = 12; - */ - MODULE_ID = 12, -} - -/** - * Describes the enum agntcy.dir.search.v1.RecordQueryType. - */ -export declare const RecordQueryTypeSchema: GenEnum; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/search/v1/record_query.proto (package agntcy.dir.search.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/search/v1/record_query.proto. 
+ */ +export declare const file_agntcy_dir_search_v1_record_query: GenFile; + +/** + * A query to match the record against during discovery. + * For example: + * Exact match: { type: RECORD_QUERY_TYPE_NAME, value: "my-agent" } + * Wildcard match: { type: RECORD_QUERY_TYPE_NAME, value: "web*" } + * Pattern match: { type: RECORD_QUERY_TYPE_SKILL_NAME, value: "*machine*learning*" } + * Question mark: { type: RECORD_QUERY_TYPE_VERSION, value: "v1.0.?" } + * List wildcards: { type: RECORD_QUERY_TYPE_NAME, value: "agent-[0-9]" } + * Complex match: { type: RECORD_QUERY_TYPE_LOCATOR, value: "docker-image:https://*.example.com/*" } + * + * @generated from message agntcy.dir.search.v1.RecordQuery + */ +export declare type RecordQuery = Message<"agntcy.dir.search.v1.RecordQuery"> & { + /** + * The type of the query to match against. + * + * @generated from field: agntcy.dir.search.v1.RecordQueryType type = 1; + */ + type: RecordQueryType; + + /** + * The query value to match against. + * Supports wildcard patterns: + * '*' - matches zero or more characters + * '?' - matches exactly one character + * '[]' - matches any character within brackets (e.g., [0-9], [a-z], [abc]) + * + * @generated from field: string value = 2; + */ + value: string; +}; + +/** + * Describes the message agntcy.dir.search.v1.RecordQuery. + * Use `create(RecordQuerySchema)` to create a new message. + */ +export declare const RecordQuerySchema: GenMessage; + +/** + * Defines a list of supported record query types. + * + * @generated from enum agntcy.dir.search.v1.RecordQueryType + */ +export enum RecordQueryType { + /** + * Unspecified query type. + * + * @generated from enum value: RECORD_QUERY_TYPE_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Query for a record name. + * Supports wildcard patterns: "web*", "*service", "api-*-v2", "???api", "agent-[0-9]" + * + * @generated from enum value: RECORD_QUERY_TYPE_NAME = 1; + */ + NAME = 1, + + /** + * Query for a record version. + * Supports wildcard patterns: "v1.*", "v2.*", "*-beta", "v1.0.?", "v[0-9].*" + * + * @generated from enum value: RECORD_QUERY_TYPE_VERSION = 2; + */ + VERSION = 2, + + /** + * Query for a skill ID. + * Numeric field - exact match only, no wildcard support. + * + * @generated from enum value: RECORD_QUERY_TYPE_SKILL_ID = 3; + */ + SKILL_ID = 3, + + /** + * Query for a skill name. + * Supports wildcard patterns: "python*", "*script", "*machine*learning*", "Pytho?", "[A-M]*" + * + * @generated from enum value: RECORD_QUERY_TYPE_SKILL_NAME = 4; + */ + SKILL_NAME = 4, + + /** + * Query for a locator type. + * Supports wildcard patterns: "http*", "ftp*", "*docker*", "[hf]tt[ps]*" + * + * @generated from enum value: RECORD_QUERY_TYPE_LOCATOR = 5; + */ + LOCATOR = 5, + + /** + * Query for a module name. + * Supports wildcard patterns: "*-plugin", "*-module", "core*", "mod-?", "plugin-[0-9]" + * + * @generated from enum value: RECORD_QUERY_TYPE_MODULE_NAME = 6; + */ + MODULE_NAME = 6, + + /** + * Query for a domain ID. + * Numeric field - exact match only, no wildcard support. + * + * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN_ID = 7; + */ + DOMAIN_ID = 7, + + /** + * Query for a domain name. + * Supports wildcard patterns: "*education*", "healthcare/*", "*technology" + * + * @generated from enum value: RECORD_QUERY_TYPE_DOMAIN_NAME = 8; + */ + DOMAIN_NAME = 8, + + /** + * Query for a record's created_at timestamp. 
+ * Supports wildcard patterns for date strings: "2025-*", ">=2025-01-01" + * + * @generated from enum value: RECORD_QUERY_TYPE_CREATED_AT = 9; + */ + CREATED_AT = 9, + + /** + * Query for a record author. + * Supports wildcard patterns: "AGNTCY*", "*@example.com", "*Team*" + * + * @generated from enum value: RECORD_QUERY_TYPE_AUTHOR = 10; + */ + AUTHOR = 10, + + /** + * Query for a schema version. + * Supports wildcard patterns: "0.7.*", "0.*", "1.0.?" + * + * @generated from enum value: RECORD_QUERY_TYPE_SCHEMA_VERSION = 11; + */ + SCHEMA_VERSION = 11, + + /** + * Query for a module ID. + * Numeric field - exact match only, no wildcard support. + * + * @generated from enum value: RECORD_QUERY_TYPE_MODULE_ID = 12; + */ + MODULE_ID = 12, +} + +/** + * Describes the enum agntcy.dir.search.v1.RecordQueryType. + */ +export declare const RecordQueryTypeSchema: GenEnum; + diff --git a/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.js b/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.js index b199fc875..33e18a6a4 100644 --- a/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/search/v1/record_query_pb.js @@ -1,36 +1,36 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/search/v1/record_query.proto (package agntcy.dir.search.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/search/v1/record_query.proto. - */ -export const file_agntcy_dir_search_v1_record_query = /*@__PURE__*/ - fileDesc("CidhZ250Y3kvZGlyL3NlYXJjaC92MS9yZWNvcmRfcXVlcnkucHJvdG8SFGFnbnRjeS5kaXIuc2VhcmNoLnYxIlEKC1JlY29yZFF1ZXJ5EjMKBHR5cGUYASABKA4yJS5hZ250Y3kuZGlyLnNlYXJjaC52MS5SZWNvcmRRdWVyeVR5cGUSDQoFdmFsdWUYAiABKAkqvgMKD1JlY29yZFF1ZXJ5VHlwZRIhCh1SRUNPUkRfUVVFUllfVFlQRV9VTlNQRUNJRklFRBAAEhoKFlJFQ09SRF9RVUVSWV9UWVBFX05BTUUQARIdChlSRUNPUkRfUVVFUllfVFlQRV9WRVJTSU9OEAISHgoaUkVDT1JEX1FVRVJZX1RZUEVfU0tJTExfSUQQAxIgChxSRUNPUkRfUVVFUllfVFlQRV9TS0lMTF9OQU1FEAQSHQoZUkVDT1JEX1FVRVJZX1RZUEVfTE9DQVRPUhAFEiEKHVJFQ09SRF9RVUVSWV9UWVBFX01PRFVMRV9OQU1FEAYSHwobUkVDT1JEX1FVRVJZX1RZUEVfRE9NQUlOX0lEEAcSIQodUkVDT1JEX1FVRVJZX1RZUEVfRE9NQUlOX05BTUUQCBIgChxSRUNPUkRfUVVFUllfVFlQRV9DUkVBVEVEX0FUEAkSHAoYUkVDT1JEX1FVRVJZX1RZUEVfQVVUSE9SEAoSJAogUkVDT1JEX1FVRVJZX1RZUEVfU0NIRU1BX1ZFUlNJT04QCxIfChtSRUNPUkRfUVVFUllfVFlQRV9NT0RVTEVfSUQQDELEAQoYY29tLmFnbnRjeS5kaXIuc2VhcmNoLnYxQhBSZWNvcmRRdWVyeVByb3RvUAFaI2dpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2VhcmNoL3YxogIDQURTqgIUQWdudGN5LkRpci5TZWFyY2guVjHKAhRBZ250Y3lcRGlyXFNlYXJjaFxWMeICIEFnbnRjeVxEaXJcU2VhcmNoXFYxXEdQQk1ldGFkYXRh6gIXQWdudGN5OjpEaXI6OlNlYXJjaDo6VjFiBnByb3RvMw"); - -/** - * Describes the message agntcy.dir.search.v1.RecordQuery. - * Use `create(RecordQuerySchema)` to create a new message. - */ -export const RecordQuerySchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_search_v1_record_query, 0); - -/** - * Describes the enum agntcy.dir.search.v1.RecordQueryType. - */ -export const RecordQueryTypeSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_search_v1_record_query, 0); - -/** - * Defines a list of supported record query types. 
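The search-level `RecordQueryType` pairs string fields with the wildcard grammar documented above (`*`, `?`, `[]`), while numeric ID fields are exact-match only. A small sketch exercising each pattern style:

```ts
import { create } from "@bufbuild/protobuf";
import { RecordQuerySchema, RecordQueryType } from "./record_query_pb.js";

const queries = [
  // '*' matches zero or more characters.
  create(RecordQuerySchema, { type: RecordQueryType.NAME, value: "web*" }),
  // '?' matches exactly one character.
  create(RecordQuerySchema, { type: RecordQueryType.VERSION, value: "v1.0.?" }),
  // '[]' matches any single character in the set.
  create(RecordQuerySchema, { type: RecordQueryType.NAME, value: "agent-[0-9]" }),
  // Numeric fields (SKILL_ID, DOMAIN_ID, MODULE_ID) take exact values only;
  // the ID below is an arbitrary example.
  create(RecordQuerySchema, { type: RecordQueryType.SKILL_ID, value: "10201" }),
];
```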
- * - * @generated from enum agntcy.dir.search.v1.RecordQueryType - */ -export const RecordQueryType = /*@__PURE__*/ - tsEnum(RecordQueryTypeSchema); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/search/v1/record_query.proto (package agntcy.dir.search.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/search/v1/record_query.proto. + */ +export const file_agntcy_dir_search_v1_record_query = /*@__PURE__*/ + fileDesc("CidhZ250Y3kvZGlyL3NlYXJjaC92MS9yZWNvcmRfcXVlcnkucHJvdG8SFGFnbnRjeS5kaXIuc2VhcmNoLnYxIlEKC1JlY29yZFF1ZXJ5EjMKBHR5cGUYASABKA4yJS5hZ250Y3kuZGlyLnNlYXJjaC52MS5SZWNvcmRRdWVyeVR5cGUSDQoFdmFsdWUYAiABKAkqvgMKD1JlY29yZFF1ZXJ5VHlwZRIhCh1SRUNPUkRfUVVFUllfVFlQRV9VTlNQRUNJRklFRBAAEhoKFlJFQ09SRF9RVUVSWV9UWVBFX05BTUUQARIdChlSRUNPUkRfUVVFUllfVFlQRV9WRVJTSU9OEAISHgoaUkVDT1JEX1FVRVJZX1RZUEVfU0tJTExfSUQQAxIgChxSRUNPUkRfUVVFUllfVFlQRV9TS0lMTF9OQU1FEAQSHQoZUkVDT1JEX1FVRVJZX1RZUEVfTE9DQVRPUhAFEiEKHVJFQ09SRF9RVUVSWV9UWVBFX01PRFVMRV9OQU1FEAYSHwobUkVDT1JEX1FVRVJZX1RZUEVfRE9NQUlOX0lEEAcSIQodUkVDT1JEX1FVRVJZX1RZUEVfRE9NQUlOX05BTUUQCBIgChxSRUNPUkRfUVVFUllfVFlQRV9DUkVBVEVEX0FUEAkSHAoYUkVDT1JEX1FVRVJZX1RZUEVfQVVUSE9SEAoSJAogUkVDT1JEX1FVRVJZX1RZUEVfU0NIRU1BX1ZFUlNJT04QCxIfChtSRUNPUkRfUVVFUllfVFlQRV9NT0RVTEVfSUQQDELEAQoYY29tLmFnbnRjeS5kaXIuc2VhcmNoLnYxQhBSZWNvcmRRdWVyeVByb3RvUAFaI2dpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2VhcmNoL3YxogIDQURTqgIUQWdudGN5LkRpci5TZWFyY2guVjHKAhRBZ250Y3lcRGlyXFNlYXJjaFxWMeICIEFnbnRjeVxEaXJcU2VhcmNoXFYxXEdQQk1ldGFkYXRh6gIXQWdudGN5OjpEaXI6OlNlYXJjaDo6VjFiBnByb3RvMw"); + +/** + * Describes the message agntcy.dir.search.v1.RecordQuery. + * Use `create(RecordQuerySchema)` to create a new message. + */ +export const RecordQuerySchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_search_v1_record_query, 0); + +/** + * Describes the enum agntcy.dir.search.v1.RecordQueryType. + */ +export const RecordQueryTypeSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_search_v1_record_query, 0); + +/** + * Defines a list of supported record query types. + * + * @generated from enum agntcy.dir.search.v1.RecordQueryType + */ +export const RecordQueryType = /*@__PURE__*/ + tsEnum(RecordQueryTypeSchema); + diff --git a/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.d.ts index b77d25999..2764d0c50 100644 --- a/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.d.ts @@ -1,147 +1,147 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/search/v1/search_service.proto (package agntcy.dir.search.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { RecordQuery } from "./record_query_pb.js"; -import type { Record } from "../../core/v1/record_pb.js"; - -/** - * Describes the file agntcy/dir/search/v1/search_service.proto. 
- */ -export declare const file_agntcy_dir_search_v1_search_service: GenFile; - -/** - * @generated from message agntcy.dir.search.v1.SearchCIDsRequest - */ -export declare type SearchCIDsRequest = Message<"agntcy.dir.search.v1.SearchCIDsRequest"> & { - /** - * List of queries to match against the records. - * - * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; - */ - queries: RecordQuery[]; - - /** - * Optional limit on the number of results to return. - * - * @generated from field: optional uint32 limit = 2; - */ - limit?: number; - - /** - * Optional offset for pagination of results. - * - * @generated from field: optional uint32 offset = 3; - */ - offset?: number; -}; - -/** - * Describes the message agntcy.dir.search.v1.SearchCIDsRequest. - * Use `create(SearchCIDsRequestSchema)` to create a new message. - */ -export declare const SearchCIDsRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.search.v1.SearchRecordsRequest - */ -export declare type SearchRecordsRequest = Message<"agntcy.dir.search.v1.SearchRecordsRequest"> & { - /** - * List of queries to match against the records. - * - * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; - */ - queries: RecordQuery[]; - - /** - * Optional limit on the number of results to return. - * - * @generated from field: optional uint32 limit = 2; - */ - limit?: number; - - /** - * Optional offset for pagination of results. - * - * @generated from field: optional uint32 offset = 3; - */ - offset?: number; -}; - -/** - * Describes the message agntcy.dir.search.v1.SearchRecordsRequest. - * Use `create(SearchRecordsRequestSchema)` to create a new message. - */ -export declare const SearchRecordsRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.search.v1.SearchCIDsResponse - */ -export declare type SearchCIDsResponse = Message<"agntcy.dir.search.v1.SearchCIDsResponse"> & { - /** - * The CID of the record that matches the search criteria. - * - * @generated from field: string record_cid = 1; - */ - recordCid: string; -}; - -/** - * Describes the message agntcy.dir.search.v1.SearchCIDsResponse. - * Use `create(SearchCIDsResponseSchema)` to create a new message. - */ -export declare const SearchCIDsResponseSchema: GenMessage; - -/** - * @generated from message agntcy.dir.search.v1.SearchRecordsResponse - */ -export declare type SearchRecordsResponse = Message<"agntcy.dir.search.v1.SearchRecordsResponse"> & { - /** - * The full record that matches the search criteria. - * - * @generated from field: agntcy.dir.core.v1.Record record = 1; - */ - record?: Record; -}; - -/** - * Describes the message agntcy.dir.search.v1.SearchRecordsResponse. - * Use `create(SearchRecordsResponseSchema)` to create a new message. - */ -export declare const SearchRecordsResponseSchema: GenMessage; - -/** - * @generated from service agntcy.dir.search.v1.SearchService - */ -export declare const SearchService: GenService<{ - /** - * Search for record CIDs that match the given parameters. - * Returns only CIDs for efficient lookups and piping to other commands. - * This operation does not interact with the network. - * - * @generated from rpc agntcy.dir.search.v1.SearchService.SearchCIDs - */ - searchCIDs: { - methodKind: "server_streaming"; - input: typeof SearchCIDsRequestSchema; - output: typeof SearchCIDsResponseSchema; - }, - /** - * Search for full records that match the given parameters. - * Returns complete record data including all metadata, skills, domains, etc. 
- * This operation does not interact with the network. - * - * @generated from rpc agntcy.dir.search.v1.SearchService.SearchRecords - */ - searchRecords: { - methodKind: "server_streaming"; - input: typeof SearchRecordsRequestSchema; - output: typeof SearchRecordsResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/search/v1/search_service.proto (package agntcy.dir.search.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { RecordQuery } from "./record_query_pb.js"; +import type { Record } from "../../core/v1/record_pb.js"; + +/** + * Describes the file agntcy/dir/search/v1/search_service.proto. + */ +export declare const file_agntcy_dir_search_v1_search_service: GenFile; + +/** + * @generated from message agntcy.dir.search.v1.SearchCIDsRequest + */ +export declare type SearchCIDsRequest = Message<"agntcy.dir.search.v1.SearchCIDsRequest"> & { + /** + * List of queries to match against the records. + * + * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; + */ + queries: RecordQuery[]; + + /** + * Optional limit on the number of results to return. + * + * @generated from field: optional uint32 limit = 2; + */ + limit?: number; + + /** + * Optional offset for pagination of results. + * + * @generated from field: optional uint32 offset = 3; + */ + offset?: number; +}; + +/** + * Describes the message agntcy.dir.search.v1.SearchCIDsRequest. + * Use `create(SearchCIDsRequestSchema)` to create a new message. + */ +export declare const SearchCIDsRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.search.v1.SearchRecordsRequest + */ +export declare type SearchRecordsRequest = Message<"agntcy.dir.search.v1.SearchRecordsRequest"> & { + /** + * List of queries to match against the records. + * + * @generated from field: repeated agntcy.dir.search.v1.RecordQuery queries = 1; + */ + queries: RecordQuery[]; + + /** + * Optional limit on the number of results to return. + * + * @generated from field: optional uint32 limit = 2; + */ + limit?: number; + + /** + * Optional offset for pagination of results. + * + * @generated from field: optional uint32 offset = 3; + */ + offset?: number; +}; + +/** + * Describes the message agntcy.dir.search.v1.SearchRecordsRequest. + * Use `create(SearchRecordsRequestSchema)` to create a new message. + */ +export declare const SearchRecordsRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.search.v1.SearchCIDsResponse + */ +export declare type SearchCIDsResponse = Message<"agntcy.dir.search.v1.SearchCIDsResponse"> & { + /** + * The CID of the record that matches the search criteria. + * + * @generated from field: string record_cid = 1; + */ + recordCid: string; +}; + +/** + * Describes the message agntcy.dir.search.v1.SearchCIDsResponse. + * Use `create(SearchCIDsResponseSchema)` to create a new message. + */ +export declare const SearchCIDsResponseSchema: GenMessage; + +/** + * @generated from message agntcy.dir.search.v1.SearchRecordsResponse + */ +export declare type SearchRecordsResponse = Message<"agntcy.dir.search.v1.SearchRecordsResponse"> & { + /** + * The full record that matches the search criteria. 
+ * + * @generated from field: agntcy.dir.core.v1.Record record = 1; + */ + record?: Record; +}; + +/** + * Describes the message agntcy.dir.search.v1.SearchRecordsResponse. + * Use `create(SearchRecordsResponseSchema)` to create a new message. + */ +export declare const SearchRecordsResponseSchema: GenMessage; + +/** + * @generated from service agntcy.dir.search.v1.SearchService + */ +export declare const SearchService: GenService<{ + /** + * Search for record CIDs that match the given parameters. + * Returns only CIDs for efficient lookups and piping to other commands. + * This operation does not interact with the network. + * + * @generated from rpc agntcy.dir.search.v1.SearchService.SearchCIDs + */ + searchCIDs: { + methodKind: "server_streaming"; + input: typeof SearchCIDsRequestSchema; + output: typeof SearchCIDsResponseSchema; + }, + /** + * Search for full records that match the given parameters. + * Returns complete record data including all metadata, skills, domains, etc. + * This operation does not interact with the network. + * + * @generated from rpc agntcy.dir.search.v1.SearchService.SearchRecords + */ + searchRecords: { + methodKind: "server_streaming"; + input: typeof SearchRecordsRequestSchema; + output: typeof SearchRecordsResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.js index 4103a9651..b29c56f06 100644 --- a/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/search/v1/search_service_pb.js @@ -1,51 +1,51 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/search/v1/search_service.proto (package agntcy.dir.search.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; -import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; -import { file_agntcy_dir_search_v1_record_query } from "./record_query_pb.js"; - -/** - * Describes the file agntcy/dir/search/v1/search_service.proto. 
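Both search RPCs are server-streaming, so a client consumes them as async iterables. A minimal sketch, assuming the generated service is used through a Connect-style client (`createClient` from `@connectrpc/connect`) with a transport configured elsewhere; neither the transport nor the client wiring is part of this diff.

```ts
import { create } from "@bufbuild/protobuf";
import { createClient, type Transport } from "@connectrpc/connect";
import { SearchService } from "./search_service_pb.js";
import { RecordQuerySchema, RecordQueryType } from "./record_query_pb.js";

declare const transport: Transport; // hypothetical: e.g. built with @connectrpc/connect-node

async function listMatchingCids(): Promise<void> {
  const client = createClient(SearchService, transport);
  const query = create(RecordQuerySchema, {
    type: RecordQueryType.NAME,
    value: "web*",
  });

  // Server-streaming: each iteration yields one SearchCIDsResponse.
  // limit/offset page through the result set on the server side.
  for await (const res of client.searchCIDs({ queries: [query], limit: 10, offset: 0 })) {
    console.log(res.recordCid);
  }
}
```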
- */ -export const file_agntcy_dir_search_v1_search_service = /*@__PURE__*/ - fileDesc("CilhZ250Y3kvZGlyL3NlYXJjaC92MS9zZWFyY2hfc2VydmljZS5wcm90bxIUYWdudGN5LmRpci5zZWFyY2gudjEihQEKEVNlYXJjaENJRHNSZXF1ZXN0EjIKB3F1ZXJpZXMYASADKAsyIS5hZ250Y3kuZGlyLnNlYXJjaC52MS5SZWNvcmRRdWVyeRISCgVsaW1pdBgCIAEoDUgAiAEBEhMKBm9mZnNldBgDIAEoDUgBiAEBQggKBl9saW1pdEIJCgdfb2Zmc2V0IogBChRTZWFyY2hSZWNvcmRzUmVxdWVzdBIyCgdxdWVyaWVzGAEgAygLMiEuYWdudGN5LmRpci5zZWFyY2gudjEuUmVjb3JkUXVlcnkSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCIoChJTZWFyY2hDSURzUmVzcG9uc2USEgoKcmVjb3JkX2NpZBgBIAEoCSJDChVTZWFyY2hSZWNvcmRzUmVzcG9uc2USKgoGcmVjb3JkGAEgASgLMhouYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZDLeAQoNU2VhcmNoU2VydmljZRJhCgpTZWFyY2hDSURzEicuYWdudGN5LmRpci5zZWFyY2gudjEuU2VhcmNoQ0lEc1JlcXVlc3QaKC5hZ250Y3kuZGlyLnNlYXJjaC52MS5TZWFyY2hDSURzUmVzcG9uc2UwARJqCg1TZWFyY2hSZWNvcmRzEiouYWdudGN5LmRpci5zZWFyY2gudjEuU2VhcmNoUmVjb3Jkc1JlcXVlc3QaKy5hZ250Y3kuZGlyLnNlYXJjaC52MS5TZWFyY2hSZWNvcmRzUmVzcG9uc2UwAULGAQoYY29tLmFnbnRjeS5kaXIuc2VhcmNoLnYxQhJTZWFyY2hTZXJ2aWNlUHJvdG9QAVojZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9zZWFyY2gvdjGiAgNBRFOqAhRBZ250Y3kuRGlyLlNlYXJjaC5WMcoCFEFnbnRjeVxEaXJcU2VhcmNoXFYx4gIgQWdudGN5XERpclxTZWFyY2hcVjFcR1BCTWV0YWRhdGHqAhdBZ250Y3k6OkRpcjo6U2VhcmNoOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_search_v1_record_query]); - -/** - * Describes the message agntcy.dir.search.v1.SearchCIDsRequest. - * Use `create(SearchCIDsRequestSchema)` to create a new message. - */ -export const SearchCIDsRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_search_v1_search_service, 0); - -/** - * Describes the message agntcy.dir.search.v1.SearchRecordsRequest. - * Use `create(SearchRecordsRequestSchema)` to create a new message. - */ -export const SearchRecordsRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_search_v1_search_service, 1); - -/** - * Describes the message agntcy.dir.search.v1.SearchCIDsResponse. - * Use `create(SearchCIDsResponseSchema)` to create a new message. - */ -export const SearchCIDsResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_search_v1_search_service, 2); - -/** - * Describes the message agntcy.dir.search.v1.SearchRecordsResponse. - * Use `create(SearchRecordsResponseSchema)` to create a new message. - */ -export const SearchRecordsResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_search_v1_search_service, 3); - -/** - * @generated from service agntcy.dir.search.v1.SearchService - */ -export const SearchService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_search_v1_search_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/search/v1/search_service.proto (package agntcy.dir.search.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; +import { file_agntcy_dir_search_v1_record_query } from "./record_query_pb.js"; + +/** + * Describes the file agntcy/dir/search/v1/search_service.proto. 
+ */ +export const file_agntcy_dir_search_v1_search_service = /*@__PURE__*/ + fileDesc("CilhZ250Y3kvZGlyL3NlYXJjaC92MS9zZWFyY2hfc2VydmljZS5wcm90bxIUYWdudGN5LmRpci5zZWFyY2gudjEihQEKEVNlYXJjaENJRHNSZXF1ZXN0EjIKB3F1ZXJpZXMYASADKAsyIS5hZ250Y3kuZGlyLnNlYXJjaC52MS5SZWNvcmRRdWVyeRISCgVsaW1pdBgCIAEoDUgAiAEBEhMKBm9mZnNldBgDIAEoDUgBiAEBQggKBl9saW1pdEIJCgdfb2Zmc2V0IogBChRTZWFyY2hSZWNvcmRzUmVxdWVzdBIyCgdxdWVyaWVzGAEgAygLMiEuYWdudGN5LmRpci5zZWFyY2gudjEuUmVjb3JkUXVlcnkSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCIoChJTZWFyY2hDSURzUmVzcG9uc2USEgoKcmVjb3JkX2NpZBgBIAEoCSJDChVTZWFyY2hSZWNvcmRzUmVzcG9uc2USKgoGcmVjb3JkGAEgASgLMhouYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZDLeAQoNU2VhcmNoU2VydmljZRJhCgpTZWFyY2hDSURzEicuYWdudGN5LmRpci5zZWFyY2gudjEuU2VhcmNoQ0lEc1JlcXVlc3QaKC5hZ250Y3kuZGlyLnNlYXJjaC52MS5TZWFyY2hDSURzUmVzcG9uc2UwARJqCg1TZWFyY2hSZWNvcmRzEiouYWdudGN5LmRpci5zZWFyY2gudjEuU2VhcmNoUmVjb3Jkc1JlcXVlc3QaKy5hZ250Y3kuZGlyLnNlYXJjaC52MS5TZWFyY2hSZWNvcmRzUmVzcG9uc2UwAULGAQoYY29tLmFnbnRjeS5kaXIuc2VhcmNoLnYxQhJTZWFyY2hTZXJ2aWNlUHJvdG9QAVojZ2l0aHViLmNvbS9hZ250Y3kvZGlyL2FwaS9zZWFyY2gvdjGiAgNBRFOqAhRBZ250Y3kuRGlyLlNlYXJjaC5WMcoCFEFnbnRjeVxEaXJcU2VhcmNoXFYx4gIgQWdudGN5XERpclxTZWFyY2hcVjFcR1BCTWV0YWRhdGHqAhdBZ250Y3k6OkRpcjo6U2VhcmNoOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_search_v1_record_query]); + +/** + * Describes the message agntcy.dir.search.v1.SearchCIDsRequest. + * Use `create(SearchCIDsRequestSchema)` to create a new message. + */ +export const SearchCIDsRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_search_v1_search_service, 0); + +/** + * Describes the message agntcy.dir.search.v1.SearchRecordsRequest. + * Use `create(SearchRecordsRequestSchema)` to create a new message. + */ +export const SearchRecordsRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_search_v1_search_service, 1); + +/** + * Describes the message agntcy.dir.search.v1.SearchCIDsResponse. + * Use `create(SearchCIDsResponseSchema)` to create a new message. + */ +export const SearchCIDsResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_search_v1_search_service, 2); + +/** + * Describes the message agntcy.dir.search.v1.SearchRecordsResponse. + * Use `create(SearchRecordsResponseSchema)` to create a new message. + */ +export const SearchRecordsResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_search_v1_search_service, 3); + +/** + * @generated from service agntcy.dir.search.v1.SearchService + */ +export const SearchService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_search_v1_search_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.d.ts index 81c1cf993..bda05072d 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.d.ts @@ -1,36 +1,36 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/public_key.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/sign/v1/public_key.proto. - */ -export declare const file_agntcy_dir_sign_v1_public_key: GenFile; - -/** - * PublicKey is the public key data associated with a Record. 
- * Multiple public keys can be associated with a single Record. - * - * @generated from message agntcy.dir.sign.v1.PublicKey - */ -export declare type PublicKey = Message<"agntcy.dir.sign.v1.PublicKey"> & { - /** - * PEM-encoded public key string. - * - * @generated from field: string key = 1; - */ - key: string; -}; - -/** - * Describes the message agntcy.dir.sign.v1.PublicKey. - * Use `create(PublicKeySchema)` to create a new message. - */ -export declare const PublicKeySchema: GenMessage; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/public_key.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/sign/v1/public_key.proto. + */ +export declare const file_agntcy_dir_sign_v1_public_key: GenFile; + +/** + * PublicKey is the public key data associated with a Record. + * Multiple public keys can be associated with a single Record. + * + * @generated from message agntcy.dir.sign.v1.PublicKey + */ +export declare type PublicKey = Message<"agntcy.dir.sign.v1.PublicKey"> & { + /** + * PEM-encoded public key string. + * + * @generated from field: string key = 1; + */ + key: string; +}; + +/** + * Describes the message agntcy.dir.sign.v1.PublicKey. + * Use `create(PublicKeySchema)` to create a new message. + */ +export declare const PublicKeySchema: GenMessage; + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.js b/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.js index dead5a8de..2c0b5043c 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/public_key_pb.js @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/public_key.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/sign/v1/public_key.proto. - */ -export const file_agntcy_dir_sign_v1_public_key = /*@__PURE__*/ - fileDesc("CiNhZ250Y3kvZGlyL3NpZ24vdjEvcHVibGljX2tleS5wcm90bxISYWdudGN5LmRpci5zaWduLnYxIhgKCVB1YmxpY0tleRILCgNrZXkYASABKAlCtgEKFmNvbS5hZ250Y3kuZGlyLnNpZ24udjFCDlB1YmxpY0tleVByb3RvUAFaIWdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2lnbi92MaICA0FEU6oCEkFnbnRjeS5EaXIuU2lnbi5WMcoCEkFnbnRjeVxEaXJcU2lnblxWMeICHkFnbnRjeVxEaXJcU2lnblxWMVxHUEJNZXRhZGF0YeoCFUFnbnRjeTo6RGlyOjpTaWduOjpWMWIGcHJvdG8z"); - -/** - * Describes the message agntcy.dir.sign.v1.PublicKey. - * Use `create(PublicKeySchema)` to create a new message. - */ -export const PublicKeySchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_public_key, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/public_key.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/sign/v1/public_key.proto. 
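For completeness, the PublicKey message is a thin wrapper around a PEM string; constructing one follows the same `create()` pattern (the key below is a placeholder):

```ts
import { create } from "@bufbuild/protobuf";
import { PublicKeySchema } from "./public_key_pb.js";

// Wrap a PEM-encoded key; a record may carry several of these.
const publicKey = create(PublicKeySchema, {
  key: "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n",
});
```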
+ */ +export const file_agntcy_dir_sign_v1_public_key = /*@__PURE__*/ + fileDesc("CiNhZ250Y3kvZGlyL3NpZ24vdjEvcHVibGljX2tleS5wcm90bxISYWdudGN5LmRpci5zaWduLnYxIhgKCVB1YmxpY0tleRILCgNrZXkYASABKAlCtgEKFmNvbS5hZ250Y3kuZGlyLnNpZ24udjFCDlB1YmxpY0tleVByb3RvUAFaIWdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2lnbi92MaICA0FEU6oCEkFnbnRjeS5EaXIuU2lnbi5WMcoCEkFnbnRjeVxEaXJcU2lnblxWMeICHkFnbnRjeVxEaXJcU2lnblxWMVxHUEJNZXRhZGF0YeoCFUFnbnRjeTo6RGlyOjpTaWduOjpWMWIGcHJvdG8z"); + +/** + * Describes the message agntcy.dir.sign.v1.PublicKey. + * Use `create(PublicKeySchema)` to create a new message. + */ +export const PublicKeySchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_public_key, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.d.ts index 332a20f21..67f7a8aad 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.d.ts @@ -1,254 +1,254 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/sign_service.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { RecordRef } from "../../core/v1/record_pb.js"; -import type { Signature } from "./signature_pb.js"; - -/** - * Describes the file agntcy/dir/sign/v1/sign_service.proto. - */ -export declare const file_agntcy_dir_sign_v1_sign_service: GenFile; - -/** - * @generated from message agntcy.dir.sign.v1.SignRequest - */ -export declare type SignRequest = Message<"agntcy.dir.sign.v1.SignRequest"> & { - /** - * Record reference to be signed - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; - - /** - * Signing provider to use - * - * @generated from field: agntcy.dir.sign.v1.SignRequestProvider provider = 2; - */ - provider?: SignRequestProvider; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignRequest. - * Use `create(SignRequestSchema)` to create a new message. - */ -export declare const SignRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.SignRequestProvider - */ -export declare type SignRequestProvider = Message<"agntcy.dir.sign.v1.SignRequestProvider"> & { - /** - * @generated from oneof agntcy.dir.sign.v1.SignRequestProvider.request - */ - request: { - /** - * Sign with OIDC provider - * - * @generated from field: agntcy.dir.sign.v1.SignWithOIDC oidc = 1; - */ - value: SignWithOIDC; - case: "oidc"; - } | { - /** - * Sign with PEM-encoded public key - * - * @generated from field: agntcy.dir.sign.v1.SignWithKey key = 2; - */ - value: SignWithKey; - case: "key"; - } | { case: undefined; value?: undefined }; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignRequestProvider. - * Use `create(SignRequestProviderSchema)` to create a new message. 
- */ -export declare const SignRequestProviderSchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.SignWithOIDC - */ -export declare type SignWithOIDC = Message<"agntcy.dir.sign.v1.SignWithOIDC"> & { - /** - * Token for OIDC provider - * - * @generated from field: string id_token = 1; - */ - idToken: string; - - /** - * Signing options for OIDC - * - * @generated from field: agntcy.dir.sign.v1.SignWithOIDC.SignOpts options = 2; - */ - options?: SignWithOIDC_SignOpts; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignWithOIDC. - * Use `create(SignWithOIDCSchema)` to create a new message. - */ -export declare const SignWithOIDCSchema: GenMessage; - -/** - * List of sign options for OIDC - * - * @generated from message agntcy.dir.sign.v1.SignWithOIDC.SignOpts - */ -export declare type SignWithOIDC_SignOpts = Message<"agntcy.dir.sign.v1.SignWithOIDC.SignOpts"> & { - /** - * Fulcio authority access URL (default value: https://fulcio.sigstage.dev) - * - * @generated from field: optional string fulcio_url = 1; - */ - fulcioUrl?: string; - - /** - * Rekor validator access URL (default value: https://rekor.sigstage.dev) - * - * @generated from field: optional string rekor_url = 2; - */ - rekorUrl?: string; - - /** - * Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp) - * - * @generated from field: optional string timestamp_url = 3; - */ - timestampUrl?: string; - - /** - * OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth) - * - * @generated from field: optional string oidc_provider_url = 4; - */ - oidcProviderUrl?: string; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignWithOIDC.SignOpts. - * Use `create(SignWithOIDC_SignOptsSchema)` to create a new message. - */ -export declare const SignWithOIDC_SignOptsSchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.SignWithKey - */ -export declare type SignWithKey = Message<"agntcy.dir.sign.v1.SignWithKey"> & { - /** - * Private key used for signing - * - * @generated from field: bytes private_key = 1; - */ - privateKey: Uint8Array; - - /** - * Password to unlock the private key - * - * @generated from field: optional bytes password = 2; - */ - password?: Uint8Array; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignWithKey. - * Use `create(SignWithKeySchema)` to create a new message. - */ -export declare const SignWithKeySchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.SignResponse - */ -export declare type SignResponse = Message<"agntcy.dir.sign.v1.SignResponse"> & { - /** - * Cryptographic signature of the record - * - * @generated from field: agntcy.dir.sign.v1.Signature signature = 1; - */ - signature?: Signature; -}; - -/** - * Describes the message agntcy.dir.sign.v1.SignResponse. - * Use `create(SignResponseSchema)` to create a new message. - */ -export declare const SignResponseSchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.VerifyRequest - */ -export declare type VerifyRequest = Message<"agntcy.dir.sign.v1.VerifyRequest"> & { - /** - * Record reference to be verified - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; -}; - -/** - * Describes the message agntcy.dir.sign.v1.VerifyRequest. - * Use `create(VerifyRequestSchema)` to create a new message. 
- */ -export declare const VerifyRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.sign.v1.VerifyResponse - */ -export declare type VerifyResponse = Message<"agntcy.dir.sign.v1.VerifyResponse"> & { - /** - * The verify process result - * - * @generated from field: bool success = 1; - */ - success: boolean; - - /** - * Optional error message if verification failed - * - * @generated from field: optional string error_message = 2; - */ - errorMessage?: string; -}; - -/** - * Describes the message agntcy.dir.sign.v1.VerifyResponse. - * Use `create(VerifyResponseSchema)` to create a new message. - */ -export declare const VerifyResponseSchema: GenMessage; - -/** - * SignService provides methods to sign and verify records. - * - * @generated from service agntcy.dir.sign.v1.SignService - */ -export declare const SignService: GenService<{ - /** - * Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase - * - * @generated from rpc agntcy.dir.sign.v1.SignService.Sign - */ - sign: { - methodKind: "unary"; - input: typeof SignRequestSchema; - output: typeof SignResponseSchema; - }, - /** - * Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted - * - * @generated from rpc agntcy.dir.sign.v1.SignService.Verify - */ - verify: { - methodKind: "unary"; - input: typeof VerifyRequestSchema; - output: typeof VerifyResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/sign_service.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { RecordRef } from "../../core/v1/record_pb.js"; +import type { Signature } from "./signature_pb.js"; + +/** + * Describes the file agntcy/dir/sign/v1/sign_service.proto. + */ +export declare const file_agntcy_dir_sign_v1_sign_service: GenFile; + +/** + * @generated from message agntcy.dir.sign.v1.SignRequest + */ +export declare type SignRequest = Message<"agntcy.dir.sign.v1.SignRequest"> & { + /** + * Record reference to be signed + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; + + /** + * Signing provider to use + * + * @generated from field: agntcy.dir.sign.v1.SignRequestProvider provider = 2; + */ + provider?: SignRequestProvider; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignRequest. + * Use `create(SignRequestSchema)` to create a new message. + */ +export declare const SignRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.SignRequestProvider + */ +export declare type SignRequestProvider = Message<"agntcy.dir.sign.v1.SignRequestProvider"> & { + /** + * @generated from oneof agntcy.dir.sign.v1.SignRequestProvider.request + */ + request: { + /** + * Sign with OIDC provider + * + * @generated from field: agntcy.dir.sign.v1.SignWithOIDC oidc = 1; + */ + value: SignWithOIDC; + case: "oidc"; + } | { + /** + * Sign with PEM-encoded public key + * + * @generated from field: agntcy.dir.sign.v1.SignWithKey key = 2; + */ + value: SignWithKey; + case: "key"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignRequestProvider. 
+ * Use `create(SignRequestProviderSchema)` to create a new message. + */ +export declare const SignRequestProviderSchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.SignWithOIDC + */ +export declare type SignWithOIDC = Message<"agntcy.dir.sign.v1.SignWithOIDC"> & { + /** + * Token for OIDC provider + * + * @generated from field: string id_token = 1; + */ + idToken: string; + + /** + * Signing options for OIDC + * + * @generated from field: agntcy.dir.sign.v1.SignWithOIDC.SignOpts options = 2; + */ + options?: SignWithOIDC_SignOpts; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignWithOIDC. + * Use `create(SignWithOIDCSchema)` to create a new message. + */ +export declare const SignWithOIDCSchema: GenMessage; + +/** + * List of sign options for OIDC + * + * @generated from message agntcy.dir.sign.v1.SignWithOIDC.SignOpts + */ +export declare type SignWithOIDC_SignOpts = Message<"agntcy.dir.sign.v1.SignWithOIDC.SignOpts"> & { + /** + * Fulcio authority access URL (default value: https://fulcio.sigstage.dev) + * + * @generated from field: optional string fulcio_url = 1; + */ + fulcioUrl?: string; + + /** + * Rekor validator access URL (default value: https://rekor.sigstage.dev) + * + * @generated from field: optional string rekor_url = 2; + */ + rekorUrl?: string; + + /** + * Timestamp authority access URL (default value: https://timestamp.sigstage.dev/api/v1/timestamp) + * + * @generated from field: optional string timestamp_url = 3; + */ + timestampUrl?: string; + + /** + * OIDC provider access URL (default value: https://oauth2.sigstage.dev/auth) + * + * @generated from field: optional string oidc_provider_url = 4; + */ + oidcProviderUrl?: string; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignWithOIDC.SignOpts. + * Use `create(SignWithOIDC_SignOptsSchema)` to create a new message. + */ +export declare const SignWithOIDC_SignOptsSchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.SignWithKey + */ +export declare type SignWithKey = Message<"agntcy.dir.sign.v1.SignWithKey"> & { + /** + * Private key used for signing + * + * @generated from field: bytes private_key = 1; + */ + privateKey: Uint8Array; + + /** + * Password to unlock the private key + * + * @generated from field: optional bytes password = 2; + */ + password?: Uint8Array; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignWithKey. + * Use `create(SignWithKeySchema)` to create a new message. + */ +export declare const SignWithKeySchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.SignResponse + */ +export declare type SignResponse = Message<"agntcy.dir.sign.v1.SignResponse"> & { + /** + * Cryptographic signature of the record + * + * @generated from field: agntcy.dir.sign.v1.Signature signature = 1; + */ + signature?: Signature; +}; + +/** + * Describes the message agntcy.dir.sign.v1.SignResponse. + * Use `create(SignResponseSchema)` to create a new message. + */ +export declare const SignResponseSchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.VerifyRequest + */ +export declare type VerifyRequest = Message<"agntcy.dir.sign.v1.VerifyRequest"> & { + /** + * Record reference to be verified + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; +}; + +/** + * Describes the message agntcy.dir.sign.v1.VerifyRequest. + * Use `create(VerifyRequestSchema)` to create a new message. 
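The provider oneof is represented in protobuf-es as a `{ case, value }` pair, which is the least obvious part of these types. A minimal sketch of both provider variants; `recordRef` and the key material are assumed to come from elsewhere and are declared here only for illustration.

```ts
import { create } from "@bufbuild/protobuf";
import {
  SignRequestSchema,
  SignRequestProviderSchema,
  SignWithKeySchema,
  SignWithOIDCSchema,
} from "./sign_service_pb.js";
import type { RecordRef } from "../../core/v1/record_pb.js";

declare const recordRef: RecordRef;   // the record to sign, obtained elsewhere
declare const privateKey: Uint8Array; // PEM-encoded private key bytes
declare const password: Uint8Array;   // optional passphrase for the key

// Variant 1: sign with a private key.
const withKey = create(SignRequestSchema, {
  recordRef,
  provider: create(SignRequestProviderSchema, {
    request: { case: "key", value: create(SignWithKeySchema, { privateKey, password }) },
  }),
});

// Variant 2: keyless signing via OIDC; omitted option URLs fall back
// to the sigstage defaults documented above.
const withOidc = create(SignRequestSchema, {
  recordRef,
  provider: create(SignRequestProviderSchema, {
    request: { case: "oidc", value: create(SignWithOIDCSchema, { idToken: "..." }) },
  }),
});
```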
+ */ +export declare const VerifyRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.sign.v1.VerifyResponse + */ +export declare type VerifyResponse = Message<"agntcy.dir.sign.v1.VerifyResponse"> & { + /** + * The verify process result + * + * @generated from field: bool success = 1; + */ + success: boolean; + + /** + * Optional error message if verification failed + * + * @generated from field: optional string error_message = 2; + */ + errorMessage?: string; +}; + +/** + * Describes the message agntcy.dir.sign.v1.VerifyResponse. + * Use `create(VerifyResponseSchema)` to create a new message. + */ +export declare const VerifyResponseSchema: GenMessage; + +/** + * SignService provides methods to sign and verify records. + * + * @generated from service agntcy.dir.sign.v1.SignService + */ +export declare const SignService: GenService<{ + /** + * Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase + * + * @generated from rpc agntcy.dir.sign.v1.SignService.Sign + */ + sign: { + methodKind: "unary"; + input: typeof SignRequestSchema; + output: typeof SignResponseSchema; + }, + /** + * Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted + * + * @generated from rpc agntcy.dir.sign.v1.SignService.Verify + */ + verify: { + methodKind: "unary"; + input: typeof VerifyRequestSchema; + output: typeof VerifyResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.js index aa2010887..ecc22e99a 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/sign_service_pb.js @@ -1,81 +1,81 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/sign_service.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; -import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; -import { file_agntcy_dir_sign_v1_signature } from "./signature_pb.js"; - -/** - * Describes the file agntcy/dir/sign/v1/sign_service.proto. 
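Sign and Verify are unary, so under the same Connect-client assumption as the search sketch, each call resolves to a single response:

```ts
import { createClient, type Transport } from "@connectrpc/connect";
import { SignService } from "./sign_service_pb.js";
import type { SignRequest } from "./sign_service_pb.js";

declare const transport: Transport;  // hypothetical transport, as in the search sketch
declare const request: SignRequest;  // e.g. one of the requests built above

async function signThenVerify(): Promise<void> {
  const client = createClient(SignService, transport);

  const signed = await client.sign(request);
  console.log(signed.signature?.algorithm);

  const verified = await client.verify({ recordRef: request.recordRef });
  if (!verified.success) {
    console.error(verified.errorMessage);
  }
}
```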
- */ -export const file_agntcy_dir_sign_v1_sign_service = /*@__PURE__*/ - fileDesc("CiVhZ250Y3kvZGlyL3NpZ24vdjEvc2lnbl9zZXJ2aWNlLnByb3RvEhJhZ250Y3kuZGlyLnNpZ24udjEiewoLU2lnblJlcXVlc3QSMQoKcmVjb3JkX3JlZhgBIAEoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYSOQoIcHJvdmlkZXIYAiABKAsyJy5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlcXVlc3RQcm92aWRlciKCAQoTU2lnblJlcXVlc3RQcm92aWRlchIwCgRvaWRjGAEgASgLMiAuYWdudGN5LmRpci5zaWduLnYxLlNpZ25XaXRoT0lEQ0gAEi4KA2tleRgCIAEoCzIfLmFnbnRjeS5kaXIuc2lnbi52MS5TaWduV2l0aEtleUgAQgkKB3JlcXVlc3QimwIKDFNpZ25XaXRoT0lEQxIQCghpZF90b2tlbhgBIAEoCRI6CgdvcHRpb25zGAIgASgLMikuYWdudGN5LmRpci5zaWduLnYxLlNpZ25XaXRoT0lEQy5TaWduT3B0cxq8AQoIU2lnbk9wdHMSFwoKZnVsY2lvX3VybBgBIAEoCUgAiAEBEhYKCXJla29yX3VybBgCIAEoCUgBiAEBEhoKDXRpbWVzdGFtcF91cmwYAyABKAlIAogBARIeChFvaWRjX3Byb3ZpZGVyX3VybBgEIAEoCUgDiAEBQg0KC19mdWxjaW9fdXJsQgwKCl9yZWtvcl91cmxCEAoOX3RpbWVzdGFtcF91cmxCFAoSX29pZGNfcHJvdmlkZXJfdXJsIkYKC1NpZ25XaXRoS2V5EhMKC3ByaXZhdGVfa2V5GAEgASgMEhUKCHBhc3N3b3JkGAIgASgMSACIAQFCCwoJX3Bhc3N3b3JkIkAKDFNpZ25SZXNwb25zZRIwCglzaWduYXR1cmUYASABKAsyHS5hZ250Y3kuZGlyLnNpZ24udjEuU2lnbmF0dXJlIkIKDVZlcmlmeVJlcXVlc3QSMQoKcmVjb3JkX3JlZhgBIAEoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYiTwoOVmVyaWZ5UmVzcG9uc2USDwoHc3VjY2VzcxgBIAEoCBIaCg1lcnJvcl9tZXNzYWdlGAIgASgJSACIAQFCEAoOX2Vycm9yX21lc3NhZ2UyqQEKC1NpZ25TZXJ2aWNlEkkKBFNpZ24SHy5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlcXVlc3QaIC5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlc3BvbnNlEk8KBlZlcmlmeRIhLmFnbnRjeS5kaXIuc2lnbi52MS5WZXJpZnlSZXF1ZXN0GiIuYWdudGN5LmRpci5zaWduLnYxLlZlcmlmeVJlc3BvbnNlQrgBChZjb20uYWdudGN5LmRpci5zaWduLnYxQhBTaWduU2VydmljZVByb3RvUAFaIWdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2lnbi92MaICA0FEU6oCEkFnbnRjeS5EaXIuU2lnbi5WMcoCEkFnbnRjeVxEaXJcU2lnblxWMeICHkFnbnRjeVxEaXJcU2lnblxWMVxHUEJNZXRhZGF0YeoCFUFnbnRjeTo6RGlyOjpTaWduOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_sign_v1_signature]); - -/** - * Describes the message agntcy.dir.sign.v1.SignRequest. - * Use `create(SignRequestSchema)` to create a new message. - */ -export const SignRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 0); - -/** - * Describes the message agntcy.dir.sign.v1.SignRequestProvider. - * Use `create(SignRequestProviderSchema)` to create a new message. - */ -export const SignRequestProviderSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 1); - -/** - * Describes the message agntcy.dir.sign.v1.SignWithOIDC. - * Use `create(SignWithOIDCSchema)` to create a new message. - */ -export const SignWithOIDCSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 2); - -/** - * Describes the message agntcy.dir.sign.v1.SignWithOIDC.SignOpts. - * Use `create(SignWithOIDC_SignOptsSchema)` to create a new message. - */ -export const SignWithOIDC_SignOptsSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 2, 0); - -/** - * Describes the message agntcy.dir.sign.v1.SignWithKey. - * Use `create(SignWithKeySchema)` to create a new message. - */ -export const SignWithKeySchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 3); - -/** - * Describes the message agntcy.dir.sign.v1.SignResponse. - * Use `create(SignResponseSchema)` to create a new message. - */ -export const SignResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 4); - -/** - * Describes the message agntcy.dir.sign.v1.VerifyRequest. - * Use `create(VerifyRequestSchema)` to create a new message. 
- */ -export const VerifyRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 5); - -/** - * Describes the message agntcy.dir.sign.v1.VerifyResponse. - * Use `create(VerifyResponseSchema)` to create a new message. - */ -export const VerifyResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_sign_service, 6); - -/** - * SignService provides methods to sign and verify records. - * - * @generated from service agntcy.dir.sign.v1.SignService - */ -export const SignService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_sign_v1_sign_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/sign_service.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; +import { file_agntcy_dir_sign_v1_signature } from "./signature_pb.js"; + +/** + * Describes the file agntcy/dir/sign/v1/sign_service.proto. + */ +export const file_agntcy_dir_sign_v1_sign_service = /*@__PURE__*/ + fileDesc("CiVhZ250Y3kvZGlyL3NpZ24vdjEvc2lnbl9zZXJ2aWNlLnByb3RvEhJhZ250Y3kuZGlyLnNpZ24udjEiewoLU2lnblJlcXVlc3QSMQoKcmVjb3JkX3JlZhgBIAEoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYSOQoIcHJvdmlkZXIYAiABKAsyJy5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlcXVlc3RQcm92aWRlciKCAQoTU2lnblJlcXVlc3RQcm92aWRlchIwCgRvaWRjGAEgASgLMiAuYWdudGN5LmRpci5zaWduLnYxLlNpZ25XaXRoT0lEQ0gAEi4KA2tleRgCIAEoCzIfLmFnbnRjeS5kaXIuc2lnbi52MS5TaWduV2l0aEtleUgAQgkKB3JlcXVlc3QimwIKDFNpZ25XaXRoT0lEQxIQCghpZF90b2tlbhgBIAEoCRI6CgdvcHRpb25zGAIgASgLMikuYWdudGN5LmRpci5zaWduLnYxLlNpZ25XaXRoT0lEQy5TaWduT3B0cxq8AQoIU2lnbk9wdHMSFwoKZnVsY2lvX3VybBgBIAEoCUgAiAEBEhYKCXJla29yX3VybBgCIAEoCUgBiAEBEhoKDXRpbWVzdGFtcF91cmwYAyABKAlIAogBARIeChFvaWRjX3Byb3ZpZGVyX3VybBgEIAEoCUgDiAEBQg0KC19mdWxjaW9fdXJsQgwKCl9yZWtvcl91cmxCEAoOX3RpbWVzdGFtcF91cmxCFAoSX29pZGNfcHJvdmlkZXJfdXJsIkYKC1NpZ25XaXRoS2V5EhMKC3ByaXZhdGVfa2V5GAEgASgMEhUKCHBhc3N3b3JkGAIgASgMSACIAQFCCwoJX3Bhc3N3b3JkIkAKDFNpZ25SZXNwb25zZRIwCglzaWduYXR1cmUYASABKAsyHS5hZ250Y3kuZGlyLnNpZ24udjEuU2lnbmF0dXJlIkIKDVZlcmlmeVJlcXVlc3QSMQoKcmVjb3JkX3JlZhgBIAEoCzIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYiTwoOVmVyaWZ5UmVzcG9uc2USDwoHc3VjY2VzcxgBIAEoCBIaCg1lcnJvcl9tZXNzYWdlGAIgASgJSACIAQFCEAoOX2Vycm9yX21lc3NhZ2UyqQEKC1NpZ25TZXJ2aWNlEkkKBFNpZ24SHy5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlcXVlc3QaIC5hZ250Y3kuZGlyLnNpZ24udjEuU2lnblJlc3BvbnNlEk8KBlZlcmlmeRIhLmFnbnRjeS5kaXIuc2lnbi52MS5WZXJpZnlSZXF1ZXN0GiIuYWdudGN5LmRpci5zaWduLnYxLlZlcmlmeVJlc3BvbnNlQrgBChZjb20uYWdudGN5LmRpci5zaWduLnYxQhBTaWduU2VydmljZVByb3RvUAFaIWdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc2lnbi92MaICA0FEU6oCEkFnbnRjeS5EaXIuU2lnbi5WMcoCEkFnbnRjeVxEaXJcU2lnblxWMeICHkFnbnRjeVxEaXJcU2lnblxWMVxHUEJNZXRhZGF0YeoCFUFnbnRjeTo6RGlyOjpTaWduOjpWMWIGcHJvdG8z", [file_agntcy_dir_core_v1_record, file_agntcy_dir_sign_v1_signature]); + +/** + * Describes the message agntcy.dir.sign.v1.SignRequest. + * Use `create(SignRequestSchema)` to create a new message. + */ +export const SignRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 0); + +/** + * Describes the message agntcy.dir.sign.v1.SignRequestProvider. + * Use `create(SignRequestProviderSchema)` to create a new message. 
+ */ +export const SignRequestProviderSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 1); + +/** + * Describes the message agntcy.dir.sign.v1.SignWithOIDC. + * Use `create(SignWithOIDCSchema)` to create a new message. + */ +export const SignWithOIDCSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 2); + +/** + * Describes the message agntcy.dir.sign.v1.SignWithOIDC.SignOpts. + * Use `create(SignWithOIDC_SignOptsSchema)` to create a new message. + */ +export const SignWithOIDC_SignOptsSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 2, 0); + +/** + * Describes the message agntcy.dir.sign.v1.SignWithKey. + * Use `create(SignWithKeySchema)` to create a new message. + */ +export const SignWithKeySchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 3); + +/** + * Describes the message agntcy.dir.sign.v1.SignResponse. + * Use `create(SignResponseSchema)` to create a new message. + */ +export const SignResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 4); + +/** + * Describes the message agntcy.dir.sign.v1.VerifyRequest. + * Use `create(VerifyRequestSchema)` to create a new message. + */ +export const VerifyRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 5); + +/** + * Describes the message agntcy.dir.sign.v1.VerifyResponse. + * Use `create(VerifyResponseSchema)` to create a new message. + */ +export const VerifyResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_sign_service, 6); + +/** + * SignService provides methods to sign and verify records. + * + * @generated from service agntcy.dir.sign.v1.SignService + */ +export const SignService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_sign_v1_sign_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.d.ts index 049eebbd0..6c97b6413 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.d.ts @@ -1,88 +1,88 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/signature.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/sign/v1/signature.proto. - */ -export declare const file_agntcy_dir_sign_v1_signature: GenFile; - -/** - * Signature is the signing data associated with a Record. - * Multiple signatures can be associated with a single Record, - * ie 1 record : N record signatures. - * - * Storage and management of signatures is provided via - * StoreService as a RecordReferrer object. - * - * Signature can be encoded into RecordReferrer object as follows: - * type = "agntcy.dir.sign.v1.Signature" - * data = Signature message encoded as JSON - * - * @generated from message agntcy.dir.sign.v1.Signature - */ -export declare type Signature = Message<"agntcy.dir.sign.v1.Signature"> & { - /** - * Metadata associated with the signature. - * - * @generated from field: map annotations = 1; - */ - annotations: { [key: string]: string }; - - /** - * Signing timestamp of the record in the RFC3339 format. 
- * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string signed_at = 2; - */ - signedAt: string; - - /** - * The signature algorithm used (e.g., "ECDSA_P256_SHA256"). - * - * @generated from field: string algorithm = 3; - */ - algorithm: string; - - /** - * Base64-encoded signature. - * - * @generated from field: string signature = 4; - */ - signature: string; - - /** - * Base64-encoded signing certificate. - * - * @generated from field: string certificate = 5; - */ - certificate: string; - - /** - * Type of the signature content bundle. - * - * @generated from field: string content_type = 6; - */ - contentType: string; - - /** - * Base64-encoded signature bundle produced by the signer. - * It is up to the client to interpret the content of the bundle. - * - * @generated from field: string content_bundle = 7; - */ - contentBundle: string; -}; - -/** - * Describes the message agntcy.dir.sign.v1.Signature. - * Use `create(SignatureSchema)` to create a new message. - */ -export declare const SignatureSchema: GenMessage; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/signature.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/sign/v1/signature.proto. + */ +export declare const file_agntcy_dir_sign_v1_signature: GenFile; + +/** + * Signature is the signing data associated with a Record. + * Multiple signatures can be associated with a single Record, + * ie 1 record : N record signatures. + * + * Storage and management of signatures is provided via + * StoreService as a RecordReferrer object. + * + * Signature can be encoded into RecordReferrer object as follows: + * type = "agntcy.dir.sign.v1.Signature" + * data = Signature message encoded as JSON + * + * @generated from message agntcy.dir.sign.v1.Signature + */ +export declare type Signature = Message<"agntcy.dir.sign.v1.Signature"> & { + /** + * Metadata associated with the signature. + * + * @generated from field: map annotations = 1; + */ + annotations: { [key: string]: string }; + + /** + * Signing timestamp of the record in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string signed_at = 2; + */ + signedAt: string; + + /** + * The signature algorithm used (e.g., "ECDSA_P256_SHA256"). + * + * @generated from field: string algorithm = 3; + */ + algorithm: string; + + /** + * Base64-encoded signature. + * + * @generated from field: string signature = 4; + */ + signature: string; + + /** + * Base64-encoded signing certificate. + * + * @generated from field: string certificate = 5; + */ + certificate: string; + + /** + * Type of the signature content bundle. + * + * @generated from field: string content_type = 6; + */ + contentType: string; + + /** + * Base64-encoded signature bundle produced by the signer. + * It is up to the client to interpret the content of the bundle. + * + * @generated from field: string content_bundle = 7; + */ + contentBundle: string; +}; + +/** + * Describes the message agntcy.dir.sign.v1.Signature. + * Use `create(SignatureSchema)` to create a new message. 
+ */ +export declare const SignatureSchema: GenMessage; + diff --git a/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.js b/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.js index ec2523c69..58732bb90 100644 --- a/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/sign/v1/signature_pb.js @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/sign/v1/signature.proto (package agntcy.dir.sign.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/sign/v1/signature.proto. - */ -export const file_agntcy_dir_sign_v1_signature = /*@__PURE__*/ - fileDesc("CiJhZ250Y3kvZGlyL3NpZ24vdjEvc2lnbmF0dXJlLnByb3RvEhJhZ250Y3kuZGlyLnNpZ24udjEigAIKCVNpZ25hdHVyZRJDCgthbm5vdGF0aW9ucxgBIAMoCzIuLmFnbnRjeS5kaXIuc2lnbi52MS5TaWduYXR1cmUuQW5ub3RhdGlvbnNFbnRyeRIRCglzaWduZWRfYXQYAiABKAkSEQoJYWxnb3JpdGhtGAMgASgJEhEKCXNpZ25hdHVyZRgEIAEoCRITCgtjZXJ0aWZpY2F0ZRgFIAEoCRIUCgxjb250ZW50X3R5cGUYBiABKAkSFgoOY29udGVudF9idW5kbGUYByABKAkaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBQrYBChZjb20uYWdudGN5LmRpci5zaWduLnYxQg5TaWduYXR1cmVQcm90b1ABWiFnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL3NpZ24vdjGiAgNBRFOqAhJBZ250Y3kuRGlyLlNpZ24uVjHKAhJBZ250Y3lcRGlyXFNpZ25cVjHiAh5BZ250Y3lcRGlyXFNpZ25cVjFcR1BCTWV0YWRhdGHqAhVBZ250Y3k6OkRpcjo6U2lnbjo6VjFiBnByb3RvMw"); - -/** - * Describes the message agntcy.dir.sign.v1.Signature. - * Use `create(SignatureSchema)` to create a new message. - */ -export const SignatureSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_sign_v1_signature, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/sign/v1/signature.proto (package agntcy.dir.sign.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/sign/v1/signature.proto. + */ +export const file_agntcy_dir_sign_v1_signature = /*@__PURE__*/ + fileDesc("CiJhZ250Y3kvZGlyL3NpZ24vdjEvc2lnbmF0dXJlLnByb3RvEhJhZ250Y3kuZGlyLnNpZ24udjEigAIKCVNpZ25hdHVyZRJDCgthbm5vdGF0aW9ucxgBIAMoCzIuLmFnbnRjeS5kaXIuc2lnbi52MS5TaWduYXR1cmUuQW5ub3RhdGlvbnNFbnRyeRIRCglzaWduZWRfYXQYAiABKAkSEQoJYWxnb3JpdGhtGAMgASgJEhEKCXNpZ25hdHVyZRgEIAEoCRITCgtjZXJ0aWZpY2F0ZRgFIAEoCRIUCgxjb250ZW50X3R5cGUYBiABKAkSFgoOY29udGVudF9idW5kbGUYByABKAkaMgoQQW5ub3RhdGlvbnNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBQrYBChZjb20uYWdudGN5LmRpci5zaWduLnYxQg5TaWduYXR1cmVQcm90b1ABWiFnaXRodWIuY29tL2FnbnRjeS9kaXIvYXBpL3NpZ24vdjGiAgNBRFOqAhJBZ250Y3kuRGlyLlNpZ24uVjHKAhJBZ250Y3lcRGlyXFNpZ25cVjHiAh5BZ250Y3lcRGlyXFNpZ25cVjFcR1BCTWV0YWRhdGHqAhVBZ250Y3k6OkRpcjo6U2lnbjo6VjFiBnByb3RvMw"); + +/** + * Describes the message agntcy.dir.sign.v1.Signature. + * Use `create(SignatureSchema)` to create a new message. 
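Since every binary field in Signature travels as a base64 string and signed_at must be RFC3339, a sketch with placeholder values makes the encoding expectations concrete:

```ts
import { create } from "@bufbuild/protobuf";
import { SignatureSchema } from "./signature_pb.js";

// Placeholder values throughout; a real bundle comes from the signer.
const signature = create(SignatureSchema, {
  signedAt: new Date().toISOString(), // ISO 8601 with 'Z', a valid RFC3339 timestamp
  algorithm: "ECDSA_P256_SHA256",
  signature: "MEUCIQ...",             // base64-encoded signature (placeholder)
  certificate: "LS0tLS1CRUdJTi...",   // base64-encoded signing certificate (placeholder)
  contentType: "application/json",    // assumed bundle type; interpretation is client-defined
  contentBundle: "eyJ...",            // base64-encoded bundle (placeholder)
  annotations: { signer: "example" },
});
```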
+ */ +export const SignatureSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_sign_v1_signature, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.d.ts index ea6c5e514..c1c9e76a1 100644 --- a/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.d.ts @@ -1,201 +1,201 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/store/v1/store_service.proto (package agntcy.dir.store.v1, syntax proto3) -/* eslint-disable */ - -import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; -import type { RecordMetaSchema, RecordRef, RecordReferrer, RecordRefSchema, RecordSchema } from "../../core/v1/record_pb.js"; -import type { EmptySchema } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/store/v1/store_service.proto. - */ -export declare const file_agntcy_dir_store_v1_store_service: GenFile; - -/** - * PushReferrerRequest represents a record with optional OCI artifacts for push operations. - * - * @generated from message agntcy.dir.store.v1.PushReferrerRequest - */ -export declare type PushReferrerRequest = Message<"agntcy.dir.store.v1.PushReferrerRequest"> & { - /** - * Record reference - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; - - /** - * RecordReferrer object to be stored for the record - * - * @generated from field: agntcy.dir.core.v1.RecordReferrer referrer = 2; - */ - referrer?: RecordReferrer; -}; - -/** - * Describes the message agntcy.dir.store.v1.PushReferrerRequest. - * Use `create(PushReferrerRequestSchema)` to create a new message. - */ -export declare const PushReferrerRequestSchema: GenMessage; - -/** - * PushReferrerResponse - * - * @generated from message agntcy.dir.store.v1.PushReferrerResponse - */ -export declare type PushReferrerResponse = Message<"agntcy.dir.store.v1.PushReferrerResponse"> & { - /** - * The push process result - * - * @generated from field: bool success = 1; - */ - success: boolean; - - /** - * Optional error message if push failed - * - * @generated from field: optional string error_message = 2; - */ - errorMessage?: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.PushReferrerResponse. - * Use `create(PushReferrerResponseSchema)` to create a new message. - */ -export declare const PushReferrerResponseSchema: GenMessage; - -/** - * PullReferrerRequest represents a record with optional OCI artifacts for pull operations. - * - * @generated from message agntcy.dir.store.v1.PullReferrerRequest - */ -export declare type PullReferrerRequest = Message<"agntcy.dir.store.v1.PullReferrerRequest"> & { - /** - * Record reference - * - * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; - */ - recordRef?: RecordRef; - - /** - * Record referrer type to be pulled - * If not provided, all referrers will be pulled - * - * @generated from field: optional string referrer_type = 2; - */ - referrerType?: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.PullReferrerRequest. - * Use `create(PullReferrerRequestSchema)` to create a new message. 
- */ -export declare const PullReferrerRequestSchema: GenMessage; - -/** - * PullReferrerResponse is returned after successfully fetching a record referrer. - * - * @generated from message agntcy.dir.store.v1.PullReferrerResponse - */ -export declare type PullReferrerResponse = Message<"agntcy.dir.store.v1.PullReferrerResponse"> & { - /** - * RecordReferrer object associated with the record - * - * @generated from field: agntcy.dir.core.v1.RecordReferrer referrer = 1; - */ - referrer?: RecordReferrer; -}; - -/** - * Describes the message agntcy.dir.store.v1.PullReferrerResponse. - * Use `create(PullReferrerResponseSchema)` to create a new message. - */ -export declare const PullReferrerResponseSchema: GenMessage; - -/** - * Defines an interface for content-addressable storage - * service for objects. - * - * Max object size: 4MB (to fully fit in a single request) - * Max metadata size: 100KB - * - * Store service can be implemented by various storage backends, - * such as local file system, OCI registry, etc. - * - * Middleware should be used to control who can perform these RPCs. - * Policies for the middleware can be handled via separate service. - * - * Each operation is performed sequentially, meaning that - * for the N-th request, N-th response will be returned. - * If an error occurs, the stream will be cancelled. - * - * @generated from service agntcy.dir.store.v1.StoreService - */ -export declare const StoreService: GenService<{ - /** - * Push performs write operation for given records. - * - * @generated from rpc agntcy.dir.store.v1.StoreService.Push - */ - push: { - methodKind: "bidi_streaming"; - input: typeof RecordSchema; - output: typeof RecordRefSchema; - }, - /** - * Pull performs read operation for given records. - * - * @generated from rpc agntcy.dir.store.v1.StoreService.Pull - */ - pull: { - methodKind: "bidi_streaming"; - input: typeof RecordRefSchema; - output: typeof RecordSchema; - }, - /** - * Lookup resolves basic metadata for the records. - * - * @generated from rpc agntcy.dir.store.v1.StoreService.Lookup - */ - lookup: { - methodKind: "bidi_streaming"; - input: typeof RecordRefSchema; - output: typeof RecordMetaSchema; - }, - /** - * Remove performs delete operation for the records. - * - * @generated from rpc agntcy.dir.store.v1.StoreService.Delete - */ - delete: { - methodKind: "client_streaming"; - input: typeof RecordRefSchema; - output: typeof EmptySchema; - }, - /** - * PushReferrer performs write operation for record referrers. - * - * @generated from rpc agntcy.dir.store.v1.StoreService.PushReferrer - */ - pushReferrer: { - methodKind: "bidi_streaming"; - input: typeof PushReferrerRequestSchema; - output: typeof PushReferrerResponseSchema; - }, - /** - * PullReferrer performs read operation for record referrers. 
- * - * @generated from rpc agntcy.dir.store.v1.StoreService.PullReferrer - */ - pullReferrer: { - methodKind: "bidi_streaming"; - input: typeof PullReferrerRequestSchema; - output: typeof PullReferrerResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/store/v1/store_service.proto (package agntcy.dir.store.v1, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; +import type { RecordMetaSchema, RecordRef, RecordReferrer, RecordRefSchema, RecordSchema } from "../../core/v1/record_pb.js"; +import type { EmptySchema } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/store/v1/store_service.proto. + */ +export declare const file_agntcy_dir_store_v1_store_service: GenFile; + +/** + * PushReferrerRequest represents a record with optional OCI artifacts for push operations. + * + * @generated from message agntcy.dir.store.v1.PushReferrerRequest + */ +export declare type PushReferrerRequest = Message<"agntcy.dir.store.v1.PushReferrerRequest"> & { + /** + * Record reference + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; + + /** + * RecordReferrer object to be stored for the record + * + * @generated from field: agntcy.dir.core.v1.RecordReferrer referrer = 2; + */ + referrer?: RecordReferrer; +}; + +/** + * Describes the message agntcy.dir.store.v1.PushReferrerRequest. + * Use `create(PushReferrerRequestSchema)` to create a new message. + */ +export declare const PushReferrerRequestSchema: GenMessage; + +/** + * PushReferrerResponse + * + * @generated from message agntcy.dir.store.v1.PushReferrerResponse + */ +export declare type PushReferrerResponse = Message<"agntcy.dir.store.v1.PushReferrerResponse"> & { + /** + * The push process result + * + * @generated from field: bool success = 1; + */ + success: boolean; + + /** + * Optional error message if push failed + * + * @generated from field: optional string error_message = 2; + */ + errorMessage?: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.PushReferrerResponse. + * Use `create(PushReferrerResponseSchema)` to create a new message. + */ +export declare const PushReferrerResponseSchema: GenMessage; + +/** + * PullReferrerRequest represents a record with optional OCI artifacts for pull operations. + * + * @generated from message agntcy.dir.store.v1.PullReferrerRequest + */ +export declare type PullReferrerRequest = Message<"agntcy.dir.store.v1.PullReferrerRequest"> & { + /** + * Record reference + * + * @generated from field: agntcy.dir.core.v1.RecordRef record_ref = 1; + */ + recordRef?: RecordRef; + + /** + * Record referrer type to be pulled + * If not provided, all referrers will be pulled + * + * @generated from field: optional string referrer_type = 2; + */ + referrerType?: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.PullReferrerRequest. + * Use `create(PullReferrerRequestSchema)` to create a new message. + */ +export declare const PullReferrerRequestSchema: GenMessage; + +/** + * PullReferrerResponse is returned after successfully fetching a record referrer. 
+ * + * @generated from message agntcy.dir.store.v1.PullReferrerResponse + */ +export declare type PullReferrerResponse = Message<"agntcy.dir.store.v1.PullReferrerResponse"> & { + /** + * RecordReferrer object associated with the record + * + * @generated from field: agntcy.dir.core.v1.RecordReferrer referrer = 1; + */ + referrer?: RecordReferrer; +}; + +/** + * Describes the message agntcy.dir.store.v1.PullReferrerResponse. + * Use `create(PullReferrerResponseSchema)` to create a new message. + */ +export declare const PullReferrerResponseSchema: GenMessage; + +/** + * Defines an interface for content-addressable storage + * service for objects. + * + * Max object size: 4MB (to fully fit in a single request) + * Max metadata size: 100KB + * + * Store service can be implemented by various storage backends, + * such as local file system, OCI registry, etc. + * + * Middleware should be used to control who can perform these RPCs. + * Policies for the middleware can be handled via separate service. + * + * Each operation is performed sequentially, meaning that + * for the N-th request, N-th response will be returned. + * If an error occurs, the stream will be cancelled. + * + * @generated from service agntcy.dir.store.v1.StoreService + */ +export declare const StoreService: GenService<{ + /** + * Push performs write operation for given records. + * + * @generated from rpc agntcy.dir.store.v1.StoreService.Push + */ + push: { + methodKind: "bidi_streaming"; + input: typeof RecordSchema; + output: typeof RecordRefSchema; + }, + /** + * Pull performs read operation for given records. + * + * @generated from rpc agntcy.dir.store.v1.StoreService.Pull + */ + pull: { + methodKind: "bidi_streaming"; + input: typeof RecordRefSchema; + output: typeof RecordSchema; + }, + /** + * Lookup resolves basic metadata for the records. + * + * @generated from rpc agntcy.dir.store.v1.StoreService.Lookup + */ + lookup: { + methodKind: "bidi_streaming"; + input: typeof RecordRefSchema; + output: typeof RecordMetaSchema; + }, + /** + * Remove performs delete operation for the records. + * + * @generated from rpc agntcy.dir.store.v1.StoreService.Delete + */ + delete: { + methodKind: "client_streaming"; + input: typeof RecordRefSchema; + output: typeof EmptySchema; + }, + /** + * PushReferrer performs write operation for record referrers. + * + * @generated from rpc agntcy.dir.store.v1.StoreService.PushReferrer + */ + pushReferrer: { + methodKind: "bidi_streaming"; + input: typeof PushReferrerRequestSchema; + output: typeof PushReferrerResponseSchema; + }, + /** + * PullReferrer performs read operation for record referrers. 
+ * + * @generated from rpc agntcy.dir.store.v1.StoreService.PullReferrer + */ + pullReferrer: { + methodKind: "bidi_streaming"; + input: typeof PullReferrerRequestSchema; + output: typeof PullReferrerResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.js index 140895cd9..3e6a20b77 100644 --- a/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/store/v1/store_service_pb.js @@ -1,67 +1,67 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/store/v1/store_service.proto (package agntcy.dir.store.v1, syntax proto3) -/* eslint-disable */ - -import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; -import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; -import { file_google_protobuf_empty } from "@bufbuild/protobuf/wkt"; - -/** - * Describes the file agntcy/dir/store/v1/store_service.proto. - */ -export const file_agntcy_dir_store_v1_store_service = /*@__PURE__*/ - fileDesc("CidhZ250Y3kvZGlyL3N0b3JlL3YxL3N0b3JlX3NlcnZpY2UucHJvdG8SE2FnbnRjeS5kaXIuc3RvcmUudjEifgoTUHVzaFJlZmVycmVyUmVxdWVzdBIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhI0CghyZWZlcnJlchgCIAEoCzIiLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlciJVChRQdXNoUmVmZXJyZXJSZXNwb25zZRIPCgdzdWNjZXNzGAEgASgIEhoKDWVycm9yX21lc3NhZ2UYAiABKAlIAIgBAUIQCg5fZXJyb3JfbWVzc2FnZSJ2ChNQdWxsUmVmZXJyZXJSZXF1ZXN0EjEKCnJlY29yZF9yZWYYASABKAsyHS5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkUmVmEhoKDXJlZmVycmVyX3R5cGUYAiABKAlIAIgBAUIQCg5fcmVmZXJyZXJfdHlwZSJMChRQdWxsUmVmZXJyZXJSZXNwb25zZRI0CghyZWZlcnJlchgBIAEoCzIiLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlcjL+AwoMU3RvcmVTZXJ2aWNlEkUKBFB1c2gSGi5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkGh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZigBMAESRQoEUHVsbBIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYaGi5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkKAEwARJLCgZMb29rdXASHS5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkUmVmGh4uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZE1ldGEoATABEkEKBkRlbGV0ZRIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYaFi5nb29nbGUucHJvdG9idWYuRW1wdHkoARJnCgxQdXNoUmVmZXJyZXISKC5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1c2hSZWZlcnJlclJlcXVlc3QaKS5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1c2hSZWZlcnJlclJlc3BvbnNlKAEwARJnCgxQdWxsUmVmZXJyZXISKC5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1bGxSZWZlcnJlclJlcXVlc3QaKS5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1bGxSZWZlcnJlclJlc3BvbnNlKAEwAUK/AQoXY29tLmFnbnRjeS5kaXIuc3RvcmUudjFCEVN0b3JlU2VydmljZVByb3RvUAFaImdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc3RvcmUvdjGiAgNBRFOqAhNBZ250Y3kuRGlyLlN0b3JlLlYxygITQWdudGN5XERpclxTdG9yZVxWMeICH0FnbnRjeVxEaXJcU3RvcmVcVjFcR1BCTWV0YWRhdGHqAhZBZ250Y3k6OkRpcjo6U3RvcmU6OlYxYgZwcm90bzM", [file_agntcy_dir_core_v1_record, file_google_protobuf_empty]); - -/** - * Describes the message agntcy.dir.store.v1.PushReferrerRequest. - * Use `create(PushReferrerRequestSchema)` to create a new message. - */ -export const PushReferrerRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_store_service, 0); - -/** - * Describes the message agntcy.dir.store.v1.PushReferrerResponse. - * Use `create(PushReferrerResponseSchema)` to create a new message. - */ -export const PushReferrerResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_store_service, 1); - -/** - * Describes the message agntcy.dir.store.v1.PullReferrerRequest. 
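The StoreService declaration above only carries metadata; a Connect client gives its bidi-streaming RPCs an async-iterable shape, so pushing N records yields N RecordRefs in order, as the service comment promises. A sketch under stated assumptions — a Connect v2 gRPC transport, a Directory node at localhost:8888, and record fields populated per core/v1/record.proto:

import { create } from "@bufbuild/protobuf";
import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { StoreService } from "./models/agntcy/dir/store/v1/store_service_pb.js";
import { RecordSchema } from "./models/agntcy/dir/core/v1/record_pb.js";

const transport = createGrpcTransport({ baseUrl: "http://localhost:8888" }); // assumed address
const client = createClient(StoreService, transport);

// Push is bidirectional: the N-th response is the RecordRef for the N-th record sent.
async function* records() {
  yield create(RecordSchema, {}); // populate real record fields; each record must fit in 4MB
}

for await (const ref of client.push(records())) {
  console.log("stored:", ref);
}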
- * Use `create(PullReferrerRequestSchema)` to create a new message. - */ -export const PullReferrerRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_store_service, 2); - -/** - * Describes the message agntcy.dir.store.v1.PullReferrerResponse. - * Use `create(PullReferrerResponseSchema)` to create a new message. - */ -export const PullReferrerResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_store_service, 3); - -/** - * Defines an interface for content-addressable storage - * service for objects. - * - * Max object size: 4MB (to fully fit in a single request) - * Max metadata size: 100KB - * - * Store service can be implemented by various storage backends, - * such as local file system, OCI registry, etc. - * - * Middleware should be used to control who can perform these RPCs. - * Policies for the middleware can be handled via separate service. - * - * Each operation is performed sequentially, meaning that - * for the N-th request, N-th response will be returned. - * If an error occurs, the stream will be cancelled. - * - * @generated from service agntcy.dir.store.v1.StoreService - */ -export const StoreService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_store_v1_store_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/store/v1/store_service.proto (package agntcy.dir.store.v1, syntax proto3) +/* eslint-disable */ + +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_agntcy_dir_core_v1_record } from "../../core/v1/record_pb.js"; +import { file_google_protobuf_empty } from "@bufbuild/protobuf/wkt"; + +/** + * Describes the file agntcy/dir/store/v1/store_service.proto. 
+ */ +export const file_agntcy_dir_store_v1_store_service = /*@__PURE__*/ + fileDesc("CidhZ250Y3kvZGlyL3N0b3JlL3YxL3N0b3JlX3NlcnZpY2UucHJvdG8SE2FnbnRjeS5kaXIuc3RvcmUudjEifgoTUHVzaFJlZmVycmVyUmVxdWVzdBIxCgpyZWNvcmRfcmVmGAEgASgLMh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZhI0CghyZWZlcnJlchgCIAEoCzIiLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlciJVChRQdXNoUmVmZXJyZXJSZXNwb25zZRIPCgdzdWNjZXNzGAEgASgIEhoKDWVycm9yX21lc3NhZ2UYAiABKAlIAIgBAUIQCg5fZXJyb3JfbWVzc2FnZSJ2ChNQdWxsUmVmZXJyZXJSZXF1ZXN0EjEKCnJlY29yZF9yZWYYASABKAsyHS5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkUmVmEhoKDXJlZmVycmVyX3R5cGUYAiABKAlIAIgBAUIQCg5fcmVmZXJyZXJfdHlwZSJMChRQdWxsUmVmZXJyZXJSZXNwb25zZRI0CghyZWZlcnJlchgBIAEoCzIiLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWZlcnJlcjL+AwoMU3RvcmVTZXJ2aWNlEkUKBFB1c2gSGi5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkGh0uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZFJlZigBMAESRQoEUHVsbBIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYaGi5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkKAEwARJLCgZMb29rdXASHS5hZ250Y3kuZGlyLmNvcmUudjEuUmVjb3JkUmVmGh4uYWdudGN5LmRpci5jb3JlLnYxLlJlY29yZE1ldGEoATABEkEKBkRlbGV0ZRIdLmFnbnRjeS5kaXIuY29yZS52MS5SZWNvcmRSZWYaFi5nb29nbGUucHJvdG9idWYuRW1wdHkoARJnCgxQdXNoUmVmZXJyZXISKC5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1c2hSZWZlcnJlclJlcXVlc3QaKS5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1c2hSZWZlcnJlclJlc3BvbnNlKAEwARJnCgxQdWxsUmVmZXJyZXISKC5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1bGxSZWZlcnJlclJlcXVlc3QaKS5hZ250Y3kuZGlyLnN0b3JlLnYxLlB1bGxSZWZlcnJlclJlc3BvbnNlKAEwAUK/AQoXY29tLmFnbnRjeS5kaXIuc3RvcmUudjFCEVN0b3JlU2VydmljZVByb3RvUAFaImdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc3RvcmUvdjGiAgNBRFOqAhNBZ250Y3kuRGlyLlN0b3JlLlYxygITQWdudGN5XERpclxTdG9yZVxWMeICH0FnbnRjeVxEaXJcU3RvcmVcVjFcR1BCTWV0YWRhdGHqAhZBZ250Y3k6OkRpcjo6U3RvcmU6OlYxYgZwcm90bzM", [file_agntcy_dir_core_v1_record, file_google_protobuf_empty]); + +/** + * Describes the message agntcy.dir.store.v1.PushReferrerRequest. + * Use `create(PushReferrerRequestSchema)` to create a new message. + */ +export const PushReferrerRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_store_service, 0); + +/** + * Describes the message agntcy.dir.store.v1.PushReferrerResponse. + * Use `create(PushReferrerResponseSchema)` to create a new message. + */ +export const PushReferrerResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_store_service, 1); + +/** + * Describes the message agntcy.dir.store.v1.PullReferrerRequest. + * Use `create(PullReferrerRequestSchema)` to create a new message. + */ +export const PullReferrerRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_store_service, 2); + +/** + * Describes the message agntcy.dir.store.v1.PullReferrerResponse. + * Use `create(PullReferrerResponseSchema)` to create a new message. + */ +export const PullReferrerResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_store_service, 3); + +/** + * Defines an interface for content-addressable storage + * service for objects. + * + * Max object size: 4MB (to fully fit in a single request) + * Max metadata size: 100KB + * + * Store service can be implemented by various storage backends, + * such as local file system, OCI registry, etc. + * + * Middleware should be used to control who can perform these RPCs. + * Policies for the middleware can be handled via separate service. + * + * Each operation is performed sequentially, meaning that + * for the N-th request, N-th response will be returned. + * If an error occurs, the stream will be cancelled. 
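The four referrer messages indexed above (entries 0–3 of the file descriptor) form the request/response pairs for PushReferrer and PullReferrer. A sketch of pulling every referrer attached to one record — the message field names come from the declarations in this diff, while the `cid` field on RecordRef and the CID value itself are assumptions for illustration:

import { create } from "@bufbuild/protobuf";
import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { PullReferrerRequestSchema, StoreService } from "./models/agntcy/dir/store/v1/store_service_pb.js";
import { RecordRefSchema } from "./models/agntcy/dir/core/v1/record_pb.js";

const client = createClient(StoreService, createGrpcTransport({ baseUrl: "http://localhost:8888" }));

async function* requests() {
  yield create(PullReferrerRequestSchema, {
    recordRef: create(RecordRefSchema, { cid: "baguqeera..." }), // `cid` is assumed; placeholder value
    // referrerType omitted: per the comment above, all referrers are pulled
  });
}

for await (const res of client.pullReferrer(requests())) {
  console.log("referrer:", res.referrer);
}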
+ * + * @generated from service agntcy.dir.store.v1.StoreService + */ +export const StoreService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_store_v1_store_service, 0); + diff --git a/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.d.ts b/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.d.ts index cc4af80f4..d8abffda7 100644 --- a/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.d.ts +++ b/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.d.ts @@ -1,450 +1,450 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/store/v1/sync_service.proto (package agntcy.dir.store.v1, syntax proto3) -/* eslint-disable */ - -import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; -import type { Message } from "@bufbuild/protobuf"; - -/** - * Describes the file agntcy/dir/store/v1/sync_service.proto. - */ -export declare const file_agntcy_dir_store_v1_sync_service: GenFile; - -/** - * CreateSyncRequest defines the parameters for creating a new synchronization operation. - * - * Currently supports basic synchronization of all objects from a remote Directory. - * Future versions may include additional options for filtering and scheduling capabilities. - * - * @generated from message agntcy.dir.store.v1.CreateSyncRequest - */ -export declare type CreateSyncRequest = Message<"agntcy.dir.store.v1.CreateSyncRequest"> & { - /** - * URL of the remote Registry to synchronize from. - * - * This should be a complete URL including protocol and port if non-standard. - * Examples: - * - "https://directory.example.com" - * - "http://localhost:8080" - * - "https://directory.example.com:9443" - * - * @generated from field: string remote_directory_url = 1; - */ - remoteDirectoryUrl: string; - - /** - * List of CIDs to synchronize from the remote Directory. - * If empty, all objects will be synchronized. - * - * @generated from field: repeated string cids = 2; - */ - cids: string[]; -}; - -/** - * Describes the message agntcy.dir.store.v1.CreateSyncRequest. - * Use `create(CreateSyncRequestSchema)` to create a new message. - */ -export declare const CreateSyncRequestSchema: GenMessage; - -/** - * CreateSyncResponse contains the result of creating a new synchronization operation. - * - * @generated from message agntcy.dir.store.v1.CreateSyncResponse - */ -export declare type CreateSyncResponse = Message<"agntcy.dir.store.v1.CreateSyncResponse"> & { - /** - * Unique identifier for the created synchronization operation. - * This ID can be used with other SyncService RPCs to monitor and manage the sync. - * - * @generated from field: string sync_id = 1; - */ - syncId: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.CreateSyncResponse. - * Use `create(CreateSyncResponseSchema)` to create a new message. - */ -export declare const CreateSyncResponseSchema: GenMessage; - -/** - * ListSyncsRequest specifies parameters for listing synchronization operations. - * - * @generated from message agntcy.dir.store.v1.ListSyncsRequest - */ -export declare type ListSyncsRequest = Message<"agntcy.dir.store.v1.ListSyncsRequest"> & { - /** - * Optional limit on the number of results to return. - * - * @generated from field: optional uint32 limit = 2; - */ - limit?: number; - - /** - * Optional offset for pagination of results. 
- * - * @generated from field: optional uint32 offset = 3; - */ - offset?: number; -}; - -/** - * Describes the message agntcy.dir.store.v1.ListSyncsRequest. - * Use `create(ListSyncsRequestSchema)` to create a new message. - */ -export declare const ListSyncsRequestSchema: GenMessage; - -/** - * ListSyncItem represents a single synchronization in the list of all syncs. - * - * @generated from message agntcy.dir.store.v1.ListSyncsItem - */ -export declare type ListSyncsItem = Message<"agntcy.dir.store.v1.ListSyncsItem"> & { - /** - * Unique identifier of the synchronization operation. - * - * @generated from field: string sync_id = 1; - */ - syncId: string; - - /** - * Current status of the synchronization operation. - * - * @generated from field: agntcy.dir.store.v1.SyncStatus status = 2; - */ - status: SyncStatus; - - /** - * URL of the remote Directory being synchronized from. - * - * @generated from field: string remote_directory_url = 3; - */ - remoteDirectoryUrl: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.ListSyncsItem. - * Use `create(ListSyncsItemSchema)` to create a new message. - */ -export declare const ListSyncsItemSchema: GenMessage; - -/** - * GetSyncRequest specifies which synchronization status to retrieve. - * - * @generated from message agntcy.dir.store.v1.GetSyncRequest - */ -export declare type GetSyncRequest = Message<"agntcy.dir.store.v1.GetSyncRequest"> & { - /** - * Unique identifier of the synchronization operation to query. - * - * @generated from field: string sync_id = 1; - */ - syncId: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.GetSyncRequest. - * Use `create(GetSyncRequestSchema)` to create a new message. - */ -export declare const GetSyncRequestSchema: GenMessage; - -/** - * GetSyncResponse provides detailed information about a specific synchronization operation. - * - * @generated from message agntcy.dir.store.v1.GetSyncResponse - */ -export declare type GetSyncResponse = Message<"agntcy.dir.store.v1.GetSyncResponse"> & { - /** - * Unique identifier of the synchronization operation. - * - * @generated from field: string sync_id = 1; - */ - syncId: string; - - /** - * Current status of the synchronization operation. - * - * @generated from field: agntcy.dir.store.v1.SyncStatus status = 2; - */ - status: SyncStatus; - - /** - * URL of the remote Directory node being synchronized from. - * - * @generated from field: string remote_directory_url = 3; - */ - remoteDirectoryUrl: string; - - /** - * Timestamp when the synchronization operation was created in the RFC3339 format. - * Specs: https://www.rfc-editor.org/rfc/rfc3339.html - * - * @generated from field: string created_time = 4; - */ - createdTime: string; - - /** - * Timestamp of the most recent status update for this synchronization in the RFC3339 format. - * - * @generated from field: string last_update_time = 5; - */ - lastUpdateTime: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.GetSyncResponse. - * Use `create(GetSyncResponseSchema)` to create a new message. - */ -export declare const GetSyncResponseSchema: GenMessage; - -/** - * DeleteSyncRequest specifies which synchronization to delete. - * - * @generated from message agntcy.dir.store.v1.DeleteSyncRequest - */ -export declare type DeleteSyncRequest = Message<"agntcy.dir.store.v1.DeleteSyncRequest"> & { - /** - * Unique identifier of the synchronization operation to delete. 
- * - * @generated from field: string sync_id = 1; - */ - syncId: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.DeleteSyncRequest. - * Use `create(DeleteSyncRequestSchema)` to create a new message. - */ -export declare const DeleteSyncRequestSchema: GenMessage; - -/** - * DeleteSyncResponse - * - * @generated from message agntcy.dir.store.v1.DeleteSyncResponse - */ -export declare type DeleteSyncResponse = Message<"agntcy.dir.store.v1.DeleteSyncResponse"> & { -}; - -/** - * Describes the message agntcy.dir.store.v1.DeleteSyncResponse. - * Use `create(DeleteSyncResponseSchema)` to create a new message. - */ -export declare const DeleteSyncResponseSchema: GenMessage; - -/** - * @generated from message agntcy.dir.store.v1.RequestRegistryCredentialsRequest - */ -export declare type RequestRegistryCredentialsRequest = Message<"agntcy.dir.store.v1.RequestRegistryCredentialsRequest"> & { - /** - * Identity of the requesting node - * For example: spiffe://example.org/service/foo - * - * @generated from field: string requesting_node_id = 1; - */ - requestingNodeId: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsRequest. - * Use `create(RequestRegistryCredentialsRequestSchema)` to create a new message. - */ -export declare const RequestRegistryCredentialsRequestSchema: GenMessage; - -/** - * @generated from message agntcy.dir.store.v1.RequestRegistryCredentialsResponse - */ -export declare type RequestRegistryCredentialsResponse = Message<"agntcy.dir.store.v1.RequestRegistryCredentialsResponse"> & { - /** - * Success status of the credential negotiation - * - * @generated from field: bool success = 1; - */ - success: boolean; - - /** - * Error message if negotiation failed - * - * @generated from field: string error_message = 2; - */ - errorMessage: string; - - /** - * URL of the remote Registry being synchronized from. - * - * @generated from field: string remote_registry_url = 3; - */ - remoteRegistryUrl: string; - - /** - * Registry credentials (oneof based on credential type) - * - * @generated from oneof agntcy.dir.store.v1.RequestRegistryCredentialsResponse.credentials - */ - credentials: { - /** - * CertificateCredentials certificate = 5; - * - * @generated from field: agntcy.dir.store.v1.BasicAuthCredentials basic_auth = 4; - */ - value: BasicAuthCredentials; - case: "basicAuth"; - } | { case: undefined; value?: undefined }; -}; - -/** - * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsResponse. - * Use `create(RequestRegistryCredentialsResponseSchema)` to create a new message. - */ -export declare const RequestRegistryCredentialsResponseSchema: GenMessage; - -/** - * Supporting credential type definitions - * - * @generated from message agntcy.dir.store.v1.BasicAuthCredentials - */ -export declare type BasicAuthCredentials = Message<"agntcy.dir.store.v1.BasicAuthCredentials"> & { - /** - * @generated from field: string username = 1; - */ - username: string; - - /** - * @generated from field: string password = 2; - */ - password: string; -}; - -/** - * Describes the message agntcy.dir.store.v1.BasicAuthCredentials. - * Use `create(BasicAuthCredentialsSchema)` to create a new message. - */ -export declare const BasicAuthCredentialsSchema: GenMessage; - -/** - * SyncStatus enumeration defines the possible states of a synchronization operation. 
- * - * @generated from enum agntcy.dir.store.v1.SyncStatus - */ -export enum SyncStatus { - /** - * Default/unset status - should not be used in practice - * - * @generated from enum value: SYNC_STATUS_UNSPECIFIED = 0; - */ - UNSPECIFIED = 0, - - /** - * Sync operation has been created but not yet started - * - * @generated from enum value: SYNC_STATUS_PENDING = 1; - */ - PENDING = 1, - - /** - * Sync operation is actively discovering and transferring objects - * - * @generated from enum value: SYNC_STATUS_IN_PROGRESS = 2; - */ - IN_PROGRESS = 2, - - /** - * Sync operation encountered an error and stopped - * - * @generated from enum value: SYNC_STATUS_FAILED = 3; - */ - FAILED = 3, - - /** - * Sync operation has been marked for deletion but cleanup not yet started - * - * @generated from enum value: SYNC_STATUS_DELETE_PENDING = 4; - */ - DELETE_PENDING = 4, - - /** - * Sync operation has been successfully deleted and cleaned up - * - * @generated from enum value: SYNC_STATUS_DELETED = 5; - */ - DELETED = 5, -} - -/** - * Describes the enum agntcy.dir.store.v1.SyncStatus. - */ -export declare const SyncStatusSchema: GenEnum; - -/** - * SyncService provides functionality for synchronizing objects between Directory nodes. - * - * This service enables one-way synchronization from a remote Directory node to the local node, - * allowing distributed Directory instances to share and replicate objects. The service supports - * both on-demand synchronization and tracking of sync operations through their lifecycle. - * - * @generated from service agntcy.dir.store.v1.SyncService - */ -export declare const SyncService: GenService<{ - /** - * CreateSync initiates a new synchronization operation from a remote Directory node. - * - * The operation is non-blocking and returns immediately with a sync ID that can be used - * to track progress and manage the sync operation. - * - * @generated from rpc agntcy.dir.store.v1.SyncService.CreateSync - */ - createSync: { - methodKind: "unary"; - input: typeof CreateSyncRequestSchema; - output: typeof CreateSyncResponseSchema; - }, - /** - * ListSyncs returns a stream of all sync operations known to the system. - * - * This includes active, completed, and failed synchronizations. - * - * @generated from rpc agntcy.dir.store.v1.SyncService.ListSyncs - */ - listSyncs: { - methodKind: "server_streaming"; - input: typeof ListSyncsRequestSchema; - output: typeof ListSyncsItemSchema; - }, - /** - * GetSync retrieves detailed status information for a specific synchronization. - * - * @generated from rpc agntcy.dir.store.v1.SyncService.GetSync - */ - getSync: { - methodKind: "unary"; - input: typeof GetSyncRequestSchema; - output: typeof GetSyncResponseSchema; - }, - /** - * DeleteSync removes a synchronization operation from the system. - * - * @generated from rpc agntcy.dir.store.v1.SyncService.DeleteSync - */ - deleteSync: { - methodKind: "unary"; - input: typeof DeleteSyncRequestSchema; - output: typeof DeleteSyncResponseSchema; - }, - /** - * RequestRegistryCredentials requests registry credentials between two Directory nodes. - * - * This RPC allows a requesting node to authenticate with this node and obtain - * temporary registry credentials for secure Zot-based synchronization. 
- * - * @generated from rpc agntcy.dir.store.v1.SyncService.RequestRegistryCredentials - */ - requestRegistryCredentials: { - methodKind: "unary"; - input: typeof RequestRegistryCredentialsRequestSchema; - output: typeof RequestRegistryCredentialsResponseSchema; - }, -}>; - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/store/v1/sync_service.proto (package agntcy.dir.store.v1, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file agntcy/dir/store/v1/sync_service.proto. + */ +export declare const file_agntcy_dir_store_v1_sync_service: GenFile; + +/** + * CreateSyncRequest defines the parameters for creating a new synchronization operation. + * + * Currently supports basic synchronization of all objects from a remote Directory. + * Future versions may include additional options for filtering and scheduling capabilities. + * + * @generated from message agntcy.dir.store.v1.CreateSyncRequest + */ +export declare type CreateSyncRequest = Message<"agntcy.dir.store.v1.CreateSyncRequest"> & { + /** + * URL of the remote Registry to synchronize from. + * + * This should be a complete URL including protocol and port if non-standard. + * Examples: + * - "https://directory.example.com" + * - "http://localhost:8080" + * - "https://directory.example.com:9443" + * + * @generated from field: string remote_directory_url = 1; + */ + remoteDirectoryUrl: string; + + /** + * List of CIDs to synchronize from the remote Directory. + * If empty, all objects will be synchronized. + * + * @generated from field: repeated string cids = 2; + */ + cids: string[]; +}; + +/** + * Describes the message agntcy.dir.store.v1.CreateSyncRequest. + * Use `create(CreateSyncRequestSchema)` to create a new message. + */ +export declare const CreateSyncRequestSchema: GenMessage; + +/** + * CreateSyncResponse contains the result of creating a new synchronization operation. + * + * @generated from message agntcy.dir.store.v1.CreateSyncResponse + */ +export declare type CreateSyncResponse = Message<"agntcy.dir.store.v1.CreateSyncResponse"> & { + /** + * Unique identifier for the created synchronization operation. + * This ID can be used with other SyncService RPCs to monitor and manage the sync. + * + * @generated from field: string sync_id = 1; + */ + syncId: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.CreateSyncResponse. + * Use `create(CreateSyncResponseSchema)` to create a new message. + */ +export declare const CreateSyncResponseSchema: GenMessage; + +/** + * ListSyncsRequest specifies parameters for listing synchronization operations. + * + * @generated from message agntcy.dir.store.v1.ListSyncsRequest + */ +export declare type ListSyncsRequest = Message<"agntcy.dir.store.v1.ListSyncsRequest"> & { + /** + * Optional limit on the number of results to return. + * + * @generated from field: optional uint32 limit = 2; + */ + limit?: number; + + /** + * Optional offset for pagination of results. + * + * @generated from field: optional uint32 offset = 3; + */ + offset?: number; +}; + +/** + * Describes the message agntcy.dir.store.v1.ListSyncsRequest. + * Use `create(ListSyncsRequestSchema)` to create a new message. 
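CreateSync is a plain unary RPC whose response carries only the sync_id that every other SyncService call keys on. A sketch, with the remote URL illustrative and the transport assumptions as in the earlier snippets:

import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { SyncService } from "./models/agntcy/dir/store/v1/sync_service_pb.js";

const client = createClient(SyncService, createGrpcTransport({ baseUrl: "http://localhost:8888" }));

// Leaving cids empty syncs every object from the remote Directory;
// pass specific CIDs to narrow the scope.
const { syncId } = await client.createSync({
  remoteDirectoryUrl: "https://directory.example.com",
  cids: [],
});
console.log("created sync", syncId);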
+ */ +export declare const ListSyncsRequestSchema: GenMessage; + +/** + * ListSyncItem represents a single synchronization in the list of all syncs. + * + * @generated from message agntcy.dir.store.v1.ListSyncsItem + */ +export declare type ListSyncsItem = Message<"agntcy.dir.store.v1.ListSyncsItem"> & { + /** + * Unique identifier of the synchronization operation. + * + * @generated from field: string sync_id = 1; + */ + syncId: string; + + /** + * Current status of the synchronization operation. + * + * @generated from field: agntcy.dir.store.v1.SyncStatus status = 2; + */ + status: SyncStatus; + + /** + * URL of the remote Directory being synchronized from. + * + * @generated from field: string remote_directory_url = 3; + */ + remoteDirectoryUrl: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.ListSyncsItem. + * Use `create(ListSyncsItemSchema)` to create a new message. + */ +export declare const ListSyncsItemSchema: GenMessage; + +/** + * GetSyncRequest specifies which synchronization status to retrieve. + * + * @generated from message agntcy.dir.store.v1.GetSyncRequest + */ +export declare type GetSyncRequest = Message<"agntcy.dir.store.v1.GetSyncRequest"> & { + /** + * Unique identifier of the synchronization operation to query. + * + * @generated from field: string sync_id = 1; + */ + syncId: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.GetSyncRequest. + * Use `create(GetSyncRequestSchema)` to create a new message. + */ +export declare const GetSyncRequestSchema: GenMessage; + +/** + * GetSyncResponse provides detailed information about a specific synchronization operation. + * + * @generated from message agntcy.dir.store.v1.GetSyncResponse + */ +export declare type GetSyncResponse = Message<"agntcy.dir.store.v1.GetSyncResponse"> & { + /** + * Unique identifier of the synchronization operation. + * + * @generated from field: string sync_id = 1; + */ + syncId: string; + + /** + * Current status of the synchronization operation. + * + * @generated from field: agntcy.dir.store.v1.SyncStatus status = 2; + */ + status: SyncStatus; + + /** + * URL of the remote Directory node being synchronized from. + * + * @generated from field: string remote_directory_url = 3; + */ + remoteDirectoryUrl: string; + + /** + * Timestamp when the synchronization operation was created in the RFC3339 format. + * Specs: https://www.rfc-editor.org/rfc/rfc3339.html + * + * @generated from field: string created_time = 4; + */ + createdTime: string; + + /** + * Timestamp of the most recent status update for this synchronization in the RFC3339 format. + * + * @generated from field: string last_update_time = 5; + */ + lastUpdateTime: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.GetSyncResponse. + * Use `create(GetSyncResponseSchema)` to create a new message. + */ +export declare const GetSyncResponseSchema: GenMessage; + +/** + * DeleteSyncRequest specifies which synchronization to delete. + * + * @generated from message agntcy.dir.store.v1.DeleteSyncRequest + */ +export declare type DeleteSyncRequest = Message<"agntcy.dir.store.v1.DeleteSyncRequest"> & { + /** + * Unique identifier of the synchronization operation to delete. + * + * @generated from field: string sync_id = 1; + */ + syncId: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.DeleteSyncRequest. + * Use `create(DeleteSyncRequestSchema)` to create a new message. 
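ListSyncs is the service's one server-streaming RPC, so the client call returns an async iterable, with limit and offset paging through the results. A self-contained sketch under the same assumed transport:

import { createClient } from "@connectrpc/connect";
import { createGrpcTransport } from "@connectrpc/connect-node";
import { SyncService, SyncStatus } from "./models/agntcy/dir/store/v1/sync_service_pb.js";

const client = createClient(SyncService, createGrpcTransport({ baseUrl: "http://localhost:8888" }));

for await (const item of client.listSyncs({ limit: 20, offset: 0 })) {
  // SyncStatus is a numeric TS enum, so reverse lookup yields the label.
  console.log(item.syncId, SyncStatus[item.status], item.remoteDirectoryUrl);
}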
+ */ +export declare const DeleteSyncRequestSchema: GenMessage; + +/** + * DeleteSyncResponse + * + * @generated from message agntcy.dir.store.v1.DeleteSyncResponse + */ +export declare type DeleteSyncResponse = Message<"agntcy.dir.store.v1.DeleteSyncResponse"> & { +}; + +/** + * Describes the message agntcy.dir.store.v1.DeleteSyncResponse. + * Use `create(DeleteSyncResponseSchema)` to create a new message. + */ +export declare const DeleteSyncResponseSchema: GenMessage; + +/** + * @generated from message agntcy.dir.store.v1.RequestRegistryCredentialsRequest + */ +export declare type RequestRegistryCredentialsRequest = Message<"agntcy.dir.store.v1.RequestRegistryCredentialsRequest"> & { + /** + * Identity of the requesting node + * For example: spiffe://example.org/service/foo + * + * @generated from field: string requesting_node_id = 1; + */ + requestingNodeId: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsRequest. + * Use `create(RequestRegistryCredentialsRequestSchema)` to create a new message. + */ +export declare const RequestRegistryCredentialsRequestSchema: GenMessage; + +/** + * @generated from message agntcy.dir.store.v1.RequestRegistryCredentialsResponse + */ +export declare type RequestRegistryCredentialsResponse = Message<"agntcy.dir.store.v1.RequestRegistryCredentialsResponse"> & { + /** + * Success status of the credential negotiation + * + * @generated from field: bool success = 1; + */ + success: boolean; + + /** + * Error message if negotiation failed + * + * @generated from field: string error_message = 2; + */ + errorMessage: string; + + /** + * URL of the remote Registry being synchronized from. + * + * @generated from field: string remote_registry_url = 3; + */ + remoteRegistryUrl: string; + + /** + * Registry credentials (oneof based on credential type) + * + * @generated from oneof agntcy.dir.store.v1.RequestRegistryCredentialsResponse.credentials + */ + credentials: { + /** + * CertificateCredentials certificate = 5; + * + * @generated from field: agntcy.dir.store.v1.BasicAuthCredentials basic_auth = 4; + */ + value: BasicAuthCredentials; + case: "basicAuth"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsResponse. + * Use `create(RequestRegistryCredentialsResponseSchema)` to create a new message. + */ +export declare const RequestRegistryCredentialsResponseSchema: GenMessage; + +/** + * Supporting credential type definitions + * + * @generated from message agntcy.dir.store.v1.BasicAuthCredentials + */ +export declare type BasicAuthCredentials = Message<"agntcy.dir.store.v1.BasicAuthCredentials"> & { + /** + * @generated from field: string username = 1; + */ + username: string; + + /** + * @generated from field: string password = 2; + */ + password: string; +}; + +/** + * Describes the message agntcy.dir.store.v1.BasicAuthCredentials. + * Use `create(BasicAuthCredentialsSchema)` to create a new message. + */ +export declare const BasicAuthCredentialsSchema: GenMessage; + +/** + * SyncStatus enumeration defines the possible states of a synchronization operation. 
+ * + * @generated from enum agntcy.dir.store.v1.SyncStatus + */ +export enum SyncStatus { + /** + * Default/unset status - should not be used in practice + * + * @generated from enum value: SYNC_STATUS_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * Sync operation has been created but not yet started + * + * @generated from enum value: SYNC_STATUS_PENDING = 1; + */ + PENDING = 1, + + /** + * Sync operation is actively discovering and transferring objects + * + * @generated from enum value: SYNC_STATUS_IN_PROGRESS = 2; + */ + IN_PROGRESS = 2, + + /** + * Sync operation encountered an error and stopped + * + * @generated from enum value: SYNC_STATUS_FAILED = 3; + */ + FAILED = 3, + + /** + * Sync operation has been marked for deletion but cleanup not yet started + * + * @generated from enum value: SYNC_STATUS_DELETE_PENDING = 4; + */ + DELETE_PENDING = 4, + + /** + * Sync operation has been successfully deleted and cleaned up + * + * @generated from enum value: SYNC_STATUS_DELETED = 5; + */ + DELETED = 5, +} + +/** + * Describes the enum agntcy.dir.store.v1.SyncStatus. + */ +export declare const SyncStatusSchema: GenEnum; + +/** + * SyncService provides functionality for synchronizing objects between Directory nodes. + * + * This service enables one-way synchronization from a remote Directory node to the local node, + * allowing distributed Directory instances to share and replicate objects. The service supports + * both on-demand synchronization and tracking of sync operations through their lifecycle. + * + * @generated from service agntcy.dir.store.v1.SyncService + */ +export declare const SyncService: GenService<{ + /** + * CreateSync initiates a new synchronization operation from a remote Directory node. + * + * The operation is non-blocking and returns immediately with a sync ID that can be used + * to track progress and manage the sync operation. + * + * @generated from rpc agntcy.dir.store.v1.SyncService.CreateSync + */ + createSync: { + methodKind: "unary"; + input: typeof CreateSyncRequestSchema; + output: typeof CreateSyncResponseSchema; + }, + /** + * ListSyncs returns a stream of all sync operations known to the system. + * + * This includes active, completed, and failed synchronizations. + * + * @generated from rpc agntcy.dir.store.v1.SyncService.ListSyncs + */ + listSyncs: { + methodKind: "server_streaming"; + input: typeof ListSyncsRequestSchema; + output: typeof ListSyncsItemSchema; + }, + /** + * GetSync retrieves detailed status information for a specific synchronization. + * + * @generated from rpc agntcy.dir.store.v1.SyncService.GetSync + */ + getSync: { + methodKind: "unary"; + input: typeof GetSyncRequestSchema; + output: typeof GetSyncResponseSchema; + }, + /** + * DeleteSync removes a synchronization operation from the system. + * + * @generated from rpc agntcy.dir.store.v1.SyncService.DeleteSync + */ + deleteSync: { + methodKind: "unary"; + input: typeof DeleteSyncRequestSchema; + output: typeof DeleteSyncResponseSchema; + }, + /** + * RequestRegistryCredentials requests registry credentials between two Directory nodes. + * + * This RPC allows a requesting node to authenticate with this node and obtain + * temporary registry credentials for secure Zot-based synchronization. 
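Because CreateSync returns before any objects move, callers typically poll GetSync until the status leaves the transient PENDING/IN_PROGRESS states of the enum above. A sketch of that loop; the two-second interval is arbitrary:

import type { Client } from "@connectrpc/connect";
import { SyncService, SyncStatus } from "./models/agntcy/dir/store/v1/sync_service_pb.js";

async function waitForSync(client: Client<typeof SyncService>, syncId: string) {
  for (;;) {
    const res = await client.getSync({ syncId });
    if (res.status === SyncStatus.FAILED) {
      throw new Error(`sync ${syncId} failed (last update ${res.lastUpdateTime})`);
    }
    if (res.status !== SyncStatus.PENDING && res.status !== SyncStatus.IN_PROGRESS) {
      return res; // settled: DELETE_PENDING, DELETED, or a future terminal state
    }
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
}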
+ * + * @generated from rpc agntcy.dir.store.v1.SyncService.RequestRegistryCredentials + */ + requestRegistryCredentials: { + methodKind: "unary"; + input: typeof RequestRegistryCredentialsRequestSchema; + output: typeof RequestRegistryCredentialsResponseSchema; + }, +}>; + diff --git a/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.js b/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.js index da071d30f..920f1a333 100644 --- a/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.js +++ b/sdk/dir-js/src/models/agntcy/dir/store/v1/sync_service_pb.js @@ -1,118 +1,118 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" -// @generated from file agntcy/dir/store/v1/sync_service.proto (package agntcy.dir.store.v1, syntax proto3) -/* eslint-disable */ - -import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; - -/** - * Describes the file agntcy/dir/store/v1/sync_service.proto. - */ -export const file_agntcy_dir_store_v1_sync_service = /*@__PURE__*/ - fileDesc("CiZhZ250Y3kvZGlyL3N0b3JlL3YxL3N5bmNfc2VydmljZS5wcm90bxITYWdudGN5LmRpci5zdG9yZS52MSI/ChFDcmVhdGVTeW5jUmVxdWVzdBIcChRyZW1vdGVfZGlyZWN0b3J5X3VybBgBIAEoCRIMCgRjaWRzGAIgAygJIiUKEkNyZWF0ZVN5bmNSZXNwb25zZRIPCgdzeW5jX2lkGAEgASgJIlAKEExpc3RTeW5jc1JlcXVlc3QSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCJvCg1MaXN0U3luY3NJdGVtEg8KB3N5bmNfaWQYASABKAkSLwoGc3RhdHVzGAIgASgOMh8uYWdudGN5LmRpci5zdG9yZS52MS5TeW5jU3RhdHVzEhwKFHJlbW90ZV9kaXJlY3RvcnlfdXJsGAMgASgJIiEKDkdldFN5bmNSZXF1ZXN0Eg8KB3N5bmNfaWQYASABKAkioQEKD0dldFN5bmNSZXNwb25zZRIPCgdzeW5jX2lkGAEgASgJEi8KBnN0YXR1cxgCIAEoDjIfLmFnbnRjeS5kaXIuc3RvcmUudjEuU3luY1N0YXR1cxIcChRyZW1vdGVfZGlyZWN0b3J5X3VybBgDIAEoCRIUCgxjcmVhdGVkX3RpbWUYBCABKAkSGAoQbGFzdF91cGRhdGVfdGltZRgFIAEoCSIkChFEZWxldGVTeW5jUmVxdWVzdBIPCgdzeW5jX2lkGAEgASgJIhQKEkRlbGV0ZVN5bmNSZXNwb25zZSI/CiFSZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1JlcXVlc3QSGgoScmVxdWVzdGluZ19ub2RlX2lkGAEgASgJIrkBCiJSZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1Jlc3BvbnNlEg8KB3N1Y2Nlc3MYASABKAgSFQoNZXJyb3JfbWVzc2FnZRgCIAEoCRIbChNyZW1vdGVfcmVnaXN0cnlfdXJsGAMgASgJEj8KCmJhc2ljX2F1dGgYBCABKAsyKS5hZ250Y3kuZGlyLnN0b3JlLnYxLkJhc2ljQXV0aENyZWRlbnRpYWxzSABCDQoLY3JlZGVudGlhbHMiOgoUQmFzaWNBdXRoQ3JlZGVudGlhbHMSEAoIdXNlcm5hbWUYASABKAkSEAoIcGFzc3dvcmQYAiABKAkqsAEKClN5bmNTdGF0dXMSGwoXU1lOQ19TVEFUVVNfVU5TUEVDSUZJRUQQABIXChNTWU5DX1NUQVRVU19QRU5ESU5HEAESGwoXU1lOQ19TVEFUVVNfSU5fUFJPR1JFU1MQAhIWChJTWU5DX1NUQVRVU19GQUlMRUQQAxIeChpTWU5DX1NUQVRVU19ERUxFVEVfUEVORElORxAEEhcKE1NZTkNfU1RBVFVTX0RFTEVURUQQBTKLBAoLU3luY1NlcnZpY2USXQoKQ3JlYXRlU3luYxImLmFnbnRjeS5kaXIuc3RvcmUudjEuQ3JlYXRlU3luY1JlcXVlc3QaJy5hZ250Y3kuZGlyLnN0b3JlLnYxLkNyZWF0ZVN5bmNSZXNwb25zZRJYCglMaXN0U3luY3MSJS5hZ250Y3kuZGlyLnN0b3JlLnYxLkxpc3RTeW5jc1JlcXVlc3QaIi5hZ250Y3kuZGlyLnN0b3JlLnYxLkxpc3RTeW5jc0l0ZW0wARJUCgdHZXRTeW5jEiMuYWdudGN5LmRpci5zdG9yZS52MS5HZXRTeW5jUmVxdWVzdBokLmFnbnRjeS5kaXIuc3RvcmUudjEuR2V0U3luY1Jlc3BvbnNlEl0KCkRlbGV0ZVN5bmMSJi5hZ250Y3kuZGlyLnN0b3JlLnYxLkRlbGV0ZVN5bmNSZXF1ZXN0GicuYWdudGN5LmRpci5zdG9yZS52MS5EZWxldGVTeW5jUmVzcG9uc2USjQEKGlJlcXVlc3RSZWdpc3RyeUNyZWRlbnRpYWxzEjYuYWdudGN5LmRpci5zdG9yZS52MS5SZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1JlcXVlc3QaNy5hZ250Y3kuZGlyLnN0b3JlLnYxLlJlcXVlc3RSZWdpc3RyeUNyZWRlbnRpYWxzUmVzcG9uc2VCvgEKF2NvbS5hZ250Y3kuZGlyLnN0b3JlLnYxQhBTeW5jU2VydmljZVByb3RvUAFaImdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc3RvcmUvdjGiAgNBRFOqAhNBZ250Y3kuRGlyLlN0b3JlLlYxygITQWdudGN5XERpclxTdG9yZVxWMeICH0FnbnRjeVxEaXJcU3Rvc
mVcVjFcR1BCTWV0YWRhdGHqAhZBZ250Y3k6OkRpcjo6U3RvcmU6OlYxYgZwcm90bzM"); - -/** - * Describes the message agntcy.dir.store.v1.CreateSyncRequest. - * Use `create(CreateSyncRequestSchema)` to create a new message. - */ -export const CreateSyncRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 0); - -/** - * Describes the message agntcy.dir.store.v1.CreateSyncResponse. - * Use `create(CreateSyncResponseSchema)` to create a new message. - */ -export const CreateSyncResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 1); - -/** - * Describes the message agntcy.dir.store.v1.ListSyncsRequest. - * Use `create(ListSyncsRequestSchema)` to create a new message. - */ -export const ListSyncsRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 2); - -/** - * Describes the message agntcy.dir.store.v1.ListSyncsItem. - * Use `create(ListSyncsItemSchema)` to create a new message. - */ -export const ListSyncsItemSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 3); - -/** - * Describes the message agntcy.dir.store.v1.GetSyncRequest. - * Use `create(GetSyncRequestSchema)` to create a new message. - */ -export const GetSyncRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 4); - -/** - * Describes the message agntcy.dir.store.v1.GetSyncResponse. - * Use `create(GetSyncResponseSchema)` to create a new message. - */ -export const GetSyncResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 5); - -/** - * Describes the message agntcy.dir.store.v1.DeleteSyncRequest. - * Use `create(DeleteSyncRequestSchema)` to create a new message. - */ -export const DeleteSyncRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 6); - -/** - * Describes the message agntcy.dir.store.v1.DeleteSyncResponse. - * Use `create(DeleteSyncResponseSchema)` to create a new message. - */ -export const DeleteSyncResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 7); - -/** - * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsRequest. - * Use `create(RequestRegistryCredentialsRequestSchema)` to create a new message. - */ -export const RequestRegistryCredentialsRequestSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 8); - -/** - * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsResponse. - * Use `create(RequestRegistryCredentialsResponseSchema)` to create a new message. - */ -export const RequestRegistryCredentialsResponseSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 9); - -/** - * Describes the message agntcy.dir.store.v1.BasicAuthCredentials. - * Use `create(BasicAuthCredentialsSchema)` to create a new message. - */ -export const BasicAuthCredentialsSchema = /*@__PURE__*/ - messageDesc(file_agntcy_dir_store_v1_sync_service, 10); - -/** - * Describes the enum agntcy.dir.store.v1.SyncStatus. - */ -export const SyncStatusSchema = /*@__PURE__*/ - enumDesc(file_agntcy_dir_store_v1_sync_service, 0); - -/** - * SyncStatus enumeration defines the possible states of a synchronization operation. - * - * @generated from enum agntcy.dir.store.v1.SyncStatus - */ -export const SyncStatus = /*@__PURE__*/ - tsEnum(SyncStatusSchema); - -/** - * SyncService provides functionality for synchronizing objects between Directory nodes. 
- * - * This service enables one-way synchronization from a remote Directory node to the local node, - * allowing distributed Directory instances to share and replicate objects. The service supports - * both on-demand synchronization and tracking of sync operations through their lifecycle. - * - * @generated from service agntcy.dir.store.v1.SyncService - */ -export const SyncService = /*@__PURE__*/ - serviceDesc(file_agntcy_dir_store_v1_sync_service, 0); - +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// @generated by protoc-gen-es v2.9.0 with parameter "import_extension=js" +// @generated from file agntcy/dir/store/v1/sync_service.proto (package agntcy.dir.store.v1, syntax proto3) +/* eslint-disable */ + +import { enumDesc, fileDesc, messageDesc, serviceDesc, tsEnum } from "@bufbuild/protobuf/codegenv2"; + +/** + * Describes the file agntcy/dir/store/v1/sync_service.proto. + */ +export const file_agntcy_dir_store_v1_sync_service = /*@__PURE__*/ + fileDesc("CiZhZ250Y3kvZGlyL3N0b3JlL3YxL3N5bmNfc2VydmljZS5wcm90bxITYWdudGN5LmRpci5zdG9yZS52MSI/ChFDcmVhdGVTeW5jUmVxdWVzdBIcChRyZW1vdGVfZGlyZWN0b3J5X3VybBgBIAEoCRIMCgRjaWRzGAIgAygJIiUKEkNyZWF0ZVN5bmNSZXNwb25zZRIPCgdzeW5jX2lkGAEgASgJIlAKEExpc3RTeW5jc1JlcXVlc3QSEgoFbGltaXQYAiABKA1IAIgBARITCgZvZmZzZXQYAyABKA1IAYgBAUIICgZfbGltaXRCCQoHX29mZnNldCJvCg1MaXN0U3luY3NJdGVtEg8KB3N5bmNfaWQYASABKAkSLwoGc3RhdHVzGAIgASgOMh8uYWdudGN5LmRpci5zdG9yZS52MS5TeW5jU3RhdHVzEhwKFHJlbW90ZV9kaXJlY3RvcnlfdXJsGAMgASgJIiEKDkdldFN5bmNSZXF1ZXN0Eg8KB3N5bmNfaWQYASABKAkioQEKD0dldFN5bmNSZXNwb25zZRIPCgdzeW5jX2lkGAEgASgJEi8KBnN0YXR1cxgCIAEoDjIfLmFnbnRjeS5kaXIuc3RvcmUudjEuU3luY1N0YXR1cxIcChRyZW1vdGVfZGlyZWN0b3J5X3VybBgDIAEoCRIUCgxjcmVhdGVkX3RpbWUYBCABKAkSGAoQbGFzdF91cGRhdGVfdGltZRgFIAEoCSIkChFEZWxldGVTeW5jUmVxdWVzdBIPCgdzeW5jX2lkGAEgASgJIhQKEkRlbGV0ZVN5bmNSZXNwb25zZSI/CiFSZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1JlcXVlc3QSGgoScmVxdWVzdGluZ19ub2RlX2lkGAEgASgJIrkBCiJSZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1Jlc3BvbnNlEg8KB3N1Y2Nlc3MYASABKAgSFQoNZXJyb3JfbWVzc2FnZRgCIAEoCRIbChNyZW1vdGVfcmVnaXN0cnlfdXJsGAMgASgJEj8KCmJhc2ljX2F1dGgYBCABKAsyKS5hZ250Y3kuZGlyLnN0b3JlLnYxLkJhc2ljQXV0aENyZWRlbnRpYWxzSABCDQoLY3JlZGVudGlhbHMiOgoUQmFzaWNBdXRoQ3JlZGVudGlhbHMSEAoIdXNlcm5hbWUYASABKAkSEAoIcGFzc3dvcmQYAiABKAkqsAEKClN5bmNTdGF0dXMSGwoXU1lOQ19TVEFUVVNfVU5TUEVDSUZJRUQQABIXChNTWU5DX1NUQVRVU19QRU5ESU5HEAESGwoXU1lOQ19TVEFUVVNfSU5fUFJPR1JFU1MQAhIWChJTWU5DX1NUQVRVU19GQUlMRUQQAxIeChpTWU5DX1NUQVRVU19ERUxFVEVfUEVORElORxAEEhcKE1NZTkNfU1RBVFVTX0RFTEVURUQQBTKLBAoLU3luY1NlcnZpY2USXQoKQ3JlYXRlU3luYxImLmFnbnRjeS5kaXIuc3RvcmUudjEuQ3JlYXRlU3luY1JlcXVlc3QaJy5hZ250Y3kuZGlyLnN0b3JlLnYxLkNyZWF0ZVN5bmNSZXNwb25zZRJYCglMaXN0U3luY3MSJS5hZ250Y3kuZGlyLnN0b3JlLnYxLkxpc3RTeW5jc1JlcXVlc3QaIi5hZ250Y3kuZGlyLnN0b3JlLnYxLkxpc3RTeW5jc0l0ZW0wARJUCgdHZXRTeW5jEiMuYWdudGN5LmRpci5zdG9yZS52MS5HZXRTeW5jUmVxdWVzdBokLmFnbnRjeS5kaXIuc3RvcmUudjEuR2V0U3luY1Jlc3BvbnNlEl0KCkRlbGV0ZVN5bmMSJi5hZ250Y3kuZGlyLnN0b3JlLnYxLkRlbGV0ZVN5bmNSZXF1ZXN0GicuYWdudGN5LmRpci5zdG9yZS52MS5EZWxldGVTeW5jUmVzcG9uc2USjQEKGlJlcXVlc3RSZWdpc3RyeUNyZWRlbnRpYWxzEjYuYWdudGN5LmRpci5zdG9yZS52MS5SZXF1ZXN0UmVnaXN0cnlDcmVkZW50aWFsc1JlcXVlc3QaNy5hZ250Y3kuZGlyLnN0b3JlLnYxLlJlcXVlc3RSZWdpc3RyeUNyZWRlbnRpYWxzUmVzcG9uc2VCvgEKF2NvbS5hZ250Y3kuZGlyLnN0b3JlLnYxQhBTeW5jU2VydmljZVByb3RvUAFaImdpdGh1Yi5jb20vYWdudGN5L2Rpci9hcGkvc3RvcmUvdjGiAgNBRFOqAhNBZ250Y3kuRGlyLlN0b3JlLlYxygITQWdudGN5XERpclxTdG9yZVxWMeICH0FnbnRjeVxEaXJcU3RvcmVcVjFcR1BCTWV0YWRhdGHqAhZBZ250Y3k6OkRpcjo6U3RvcmU6OlYxYgZwcm90bzM"); + +/** + * Describes the message 
agntcy.dir.store.v1.CreateSyncRequest. + * Use `create(CreateSyncRequestSchema)` to create a new message. + */ +export const CreateSyncRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 0); + +/** + * Describes the message agntcy.dir.store.v1.CreateSyncResponse. + * Use `create(CreateSyncResponseSchema)` to create a new message. + */ +export const CreateSyncResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 1); + +/** + * Describes the message agntcy.dir.store.v1.ListSyncsRequest. + * Use `create(ListSyncsRequestSchema)` to create a new message. + */ +export const ListSyncsRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 2); + +/** + * Describes the message agntcy.dir.store.v1.ListSyncsItem. + * Use `create(ListSyncsItemSchema)` to create a new message. + */ +export const ListSyncsItemSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 3); + +/** + * Describes the message agntcy.dir.store.v1.GetSyncRequest. + * Use `create(GetSyncRequestSchema)` to create a new message. + */ +export const GetSyncRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 4); + +/** + * Describes the message agntcy.dir.store.v1.GetSyncResponse. + * Use `create(GetSyncResponseSchema)` to create a new message. + */ +export const GetSyncResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 5); + +/** + * Describes the message agntcy.dir.store.v1.DeleteSyncRequest. + * Use `create(DeleteSyncRequestSchema)` to create a new message. + */ +export const DeleteSyncRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 6); + +/** + * Describes the message agntcy.dir.store.v1.DeleteSyncResponse. + * Use `create(DeleteSyncResponseSchema)` to create a new message. + */ +export const DeleteSyncResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 7); + +/** + * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsRequest. + * Use `create(RequestRegistryCredentialsRequestSchema)` to create a new message. + */ +export const RequestRegistryCredentialsRequestSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 8); + +/** + * Describes the message agntcy.dir.store.v1.RequestRegistryCredentialsResponse. + * Use `create(RequestRegistryCredentialsResponseSchema)` to create a new message. + */ +export const RequestRegistryCredentialsResponseSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 9); + +/** + * Describes the message agntcy.dir.store.v1.BasicAuthCredentials. + * Use `create(BasicAuthCredentialsSchema)` to create a new message. + */ +export const BasicAuthCredentialsSchema = /*@__PURE__*/ + messageDesc(file_agntcy_dir_store_v1_sync_service, 10); + +/** + * Describes the enum agntcy.dir.store.v1.SyncStatus. + */ +export const SyncStatusSchema = /*@__PURE__*/ + enumDesc(file_agntcy_dir_store_v1_sync_service, 0); + +/** + * SyncStatus enumeration defines the possible states of a synchronization operation. + * + * @generated from enum agntcy.dir.store.v1.SyncStatus + */ +export const SyncStatus = /*@__PURE__*/ + tsEnum(SyncStatusSchema); + +/** + * SyncService provides functionality for synchronizing objects between Directory nodes. + * + * This service enables one-way synchronization from a remote Directory node to the local node, + * allowing distributed Directory instances to share and replicate objects. 
The service supports + * both on-demand synchronization and tracking of sync operations through their lifecycle. + * + * @generated from service agntcy.dir.store.v1.SyncService + */ +export const SyncService = /*@__PURE__*/ + serviceDesc(file_agntcy_dir_store_v1_sync_service, 0); + diff --git a/sdk/dir-js/src/models/core_v1.ts b/sdk/dir-js/src/models/core_v1.ts index 2bd4ed7cc..da9603664 100644 --- a/sdk/dir-js/src/models/core_v1.ts +++ b/sdk/dir-js/src/models/core_v1.ts @@ -1,4 +1,4 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/core/v1/record_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/core/v1/record_pb'; diff --git a/sdk/dir-js/src/models/events_v1.ts b/sdk/dir-js/src/models/events_v1.ts index a7bac865d..e17d44597 100644 --- a/sdk/dir-js/src/models/events_v1.ts +++ b/sdk/dir-js/src/models/events_v1.ts @@ -1,4 +1,4 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/events/v1/event_service_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/events/v1/event_service_pb'; diff --git a/sdk/dir-js/src/models/index.ts b/sdk/dir-js/src/models/index.ts index ecbc0ec12..37f2ed622 100644 --- a/sdk/dir-js/src/models/index.ts +++ b/sdk/dir-js/src/models/index.ts @@ -1,9 +1,9 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * as core_v1 from './core_v1'; -export * as routing_v1 from './routing_v1'; -export * as search_v1 from './search_v1'; -export * as sign_v1 from './sign_v1'; -export * as store_v1 from './store_v1'; -export * as events_v1 from './events_v1'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * as core_v1 from './core_v1'; +export * as routing_v1 from './routing_v1'; +export * as search_v1 from './search_v1'; +export * as sign_v1 from './sign_v1'; +export * as store_v1 from './store_v1'; +export * as events_v1 from './events_v1'; diff --git a/sdk/dir-js/src/models/routing_v1.ts b/sdk/dir-js/src/models/routing_v1.ts index c5a7ab462..b46fbfa51 100644 --- a/sdk/dir-js/src/models/routing_v1.ts +++ b/sdk/dir-js/src/models/routing_v1.ts @@ -1,7 +1,7 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/routing/v1/peer_pb'; -export * from './agntcy/dir/routing/v1/publication_service_pb'; -export * from './agntcy/dir/routing/v1/record_query_pb'; -export * from './agntcy/dir/routing/v1/routing_service_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/routing/v1/peer_pb'; +export * from './agntcy/dir/routing/v1/publication_service_pb'; +export * from './agntcy/dir/routing/v1/record_query_pb'; +export * from './agntcy/dir/routing/v1/routing_service_pb'; diff --git a/sdk/dir-js/src/models/search_v1.ts b/sdk/dir-js/src/models/search_v1.ts index 216b94112..9e3f97921 100644 --- a/sdk/dir-js/src/models/search_v1.ts +++ b/sdk/dir-js/src/models/search_v1.ts @@ -1,5 +1,5 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/search/v1/record_query_pb'; -export * from 
'./agntcy/dir/search/v1/search_service_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/search/v1/record_query_pb'; +export * from './agntcy/dir/search/v1/search_service_pb'; diff --git a/sdk/dir-js/src/models/sign_v1.ts b/sdk/dir-js/src/models/sign_v1.ts index 5aae4bfe7..986827c09 100644 --- a/sdk/dir-js/src/models/sign_v1.ts +++ b/sdk/dir-js/src/models/sign_v1.ts @@ -1,6 +1,6 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/sign/v1/sign_service_pb'; -export * from './agntcy/dir/sign/v1/signature_pb'; -export * from './agntcy/dir/sign/v1/public_key_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/sign/v1/sign_service_pb'; +export * from './agntcy/dir/sign/v1/signature_pb'; +export * from './agntcy/dir/sign/v1/public_key_pb'; diff --git a/sdk/dir-js/src/models/store_v1.ts b/sdk/dir-js/src/models/store_v1.ts index 7467eda22..47842cf10 100644 --- a/sdk/dir-js/src/models/store_v1.ts +++ b/sdk/dir-js/src/models/store_v1.ts @@ -1,5 +1,5 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -export * from './agntcy/dir/store/v1/store_service_pb'; -export * from './agntcy/dir/store/v1/sync_service_pb'; +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +export * from './agntcy/dir/store/v1/store_service_pb'; +export * from './agntcy/dir/store/v1/sync_service_pb'; diff --git a/sdk/dir-js/test/client.test.ts b/sdk/dir-js/test/client.test.ts index 1d62c1291..fb796cac1 100644 --- a/sdk/dir-js/test/client.test.ts +++ b/sdk/dir-js/test/client.test.ts @@ -1,553 +1,553 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -import { describe, test, beforeAll, afterAll, expect } from 'vitest'; - -import { execSync } from 'node:child_process'; -import { pool as workerpool } from 'workerpool'; -import { readFileSync, rmSync } from 'node:fs'; -import { env } from 'node:process'; -import { create } from '@bufbuild/protobuf'; - -import { validate as isValidUUID } from 'uuid'; -import { v4 as uuidv4 } from 'uuid'; - -import { Client, Config, models } from '../src'; - -/** - * Generate test records with unique names. 
- * Schema: https://schema.oasf.outshift.com/0.7.0/objects/record - * @param count - Number of records to generate - * @param testFunctionName - Name of the test function for record naming - * @returns Array of generated Record objects - */ -function genRecords( - count: number, - testFunctionName: string, -): models.core_v1.Record[] { - const records: models.core_v1.Record[] = []; - for (let index = 0; index < count; index++) { - records.push( - create(models.core_v1.RecordSchema, { - data: { - name: `agntcy-${testFunctionName}-${index}-${uuidv4().substring(0, 8)}`, - version: 'v3.0.0', - schema_version: '0.7.0', - description: "Research agent for Cisco's marketing strategy.", - authors: ['Cisco Systems'], - created_at: '2025-03-19T17:06:37Z', - skills: [ - { - name: 'natural_language_processing/natural_language_generation/text_completion', - id: 10201, - }, - { - name: 'natural_language_processing/analytical_reasoning/problem_solving', - id: 10702, - }, - ], - locators: [ - { - type: 'docker_image', - url: 'https://ghcr.io/agntcy/marketing-strategy', - }, - ], - domains: [ - { - name: 'technology/networking', - id: 103, - }, - ], - modules: [], - }, - }), - ); - } - - return records; -} - -describe('Client', () => { - let config: Config; - let client: Client; - - beforeAll(async () => { - // Verify that DIRCTL_PATH is set in the environment - expect(env.DIRCTL_PATH).toBeDefined(); - - // Initialize the client - config = Config.loadFromEnv(); - const grpcTransport = await Client.createGRPCTransport(config); - - client = new Client(config, grpcTransport); - }); - - afterAll(async () => { - // Clean up any resources if needed - // Note: gRPC clients in Connect don't need explicit closing - }); - - test('push', async () => { - const records = genRecords(2, 'push'); - const recordRefs = await client.push(records); - - expect(recordRefs).not.toBeNull(); - expect(recordRefs).toBeInstanceOf(Array); - expect(recordRefs).toHaveLength(2); - - for (const ref of recordRefs) { - expect(ref).toBeTypeOf(typeof models.core_v1.RecordRefSchema); - expect(ref.cid).toHaveLength(59); - } - }); - - test('pull', async () => { - const records = genRecords(2, 'pull'); - const recordRefs = await client.push(records); - const pulledRecords = await client.pull(recordRefs); - - expect(pulledRecords).not.toBeNull(); - expect(pulledRecords).toBeInstanceOf(Array); - expect(pulledRecords).toHaveLength(2); - - for (let index = 0; index < pulledRecords.length; index++) { - const record = pulledRecords[index]; - expect(record).toBeTypeOf(typeof models.core_v1.RecordSchema); - expect(record).toEqual(records[index]); - } - }); - - test('searchCIDs', async () => { - const records = genRecords(1, 'search'); - await client.push(records); - - const searchRequest = create(models.search_v1.SearchCIDsRequestSchema, { - queries: [ - { - type: models.search_v1.RecordQueryType.SKILL_ID, - value: '10201', - }, - ], - limit: 2, - }); - - const objects = await client.searchCIDs(searchRequest); - - expect(objects).not.toBeNull(); - expect(objects).toBeInstanceOf(Array); - expect(objects.length).toBeGreaterThan(0); - - for (const obj of objects) { - expect(obj).toHaveProperty('recordCid'); - } - }); - - test('lookup', async () => { - const records = genRecords(2, 'lookup'); - const recordRefs = await client.push(records); - const metadatas = await client.lookup(recordRefs); - - expect(metadatas).not.toBeNull(); - expect(metadatas).toBeInstanceOf(Array); - expect(metadatas).toHaveLength(2); - - for (const metadata of metadatas) { - 
expect(metadata).toBeTypeOf(typeof models.core_v1.RecordMetaSchema); - } - }); - - test('publish', async () => { - const records = genRecords(1, 'publish'); - const recordRefs = await client.push(records); - - await client.publish( - create(models.routing_v1.PublishRequestSchema, { - request: { - case: 'recordRefs', - value: { - refs: recordRefs, - }, - }, - }), - ); - }); - - test('list', async () => { - const records = genRecords(1, 'list'); - const recordRefs = await client.push(records); - - // Publish records - await client.publish( - create(models.routing_v1.PublishRequestSchema, { - request: { - case: 'recordRefs', - value: { - refs: recordRefs, - }, - }, - }), - ); - - // Sleep to allow the publication to be indexed - await new Promise(resolve => setTimeout(resolve, 5000)); - - // Query for records in the domain - const objects = await client.list( - create(models.routing_v1.ListRequestSchema, { - queries: [ - { - type: models.routing_v1.RecordQueryType.DOMAIN, - value: 'technology/networking', - }, - ], - }), - ); - - expect(objects).not.toBeNull(); - expect(objects).toBeInstanceOf(Array); - expect(objects.length).not.toBe(0); - - for (const obj of objects) { - expect(obj).toBeTypeOf(typeof models.routing_v1.ListResponseSchema); - } - }, 30000); - - test('unpublish', async () => { - const records = genRecords(1, 'unpublish'); - const recordRefs = await client.push(records); - - // Publish records - await client.publish( - create(models.routing_v1.PublishRequestSchema, { - request: { - case: 'recordRefs', - value: { - refs: recordRefs, - }, - }, - }), - ); - - // Unpublish - await client.unpublish( - create(models.routing_v1.UnpublishRequestSchema, { - request: { - case: 'recordRefs', - value: { - refs: recordRefs, - }, - }, - }), - ); - }); - - test('delete', async () => { - const records = genRecords(1, 'delete'); - const recordRefs = await client.push(records); - - await client.delete(recordRefs); - }); - - test('pushReferrer', async () => { - const records = genRecords(2, 'pushReferrer'); - const recordRefs = await client.push(records); - - const requests: models.store_v1.PushReferrerRequest[] = recordRefs.map( - ( - recordRef: models.core_v1.RecordRef, - ): models.store_v1.PushReferrerRequest => { - return create(models.store_v1.PushReferrerRequestSchema, { - recordRef: recordRef, - referrer: create(models.core_v1.RecordReferrerSchema, { - type: models.sign_v1.SignatureSchema.typeName, - data: { - signature: 'dGVzdC1zaWduYXR1cmU=', - annotations: { - payload: 'test-payload-data', - }, - }, - }), - }); - }, - ); - - const response = await client.push_referrer(requests); - expect(response).not.toBeNull(); - expect(response).toHaveLength(2); - - for (const r of response) { - expect(r).toBeTypeOf(typeof models.store_v1.PushReferrerResponseSchema); - } - }); - - test('pullReferrer', async () => { - const records = genRecords(2, 'pullReferrer'); - const recordRefs = await client.push(records); - - // Push signatures to these records first - const pushRequests: models.store_v1.PushReferrerRequest[] = recordRefs.map( - ( - recordRef: models.core_v1.RecordRef, - ): models.store_v1.PushReferrerRequest => { - return create(models.store_v1.PushReferrerRequestSchema, { - recordRef: recordRef, - referrer: create(models.core_v1.RecordReferrerSchema, { - type: models.sign_v1.SignatureSchema.typeName, - data: { - signature: 'dGVzdC1zaWduYXR1cmU=', - annotations: { - payload: 'test-payload-data', - }, - }, - }), - }); - }, - ); - - const pushResponse = await client.push_referrer(pushRequests); 
- expect(pushResponse).not.toBeNull(); - expect(pushResponse).toHaveLength(2); - - for (const r of pushResponse) { - expect(r).toBeTypeOf(typeof models.store_v1.PushReferrerResponseSchema); - } - - // Now pull the signatures back - const requests: models.store_v1.PullReferrerRequest[] = recordRefs.map( - ( - recordRef: models.core_v1.RecordRef, - ): models.store_v1.PullReferrerRequest => { - return create(models.store_v1.PullReferrerRequestSchema, { - recordRef: recordRef, - referrerType: models.sign_v1.SignatureSchema.typeName, - }); - }, - ); - - const response = await client.pull_referrer(requests); - expect(response).not.toBeNull(); - expect(response).toHaveLength(2); - - for (const r of response) { - expect(r).toBeTypeOf(typeof models.store_v1.PullReferrerResponseSchema); - } - }); - - test('sign_and_verify', async () => { - const shellEnv = { ...env }; - - const records = genRecords(2, 'sign_verify'); - const recordRefs = await client.push(records); - - const keyPassword = 'testing-key'; - - // Clean up any existing keys - rmSync('cosign.key', { force: true }); - rmSync('cosign.pub', { force: true }); - - try { - // Generate key pair - const cosignPath = env['COSIGN_PATH'] || 'cosign'; - execSync(`${cosignPath} generate-key-pair`, { - env: { ...shellEnv, COSIGN_PASSWORD: keyPassword }, - encoding: 'utf8', - stdio: 'pipe', - }); - - // Read configuration data - const keyFile = readFileSync('cosign.key'); - const token = shellEnv['OIDC_TOKEN'] || ''; - const providerUrl = shellEnv['OIDC_PROVIDER_URL'] || ''; - const clientId = shellEnv['OIDC_CLIENT_ID'] || 'sigstore'; - - // Create signing providers - const keyRequest = create(models.sign_v1.SignRequestSchema, { - recordRef: recordRefs[0], - provider: { - request: { - case: 'key', - value: { - privateKey: keyFile, - password: Buffer.from(keyPassword, 'utf-8'), - }, - }, - }, - }); - - const oidcRequest = create(models.sign_v1.SignRequestSchema, { - recordRef: recordRefs[1], - provider: { - request: { - case: 'oidc', - value: { - idToken: token, - options: { - oidcProviderUrl: providerUrl, - }, - }, - }, - }, - }); - - // Sign test - client.sign(keyRequest); - - if ((shellEnv['OIDC_TOKEN'] || '') != '' && (shellEnv['OIDC_PROVIDER_URL'] || '') != '') { - client.sign(oidcRequest, clientId); - } else { - recordRefs.pop(); // NOTE: Drop the unsigned record if no OIDC tested - } - - // Verify test - for (const ref of recordRefs) { - const response = await client.verify( - create(models.sign_v1.VerifyRequestSchema, { - recordRef: ref, - }), - ); - - expect(response.success).toBe(true); - } - - // Test invalid CID - try { - client.sign( - create(models.sign_v1.SignRequestSchema, { - recordRef: { cid: 'invalid-cid' }, - provider: { - request: { - case: 'key', - value: { - privateKey: Uint8Array.from([]), - password: Uint8Array.from([]), - }, - }, - }, - }), - ); - expect.fail('Should have thrown error for invalid CID'); - } catch (error) { - if (error instanceof Error) { - expect(error.message).toContain('failed to decode CID invalid-cid'); - } - } - } catch (error) { - expect.fail(`Sign and verify test failed: ${error}`); - } finally { - // Clean up keys - rmSync('cosign.key', { force: true }); - rmSync('cosign.pub', { force: true }); - } - }, 30000); - - test('sync', async () => { - // Create sync - const createResponse = await client.create_sync( - create(models.store_v1.CreateSyncRequestSchema, { - remoteDirectoryUrl: - env['DIRECTORY_SERVER_PEER1_ADDRESS'] || '0.0.0.0:8891', - }), - ); - expect(createResponse).toBeTypeOf( - typeof 
models.store_v1.CreateSyncResponseSchema, - ); - - const syncId = createResponse.syncId; - expect(isValidUUID(syncId)).toBe(true); - - // List syncs - const listResponse = await client.list_syncs( - create(models.store_v1.ListSyncsRequestSchema, {}), - ); - expect(listResponse).toBeInstanceOf(Array); - - for (const syncItem of listResponse) { - expect(syncItem).toBeTypeOf(typeof models.store_v1.ListSyncsItemSchema); - expect(isValidUUID(syncItem.syncId)).toBe(true); - } - - // Get sync - const getResponse = await client.get_sync( - create(models.store_v1.GetSyncRequestSchema, { - syncId: syncId, - }), - ); - expect(getResponse).toBeTypeOf( - typeof models.store_v1.GetSyncResponseSchema, - ); - expect(getResponse.syncId).toEqual(syncId); - - // Delete sync - await client.delete_sync( - create(models.store_v1.DeleteSyncRequestSchema, { - syncId: syncId, - }), - ); - }); - - test('listen', async () => { - const records = genRecords(1, 'listen'); - const recordRefs = await client.push(records); - - const pool = workerpool(__dirname + '/listen_worker.ts'); - try { - pool.exec('pullRecordsBackground', [recordRefs[0].cid, config.dirctlPath, config.spiffeEndpointSocket]); - } catch (error) { - expect.fail(`pullRecordsBackground execution failed: ${error}`) - } - - let events = client.listen( - create(models.events_v1.ListenRequestSchema, {}) - ); - - for await (const response of events) { - expect(response).toBeTypeOf(typeof models.events_v1.ListenResponseSchema); - break; // Exit after first event for test purposes - } - - pool.terminate(true); - - }, 120000); - - test('publication', async () => { - const records = genRecords(1, 'publication'); - const recordRefs = await client.push(records); - - const createResponse = await client.create_publication( - create(models.routing_v1.PublishRequestSchema, { - request: { - case: 'recordRefs', - value: { - refs: recordRefs, - }, - }, - }), - ); - - expect(createResponse).toBeTypeOf( - typeof models.routing_v1.CreatePublicationResponseSchema, - ); - - const publicationsList = await client.list_publication( - create(models.routing_v1.ListPublicationsRequestSchema, {}), - ); - - expect(publicationsList).toBeInstanceOf(Array); - - for (const publication of publicationsList) { - expect(publication).toBeTypeOf(typeof models.routing_v1.ListPublicationsItemSchema); - } - - const getResponse = await client.get_publication( - create(models.routing_v1.GetPublicationRequestSchema, { - publicationId: createResponse.publicationId, - }), - ); - - expect(getResponse).toBeTypeOf( - typeof models.routing_v1.GetPublicationResponseSchema, - ); - - expect(getResponse.publicationId).toEqual(createResponse.publicationId); - }); -}); +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +import { describe, test, beforeAll, afterAll, expect } from 'vitest'; + +import { execSync } from 'node:child_process'; +import { pool as workerpool } from 'workerpool'; +import { readFileSync, rmSync } from 'node:fs'; +import { env } from 'node:process'; +import { create } from '@bufbuild/protobuf'; + +import { validate as isValidUUID } from 'uuid'; +import { v4 as uuidv4 } from 'uuid'; + +import { Client, Config, models } from '../src'; + +/** + * Generate test records with unique names. 
+ * Schema: https://schema.oasf.outshift.com/0.7.0/objects/record + * @param count - Number of records to generate + * @param testFunctionName - Name of the test function for record naming + * @returns Array of generated Record objects + */ +function genRecords( + count: number, + testFunctionName: string, +): models.core_v1.Record[] { + const records: models.core_v1.Record[] = []; + for (let index = 0; index < count; index++) { + records.push( + create(models.core_v1.RecordSchema, { + data: { + name: `agntcy-${testFunctionName}-${index}-${uuidv4().substring(0, 8)}`, + version: 'v3.0.0', + schema_version: '0.7.0', + description: "Research agent for Cisco's marketing strategy.", + authors: ['Cisco Systems'], + created_at: '2025-03-19T17:06:37Z', + skills: [ + { + name: 'natural_language_processing/natural_language_generation/text_completion', + id: 10201, + }, + { + name: 'natural_language_processing/analytical_reasoning/problem_solving', + id: 10702, + }, + ], + locators: [ + { + type: 'docker_image', + url: 'https://ghcr.io/agntcy/marketing-strategy', + }, + ], + domains: [ + { + name: 'technology/networking', + id: 103, + }, + ], + modules: [], + }, + }), + ); + } + + return records; +} + +describe('Client', () => { + let config: Config; + let client: Client; + + beforeAll(async () => { + // Verify that DIRCTL_PATH is set in the environment + expect(env.DIRCTL_PATH).toBeDefined(); + + // Initialize the client + config = Config.loadFromEnv(); + const grpcTransport = await Client.createGRPCTransport(config); + + client = new Client(config, grpcTransport); + }); + + afterAll(async () => { + // Clean up any resources if needed + // Note: gRPC clients in Connect don't need explicit closing + }); + + test('push', async () => { + const records = genRecords(2, 'push'); + const recordRefs = await client.push(records); + + expect(recordRefs).not.toBeNull(); + expect(recordRefs).toBeInstanceOf(Array); + expect(recordRefs).toHaveLength(2); + + for (const ref of recordRefs) { + expect(ref).toBeTypeOf(typeof models.core_v1.RecordRefSchema); + expect(ref.cid).toHaveLength(59); + } + }); + + test('pull', async () => { + const records = genRecords(2, 'pull'); + const recordRefs = await client.push(records); + const pulledRecords = await client.pull(recordRefs); + + expect(pulledRecords).not.toBeNull(); + expect(pulledRecords).toBeInstanceOf(Array); + expect(pulledRecords).toHaveLength(2); + + for (let index = 0; index < pulledRecords.length; index++) { + const record = pulledRecords[index]; + expect(record).toBeTypeOf(typeof models.core_v1.RecordSchema); + expect(record).toEqual(records[index]); + } + }); + + test('searchCIDs', async () => { + const records = genRecords(1, 'search'); + await client.push(records); + + const searchRequest = create(models.search_v1.SearchCIDsRequestSchema, { + queries: [ + { + type: models.search_v1.RecordQueryType.SKILL_ID, + value: '10201', + }, + ], + limit: 2, + }); + + const objects = await client.searchCIDs(searchRequest); + + expect(objects).not.toBeNull(); + expect(objects).toBeInstanceOf(Array); + expect(objects.length).toBeGreaterThan(0); + + for (const obj of objects) { + expect(obj).toHaveProperty('recordCid'); + } + }); + + test('lookup', async () => { + const records = genRecords(2, 'lookup'); + const recordRefs = await client.push(records); + const metadatas = await client.lookup(recordRefs); + + expect(metadatas).not.toBeNull(); + expect(metadatas).toBeInstanceOf(Array); + expect(metadatas).toHaveLength(2); + + for (const metadata of metadatas) { + 
expect(metadata).toBeTypeOf(typeof models.core_v1.RecordMetaSchema); + } + }); + + test('publish', async () => { + const records = genRecords(1, 'publish'); + const recordRefs = await client.push(records); + + await client.publish( + create(models.routing_v1.PublishRequestSchema, { + request: { + case: 'recordRefs', + value: { + refs: recordRefs, + }, + }, + }), + ); + }); + + test('list', async () => { + const records = genRecords(1, 'list'); + const recordRefs = await client.push(records); + + // Publish records + await client.publish( + create(models.routing_v1.PublishRequestSchema, { + request: { + case: 'recordRefs', + value: { + refs: recordRefs, + }, + }, + }), + ); + + // Sleep to allow the publication to be indexed + await new Promise(resolve => setTimeout(resolve, 5000)); + + // Query for records in the domain + const objects = await client.list( + create(models.routing_v1.ListRequestSchema, { + queries: [ + { + type: models.routing_v1.RecordQueryType.DOMAIN, + value: 'technology/networking', + }, + ], + }), + ); + + expect(objects).not.toBeNull(); + expect(objects).toBeInstanceOf(Array); + expect(objects.length).not.toBe(0); + + for (const obj of objects) { + expect(obj).toBeTypeOf(typeof models.routing_v1.ListResponseSchema); + } + }, 30000); + + test('unpublish', async () => { + const records = genRecords(1, 'unpublish'); + const recordRefs = await client.push(records); + + // Publish records + await client.publish( + create(models.routing_v1.PublishRequestSchema, { + request: { + case: 'recordRefs', + value: { + refs: recordRefs, + }, + }, + }), + ); + + // Unpublish + await client.unpublish( + create(models.routing_v1.UnpublishRequestSchema, { + request: { + case: 'recordRefs', + value: { + refs: recordRefs, + }, + }, + }), + ); + }); + + test('delete', async () => { + const records = genRecords(1, 'delete'); + const recordRefs = await client.push(records); + + await client.delete(recordRefs); + }); + + test('pushReferrer', async () => { + const records = genRecords(2, 'pushReferrer'); + const recordRefs = await client.push(records); + + const requests: models.store_v1.PushReferrerRequest[] = recordRefs.map( + ( + recordRef: models.core_v1.RecordRef, + ): models.store_v1.PushReferrerRequest => { + return create(models.store_v1.PushReferrerRequestSchema, { + recordRef: recordRef, + referrer: create(models.core_v1.RecordReferrerSchema, { + type: models.sign_v1.SignatureSchema.typeName, + data: { + signature: 'dGVzdC1zaWduYXR1cmU=', + annotations: { + payload: 'test-payload-data', + }, + }, + }), + }); + }, + ); + + const response = await client.push_referrer(requests); + expect(response).not.toBeNull(); + expect(response).toHaveLength(2); + + for (const r of response) { + expect(r).toBeTypeOf(typeof models.store_v1.PushReferrerResponseSchema); + } + }); + + test('pullReferrer', async () => { + const records = genRecords(2, 'pullReferrer'); + const recordRefs = await client.push(records); + + // Push signatures to these records first + const pushRequests: models.store_v1.PushReferrerRequest[] = recordRefs.map( + ( + recordRef: models.core_v1.RecordRef, + ): models.store_v1.PushReferrerRequest => { + return create(models.store_v1.PushReferrerRequestSchema, { + recordRef: recordRef, + referrer: create(models.core_v1.RecordReferrerSchema, { + type: models.sign_v1.SignatureSchema.typeName, + data: { + signature: 'dGVzdC1zaWduYXR1cmU=', + annotations: { + payload: 'test-payload-data', + }, + }, + }), + }); + }, + ); + + const pushResponse = await client.push_referrer(pushRequests); 
+ expect(pushResponse).not.toBeNull(); + expect(pushResponse).toHaveLength(2); + + for (const r of pushResponse) { + expect(r).toBeTypeOf(typeof models.store_v1.PushReferrerResponseSchema); + } + + // Now pull the signatures back + const requests: models.store_v1.PullReferrerRequest[] = recordRefs.map( + ( + recordRef: models.core_v1.RecordRef, + ): models.store_v1.PullReferrerRequest => { + return create(models.store_v1.PullReferrerRequestSchema, { + recordRef: recordRef, + referrerType: models.sign_v1.SignatureSchema.typeName, + }); + }, + ); + + const response = await client.pull_referrer(requests); + expect(response).not.toBeNull(); + expect(response).toHaveLength(2); + + for (const r of response) { + expect(r).toBeTypeOf(typeof models.store_v1.PullReferrerResponseSchema); + } + }); + + test('sign_and_verify', async () => { + const shellEnv = { ...env }; + + const records = genRecords(2, 'sign_verify'); + const recordRefs = await client.push(records); + + const keyPassword = 'testing-key'; + + // Clean up any existing keys + rmSync('cosign.key', { force: true }); + rmSync('cosign.pub', { force: true }); + + try { + // Generate key pair + const cosignPath = env['COSIGN_PATH'] || 'cosign'; + execSync(`${cosignPath} generate-key-pair`, { + env: { ...shellEnv, COSIGN_PASSWORD: keyPassword }, + encoding: 'utf8', + stdio: 'pipe', + }); + + // Read configuration data + const keyFile = readFileSync('cosign.key'); + const token = shellEnv['OIDC_TOKEN'] || ''; + const providerUrl = shellEnv['OIDC_PROVIDER_URL'] || ''; + const clientId = shellEnv['OIDC_CLIENT_ID'] || 'sigstore'; + + // Create signing providers + const keyRequest = create(models.sign_v1.SignRequestSchema, { + recordRef: recordRefs[0], + provider: { + request: { + case: 'key', + value: { + privateKey: keyFile, + password: Buffer.from(keyPassword, 'utf-8'), + }, + }, + }, + }); + + const oidcRequest = create(models.sign_v1.SignRequestSchema, { + recordRef: recordRefs[1], + provider: { + request: { + case: 'oidc', + value: { + idToken: token, + options: { + oidcProviderUrl: providerUrl, + }, + }, + }, + }, + }); + + // Sign test + client.sign(keyRequest); + + if ((shellEnv['OIDC_TOKEN'] || '') != '' && (shellEnv['OIDC_PROVIDER_URL'] || '') != '') { + client.sign(oidcRequest, clientId); + } else { + recordRefs.pop(); // NOTE: Drop the unsigned record if no OIDC tested + } + + // Verify test + for (const ref of recordRefs) { + const response = await client.verify( + create(models.sign_v1.VerifyRequestSchema, { + recordRef: ref, + }), + ); + + expect(response.success).toBe(true); + } + + // Test invalid CID + try { + client.sign( + create(models.sign_v1.SignRequestSchema, { + recordRef: { cid: 'invalid-cid' }, + provider: { + request: { + case: 'key', + value: { + privateKey: Uint8Array.from([]), + password: Uint8Array.from([]), + }, + }, + }, + }), + ); + expect.fail('Should have thrown error for invalid CID'); + } catch (error) { + if (error instanceof Error) { + expect(error.message).toContain('failed to decode CID invalid-cid'); + } + } + } catch (error) { + expect.fail(`Sign and verify test failed: ${error}`); + } finally { + // Clean up keys + rmSync('cosign.key', { force: true }); + rmSync('cosign.pub', { force: true }); + } + }, 30000); + + test('sync', async () => { + // Create sync + const createResponse = await client.create_sync( + create(models.store_v1.CreateSyncRequestSchema, { + remoteDirectoryUrl: + env['DIRECTORY_SERVER_PEER1_ADDRESS'] || '0.0.0.0:8891', + }), + ); + expect(createResponse).toBeTypeOf( + typeof 
models.store_v1.CreateSyncResponseSchema, + ); + + const syncId = createResponse.syncId; + expect(isValidUUID(syncId)).toBe(true); + + // List syncs + const listResponse = await client.list_syncs( + create(models.store_v1.ListSyncsRequestSchema, {}), + ); + expect(listResponse).toBeInstanceOf(Array); + + for (const syncItem of listResponse) { + expect(syncItem).toBeTypeOf(typeof models.store_v1.ListSyncsItemSchema); + expect(isValidUUID(syncItem.syncId)).toBe(true); + } + + // Get sync + const getResponse = await client.get_sync( + create(models.store_v1.GetSyncRequestSchema, { + syncId: syncId, + }), + ); + expect(getResponse).toBeTypeOf( + typeof models.store_v1.GetSyncResponseSchema, + ); + expect(getResponse.syncId).toEqual(syncId); + + // Delete sync + await client.delete_sync( + create(models.store_v1.DeleteSyncRequestSchema, { + syncId: syncId, + }), + ); + }); + + test('listen', async () => { + const records = genRecords(1, 'listen'); + const recordRefs = await client.push(records); + + const pool = workerpool(__dirname + '/listen_worker.ts'); + try { + pool.exec('pullRecordsBackground', [recordRefs[0].cid, config.dirctlPath, config.spiffeEndpointSocket]); + } catch (error) { + expect.fail(`pullRecordsBackground execution failed: ${error}`) + } + + let events = client.listen( + create(models.events_v1.ListenRequestSchema, {}) + ); + + for await (const response of events) { + expect(response).toBeTypeOf(typeof models.events_v1.ListenResponseSchema); + break; // Exit after first event for test purposes + } + + pool.terminate(true); + + }, 120000); + + test('publication', async () => { + const records = genRecords(1, 'publication'); + const recordRefs = await client.push(records); + + const createResponse = await client.create_publication( + create(models.routing_v1.PublishRequestSchema, { + request: { + case: 'recordRefs', + value: { + refs: recordRefs, + }, + }, + }), + ); + + expect(createResponse).toBeTypeOf( + typeof models.routing_v1.CreatePublicationResponseSchema, + ); + + const publicationsList = await client.list_publication( + create(models.routing_v1.ListPublicationsRequestSchema, {}), + ); + + expect(publicationsList).toBeInstanceOf(Array); + + for (const publication of publicationsList) { + expect(publication).toBeTypeOf(typeof models.routing_v1.ListPublicationsItemSchema); + } + + const getResponse = await client.get_publication( + create(models.routing_v1.GetPublicationRequestSchema, { + publicationId: createResponse.publicationId, + }), + ); + + expect(getResponse).toBeTypeOf( + typeof models.routing_v1.GetPublicationResponseSchema, + ); + + expect(getResponse.publicationId).toEqual(createResponse.publicationId); + }); +}); diff --git a/sdk/dir-js/test/listen_worker.ts b/sdk/dir-js/test/listen_worker.ts index cd007a275..bcaabf5cf 100644 --- a/sdk/dir-js/test/listen_worker.ts +++ b/sdk/dir-js/test/listen_worker.ts @@ -1,27 +1,27 @@ -import { spawnSync } from 'node:child_process'; -import { env } from 'node:process'; -import { worker } from 'workerpool'; - -worker({ - pullRecordsBackground, -}); - -export async function pullRecordsBackground(cid: string, dirctlPath: string, spiffeEndpointSocket: string) { - const shell_env = env; - - let commandArgs = ["pull", cid]; - - if (spiffeEndpointSocket !== '') { - commandArgs.push(...["--spiffe-socket-path", spiffeEndpointSocket]); - } - - for (let count = 0; count < 90; count++) { - // Execute command - spawnSync( - `${dirctlPath}`, commandArgs, - { env: { ...shell_env }, encoding: 'utf8', stdio: 'pipe' }, - ); - - await new 
Promise(resolve => setTimeout(resolve, 1000)); - } -} +import { spawnSync } from 'node:child_process'; +import { env } from 'node:process'; +import { worker } from 'workerpool'; + +worker({ + pullRecordsBackground, +}); + +export async function pullRecordsBackground(cid: string, dirctlPath: string, spiffeEndpointSocket: string) { + const shell_env = env; + + let commandArgs = ["pull", cid]; + + if (spiffeEndpointSocket !== '') { + commandArgs.push(...["--spiffe-socket-path", spiffeEndpointSocket]); + } + + for (let count = 0; count < 90; count++) { + // Execute command + spawnSync( + `${dirctlPath}`, commandArgs, + { env: { ...shell_env }, encoding: 'utf8', stdio: 'pipe' }, + ); + + await new Promise(resolve => setTimeout(resolve, 1000)); + } +} diff --git a/sdk/dir-js/tsconfig.json b/sdk/dir-js/tsconfig.json index 1ebe77c0b..2aed6ccf7 100644 --- a/sdk/dir-js/tsconfig.json +++ b/sdk/dir-js/tsconfig.json @@ -1,52 +1,52 @@ -{ - "compilerOptions": { - "rootDir": ".", - "outDir": "./dist", - "moduleResolution": "bundler", - "module": "es2020", - "target": "es2018", - "lib": [ - // ES2017 for async/await - "ES2017", - // DOM for the fetch and streams API - "DOM", - // ES2018.AsyncIterable for AsyncIterator - "ES2018.AsyncIterable" - ], - "declaration": true, - "strict": true, - "resolveJsonModule": true, - "allowUnreachableCode": false, - "allowUnusedLabels": false, - "composite": true, - "forceConsistentCasingInFileNames": true, - "noEmitOnError": true, - "noFallthroughCasesInSwitch": true, - "noImplicitReturns": true, - "pretty": true, - "sourceMap": true, - "stripInternal": true - }, - "include": [ - "src/**/*.ts", - "test/**/*.test.ts" - ], - "exclude": [ - "node_modules", - "dist", - ], - "ts-node": { - "compilerOptions": { - "module": "ES2020" - } - }, - "vitest": { - "test": { - "globals": true, - "environment": "node", - "include": [ - "test/**/*.test.ts" - ] - } - } -} +{ + "compilerOptions": { + "rootDir": ".", + "outDir": "./dist", + "moduleResolution": "bundler", + "module": "es2020", + "target": "es2018", + "lib": [ + // ES2017 for async/await + "ES2017", + // DOM for the fetch and streams API + "DOM", + // ES2018.AsyncIterable for AsyncIterator + "ES2018.AsyncIterable" + ], + "declaration": true, + "strict": true, + "resolveJsonModule": true, + "allowUnreachableCode": false, + "allowUnusedLabels": false, + "composite": true, + "forceConsistentCasingInFileNames": true, + "noEmitOnError": true, + "noFallthroughCasesInSwitch": true, + "noImplicitReturns": true, + "pretty": true, + "sourceMap": true, + "stripInternal": true + }, + "include": [ + "src/**/*.ts", + "test/**/*.test.ts" + ], + "exclude": [ + "node_modules", + "dist", + ], + "ts-node": { + "compilerOptions": { + "module": "ES2020" + } + }, + "vitest": { + "test": { + "globals": true, + "environment": "node", + "include": [ + "test/**/*.test.ts" + ] + } + } +} diff --git a/sdk/dir-py/.gitignore b/sdk/dir-py/.gitignore index 2cdb3b6a9..25b520e54 100644 --- a/sdk/dir-py/.gitignore +++ b/sdk/dir-py/.gitignore @@ -1,14 +1,14 @@ -# Python -dist/ -__pycache__/ -*.egg-info/ -*.pyc -*.pyo -*.pyd - -# Virtual Environments -.venv/ - -# Tests -*.key -*.pub +# Python +dist/ +__pycache__/ +*.egg-info/ +*.pyc +*.pyo +*.pyd + +# Virtual Environments +.venv/ + +# Tests +*.key +*.pub diff --git a/sdk/dir-py/README.md b/sdk/dir-py/README.md index 571546d90..12cac64c9 100644 --- a/sdk/dir-py/README.md +++ b/sdk/dir-py/README.md @@ -1,174 +1,174 @@ -# Directory Python SDK - -## Overview - -Dir Python SDK provides a simple way to interact with 
the Directory API. -It allows developers to integrate and use Directory functionality from their Python applications with ease. - -## Features - -The Directory Python SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface: - -### **Store API** -- **Record Management**: Push records to the store and pull them by reference -- **Metadata Operations**: Look up record metadata without downloading full content -- **Data Lifecycle**: Delete records permanently from the store -- **Referrer Support**: Push and pull artifacts for existing records -- **Sync Management**: Manage storage synchronization policies between Directory servers - -### **Search API** -- **Flexible Search**: Search stored records using text, semantic, and structured queries -- **Advanced Filtering**: Filter results by metadata, content type, and other criteria - -### **Routing API** -- **Network Publishing**: Publish records to make them discoverable across the network -- **Content Discovery**: List and query published records across the network -- **Network Management**: Unpublish records to remove them from network discovery - -### **Signing and Verification** -- **Local Signing**: Sign records locally using private keys or OIDC-based authentication. -Requires [dirctl](https://github.com/agntcy/dir/releases) binary to perform signing. -- **Remote Verification**: Verify record signatures using the Directory gRPC API - -### **Developer Experience** -- **Type Safety**: Full type hints for better IDE support and fewer runtime errors -- **Async Support**: Non-blocking operations with streaming responses for large datasets -- **Error Handling**: Comprehensive gRPC error handling with detailed error messages -- **Configuration**: Flexible configuration via environment variables or direct instantiation - -## Installation - -Install the SDK using [uv](https://github.com/astral-sh/uv) - -1. Initialize the project: -```bash -uv init -``` - -2. 
Add the SDK to your project: -```bash -uv add agntcy-dir --index https://buf.build/gen/python -``` - -## Configuration - -The SDK can be configured via environment variables or direct instantiation: - -```python -# Environment variables (insecure mode, default) -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -export DIRCTL_PATH="/path/to/dirctl" - -# Environment variables (X.509 authentication) -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -export DIRECTORY_CLIENT_AUTH_MODE="x509" -export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="/tmp/agent.sock" - -# Environment variables (JWT authentication) -export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888" -export DIRECTORY_CLIENT_AUTH_MODE="jwt" -export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="/tmp/agent.sock" -export DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://example.org/dir-server" - -# Or configure directly -from agntcy.dir_sdk.client import Config, Client - -# Insecure mode (default, for development only) -config = Config( - server_address="localhost:8888", - dirctl_path="/usr/local/bin/dirctl" -) -client = Client(config) - -# X.509 authentication with SPIRE -x509_config = Config( - server_address="localhost:8888", - dirctl_path="/usr/local/bin/dirctl", - spiffe_socket_path="/tmp/agent.sock", - auth_mode="x509" -) -x509_client = Client(x509_config) - -# JWT authentication with SPIRE -jwt_config = Config( - server_address="localhost:8888", - dirctl_path="/usr/local/bin/dirctl", - spiffe_socket_path="/tmp/agent.sock", - auth_mode="jwt", - jwt_audience="spiffe://example.org/dir-server" -) -jwt_client = Client(jwt_config) -``` - -## Error Handling - -The SDK primarily raises `grpc.RpcError` exceptions for gRPC communication issues and `RuntimeError` for configuration problems: - -```python -import grpc -from agntcy.dir_sdk.client import Client - -try: - client = Client() - records = client.list(list_request) -except grpc.RpcError as e: - # Handle gRPC errors - if e.code() == grpc.StatusCode.NOT_FOUND: - print("Resource not found") - elif e.code() == grpc.StatusCode.UNAVAILABLE: - print("Server unavailable") - else: - print(f"gRPC error: {e.details()}") -except RuntimeError as e: - # Handle configuration or subprocess errors - print(f"Runtime error: {e}") -``` - -Common gRPC status codes: -- `NOT_FOUND`: Resource doesn't exist -- `ALREADY_EXISTS`: Resource already exists -- `UNAVAILABLE`: Server is down or unreachable -- `PERMISSION_DENIED`: Authentication/authorization failure -- `INVALID_ARGUMENT`: Invalid request parameters - - -## Getting Started - -### Prerequisites - -- Python 3.10 or higher -- [uv](https://github.com/astral-sh/uv) - Package manager -- [dirctl](https://github.com/agntcy/dir/releases) - Directory CLI binary -- Directory server instance (see setup below) - -### 1. Server Setup - -**Option A: Local Development Server** - -```bash -# Clone the repository and start the server using Taskfile -task server:start -``` - -**Option B: Custom Server** - -```bash -# Set your Directory server address -export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888" -``` - -### 2. SDK Installation - -```bash -# Add the Directory SDK -uv add agntcy-dir --index https://buf.build/gen/python -``` - -### Usage Examples - -See the [Example Python Project](../examples/example-py/) for a complete working example that demonstrates all SDK features. - -```bash -uv sync -uv run example.py -``` +# Directory Python SDK + +## Overview + +Dir Python SDK provides a simple way to interact with the Directory API. 
+It allows developers to integrate and use Directory functionality from their Python applications with ease.
+
+## Features
+
+The Directory Python SDK provides comprehensive access to all Directory APIs with a simple, intuitive interface:
+
+### **Store API**
+- **Record Management**: Push records to the store and pull them by reference
+- **Metadata Operations**: Look up record metadata without downloading full content
+- **Data Lifecycle**: Delete records permanently from the store
+- **Referrer Support**: Push and pull artifacts for existing records
+- **Sync Management**: Manage storage synchronization policies between Directory servers
+
+### **Search API**
+- **Flexible Search**: Search stored records using text, semantic, and structured queries
+- **Advanced Filtering**: Filter results by metadata, content type, and other criteria
+
+### **Routing API**
+- **Network Publishing**: Publish records to make them discoverable across the network
+- **Content Discovery**: List and query published records across the network
+- **Network Management**: Unpublish records to remove them from network discovery
+
+### **Signing and Verification**
+- **Local Signing**: Sign records locally using private keys or OIDC-based authentication.
+Requires the [dirctl](https://github.com/agntcy/dir/releases) binary to perform signing.
+- **Remote Verification**: Verify record signatures using the Directory gRPC API
+
+### **Developer Experience**
+- **Type Safety**: Full type hints for better IDE support and fewer runtime errors
+- **Async Support**: Non-blocking operations with streaming responses for large datasets
+- **Error Handling**: Comprehensive gRPC error handling with detailed error messages
+- **Configuration**: Flexible configuration via environment variables or direct instantiation
+
+## Installation
+
+Install the SDK using [uv](https://github.com/astral-sh/uv):
+
+1. Initialize the project:
+```bash
+uv init
+```
+
+2. Add the SDK to your project:
+```bash
+uv add agntcy-dir --index https://buf.build/gen/python
+```
+
+## Configuration
+
+The SDK can be configured via environment variables or direct instantiation:
+
+```bash
+# Environment variables (insecure mode, default)
+export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888"
+export DIRCTL_PATH="/path/to/dirctl"
+
+# Environment variables (X.509 authentication)
+export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888"
+export DIRECTORY_CLIENT_AUTH_MODE="x509"
+export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="/tmp/agent.sock"
+
+# Environment variables (JWT authentication)
+export DIRECTORY_CLIENT_SERVER_ADDRESS="localhost:8888"
+export DIRECTORY_CLIENT_AUTH_MODE="jwt"
+export DIRECTORY_CLIENT_SPIFFE_SOCKET_PATH="/tmp/agent.sock"
+export DIRECTORY_CLIENT_JWT_AUDIENCE="spiffe://example.org/dir-server"
+```
+
+```python
+# Or configure directly
+from agntcy.dir_sdk.client import Config, Client
+
+# Insecure mode (default, for development only)
+config = Config(
+    server_address="localhost:8888",
+    dirctl_path="/usr/local/bin/dirctl"
+)
+client = Client(config)
+
+# X.509 authentication with SPIRE
+x509_config = Config(
+    server_address="localhost:8888",
+    dirctl_path="/usr/local/bin/dirctl",
+    spiffe_socket_path="/tmp/agent.sock",
+    auth_mode="x509"
+)
+x509_client = Client(x509_config)
+
+# JWT authentication with SPIRE
+jwt_config = Config(
+    server_address="localhost:8888",
+    dirctl_path="/usr/local/bin/dirctl",
+    spiffe_socket_path="/tmp/agent.sock",
+    auth_mode="jwt",
+    jwt_audience="spiffe://example.org/dir-server"
+)
+jwt_client = Client(jwt_config)
+```
+
+## Error Handling
+
+The SDK primarily raises `grpc.RpcError` exceptions for gRPC communication issues and `RuntimeError` for configuration problems:
+
+```python
+import grpc
+from agntcy.dir_sdk.client import Client
+
+try:
+    client = Client()
+    records = client.list(list_request)
+except grpc.RpcError as e:
+    # Handle gRPC errors
+    if e.code() == grpc.StatusCode.NOT_FOUND:
+        print("Resource not found")
+    elif e.code() == grpc.StatusCode.UNAVAILABLE:
+        print("Server unavailable")
+    else:
+        print(f"gRPC error: {e.details()}")
+except RuntimeError as e:
+    # Handle configuration or subprocess errors
+    print(f"Runtime error: {e}")
+```
+
+Common gRPC status codes:
+- `NOT_FOUND`: Resource doesn't exist
+- `ALREADY_EXISTS`: Resource already exists
+- `UNAVAILABLE`: Server is down or unreachable
+- `PERMISSION_DENIED`: Authentication/authorization failure
+- `INVALID_ARGUMENT`: Invalid request parameters
+
+
+## Getting Started
+
+### Prerequisites
+
+- Python 3.10 or higher
+- [uv](https://github.com/astral-sh/uv) - Package manager
+- [dirctl](https://github.com/agntcy/dir/releases) - Directory CLI binary
+- Directory server instance (see setup below)
+
+### 1. Server Setup
+
+**Option A: Local Development Server**
+
+```bash
+# Clone the repository and start the server using Taskfile
+task server:start
+```
+
+**Option B: Custom Server**
+
+```bash
+# Set your Directory server address
+export DIRECTORY_CLIENT_SERVER_ADDRESS="your-server:8888"
+```
+
+### 2. SDK Installation
+
+```bash
+# Add the Directory SDK
+uv add agntcy-dir --index https://buf.build/gen/python
+```
+
+### 3. Usage Examples
+
+See the [Example Python Project](../examples/example-py/) for a complete working example that demonstrates all SDK features.
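+
+Before running the full example, the following minimal sketch illustrates the typical store flow. This is an illustrative sketch only: the record construction uses the generated `record_pb2` module from this SDK, while the client method names (`push`, `pull`, `lookup`) are assumptions that mirror the JS client; consult the example project for the authoritative API.
+
+```python
+from google.protobuf import struct_pb2
+
+from agntcy.dir.core.v1 import record_pb2
+from agntcy.dir_sdk.client import Client
+
+# Reads DIRECTORY_CLIENT_* settings from the environment
+client = Client()
+
+# A Record wraps a google.protobuf.Struct holding the OASF payload
+data = struct_pb2.Struct()
+data.update({
+    "name": "my-agent",        # hypothetical record content
+    "version": "v1.0.0",
+    "schema_version": "0.7.0",
+})
+record = record_pb2.Record(data=data)
+
+refs = client.push([record])   # assumed to mirror the JS client's push()
+records = client.pull(refs)    # fetch full records by content-addressed ref
+metas = client.lookup(refs)    # metadata only, no content download
+```
+
+Sync management follows a create/inspect/delete lifecycle. The sketch below makes the same assumptions: the message types match the `SyncService` proto definitions, the module path follows the layout of the other generated modules, and the method names mirror the JS client.
+
+```python
+from agntcy.dir.store.v1 import sync_service_pb2
+from agntcy.dir_sdk.client import Client
+
+client = Client()
+
+# Ask the local node to replicate records from a remote Directory node
+created = client.create_sync(                         # assumed method name
+    sync_service_pb2.CreateSyncRequest(
+        remote_directory_url="dir.example.org:8888",  # hypothetical peer
+    )
+)
+
+# Inspect the sync operation's lifecycle state
+info = client.get_sync(
+    sync_service_pb2.GetSyncRequest(sync_id=created.sync_id)
+)
+print(sync_service_pb2.SyncStatus.Name(info.status))
+
+# Remove the sync policy once it is no longer needed
+client.delete_sync(
+    sync_service_pb2.DeleteSyncRequest(sync_id=created.sync_id)
+)
+```
+
+To run the full example project: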
+ +```bash +uv sync +uv run example.py +``` diff --git a/sdk/dir-py/agntcy/dir/core/v1/record_pb2.py b/sdk/dir-py/agntcy/dir/core/v1/record_pb2.py index 302871811..44c85a788 100644 --- a/sdk/dir-py/agntcy/dir/core/v1/record_pb2.py +++ b/sdk/dir-py/agntcy/dir/core/v1/record_pb2.py @@ -1,52 +1,52 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/core/v1/record.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/core/v1/record.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x61gntcy/dir/core/v1/record.proto\x12\x12\x61gntcy.dir.core.v1\x1a\x1cgoogle/protobuf/struct.proto\"\x1d\n\tRecordRef\x12\x10\n\x03\x63id\x18\x01 \x01(\tR\x03\x63id\"\xf7\x01\n\nRecordMeta\x12\x10\n\x03\x63id\x18\x01 \x01(\tR\x03\x63id\x12Q\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32/.agntcy.dir.core.v1.RecordMeta.AnnotationsEntryR\x0b\x61nnotations\x12%\n\x0eschema_version\x18\x03 \x01(\tR\rschemaVersion\x12\x1d\n\ncreated_at\x18\x04 \x01(\tR\tcreatedAt\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"5\n\x06Record\x12+\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.StructR\x04\x64\x61ta\"\xc5\x02\n\x0eRecordReferrer\x12\x12\n\x04type\x18\x01 \x01(\tR\x04type\x12<\n\nrecord_ref\x18\x02 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12U\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32\x33.agntcy.dir.core.v1.RecordReferrer.AnnotationsEntryR\x0b\x61nnotations\x12\x1d\n\ncreated_at\x18\x04 \x01(\tR\tcreatedAt\x12+\n\x04\x64\x61ta\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructR\x04\x64\x61ta\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\xb3\x01\n\x16\x63om.agntcy.dir.core.v1B\x0bRecordProtoP\x01Z!github.com/agntcy/dir/api/core/v1\xa2\x02\x03\x41\x44\x43\xaa\x02\x12\x41gntcy.Dir.Core.V1\xca\x02\x12\x41gntcy\\Dir\\Core\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Core\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Core::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.core.v1.record_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.core.v1B\013RecordProtoP\001Z!github.com/agntcy/dir/api/core/v1\242\002\003ADC\252\002\022Agntcy.Dir.Core.V1\312\002\022Agntcy\\Dir\\Core\\V1\342\002\036Agntcy\\Dir\\Core\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Core::V1' - _globals['_RECORDMETA_ANNOTATIONSENTRY']._loaded_options = None - _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_options = b'8\001' - _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._loaded_options = None - 
_globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_options = b'8\001' - _globals['_RECORDREF']._serialized_start=85 - _globals['_RECORDREF']._serialized_end=114 - _globals['_RECORDMETA']._serialized_start=117 - _globals['_RECORDMETA']._serialized_end=364 - _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_start=302 - _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_end=364 - _globals['_RECORD']._serialized_start=366 - _globals['_RECORD']._serialized_end=419 - _globals['_RECORDREFERRER']._serialized_start=422 - _globals['_RECORDREFERRER']._serialized_end=747 - _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_start=302 - _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_end=364 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/core/v1/record.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/core/v1/record.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x61gntcy/dir/core/v1/record.proto\x12\x12\x61gntcy.dir.core.v1\x1a\x1cgoogle/protobuf/struct.proto\"\x1d\n\tRecordRef\x12\x10\n\x03\x63id\x18\x01 \x01(\tR\x03\x63id\"\xf7\x01\n\nRecordMeta\x12\x10\n\x03\x63id\x18\x01 \x01(\tR\x03\x63id\x12Q\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32/.agntcy.dir.core.v1.RecordMeta.AnnotationsEntryR\x0b\x61nnotations\x12%\n\x0eschema_version\x18\x03 \x01(\tR\rschemaVersion\x12\x1d\n\ncreated_at\x18\x04 \x01(\tR\tcreatedAt\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"5\n\x06Record\x12+\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.StructR\x04\x64\x61ta\"\xc5\x02\n\x0eRecordReferrer\x12\x12\n\x04type\x18\x01 \x01(\tR\x04type\x12<\n\nrecord_ref\x18\x02 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12U\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32\x33.agntcy.dir.core.v1.RecordReferrer.AnnotationsEntryR\x0b\x61nnotations\x12\x1d\n\ncreated_at\x18\x04 \x01(\tR\tcreatedAt\x12+\n\x04\x64\x61ta\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructR\x04\x64\x61ta\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\xb3\x01\n\x16\x63om.agntcy.dir.core.v1B\x0bRecordProtoP\x01Z!github.com/agntcy/dir/api/core/v1\xa2\x02\x03\x41\x44\x43\xaa\x02\x12\x41gntcy.Dir.Core.V1\xca\x02\x12\x41gntcy\\Dir\\Core\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Core\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Core::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.core.v1.record_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = 
b'\n\026com.agntcy.dir.core.v1B\013RecordProtoP\001Z!github.com/agntcy/dir/api/core/v1\242\002\003ADC\252\002\022Agntcy.Dir.Core.V1\312\002\022Agntcy\\Dir\\Core\\V1\342\002\036Agntcy\\Dir\\Core\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Core::V1' + _globals['_RECORDMETA_ANNOTATIONSENTRY']._loaded_options = None + _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_options = b'8\001' + _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._loaded_options = None + _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_options = b'8\001' + _globals['_RECORDREF']._serialized_start=85 + _globals['_RECORDREF']._serialized_end=114 + _globals['_RECORDMETA']._serialized_start=117 + _globals['_RECORDMETA']._serialized_end=364 + _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_start=302 + _globals['_RECORDMETA_ANNOTATIONSENTRY']._serialized_end=364 + _globals['_RECORD']._serialized_start=366 + _globals['_RECORD']._serialized_end=419 + _globals['_RECORDREFERRER']._serialized_start=422 + _globals['_RECORDREFERRER']._serialized_end=747 + _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_start=302 + _globals['_RECORDREFERRER_ANNOTATIONSENTRY']._serialized_end=364 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/core/v1/record_pb2.pyi b/sdk/dir-py/agntcy/dir/core/v1/record_pb2.pyi index dc55a7a66..463d9a154 100644 --- a/sdk/dir-py/agntcy/dir/core/v1/record_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/core/v1/record_pb2.pyi @@ -1,59 +1,59 @@ -from google.protobuf import struct_pb2 as _struct_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class RecordRef(_message.Message): - __slots__ = ("cid",) - CID_FIELD_NUMBER: _ClassVar[int] - cid: str - def __init__(self, cid: _Optional[str] = ...) -> None: ... - -class RecordMeta(_message.Message): - __slots__ = ("cid", "annotations", "schema_version", "created_at") - class AnnotationsEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... - CID_FIELD_NUMBER: _ClassVar[int] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - SCHEMA_VERSION_FIELD_NUMBER: _ClassVar[int] - CREATED_AT_FIELD_NUMBER: _ClassVar[int] - cid: str - annotations: _containers.ScalarMap[str, str] - schema_version: str - created_at: str - def __init__(self, cid: _Optional[str] = ..., annotations: _Optional[_Mapping[str, str]] = ..., schema_version: _Optional[str] = ..., created_at: _Optional[str] = ...) -> None: ... - -class Record(_message.Message): - __slots__ = ("data",) - DATA_FIELD_NUMBER: _ClassVar[int] - data: _struct_pb2.Struct - def __init__(self, data: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ... - -class RecordReferrer(_message.Message): - __slots__ = ("type", "record_ref", "annotations", "created_at", "data") - class AnnotationsEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
- TYPE_FIELD_NUMBER: _ClassVar[int] - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - CREATED_AT_FIELD_NUMBER: _ClassVar[int] - DATA_FIELD_NUMBER: _ClassVar[int] - type: str - record_ref: RecordRef - annotations: _containers.ScalarMap[str, str] - created_at: str - data: _struct_pb2.Struct - def __init__(self, type: _Optional[str] = ..., record_ref: _Optional[_Union[RecordRef, _Mapping]] = ..., annotations: _Optional[_Mapping[str, str]] = ..., created_at: _Optional[str] = ..., data: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ... +from google.protobuf import struct_pb2 as _struct_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RecordRef(_message.Message): + __slots__ = ("cid",) + CID_FIELD_NUMBER: _ClassVar[int] + cid: str + def __init__(self, cid: _Optional[str] = ...) -> None: ... + +class RecordMeta(_message.Message): + __slots__ = ("cid", "annotations", "schema_version", "created_at") + class AnnotationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + CID_FIELD_NUMBER: _ClassVar[int] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + SCHEMA_VERSION_FIELD_NUMBER: _ClassVar[int] + CREATED_AT_FIELD_NUMBER: _ClassVar[int] + cid: str + annotations: _containers.ScalarMap[str, str] + schema_version: str + created_at: str + def __init__(self, cid: _Optional[str] = ..., annotations: _Optional[_Mapping[str, str]] = ..., schema_version: _Optional[str] = ..., created_at: _Optional[str] = ...) -> None: ... + +class Record(_message.Message): + __slots__ = ("data",) + DATA_FIELD_NUMBER: _ClassVar[int] + data: _struct_pb2.Struct + def __init__(self, data: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ... + +class RecordReferrer(_message.Message): + __slots__ = ("type", "record_ref", "annotations", "created_at", "data") + class AnnotationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + TYPE_FIELD_NUMBER: _ClassVar[int] + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + CREATED_AT_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + type: str + record_ref: RecordRef + annotations: _containers.ScalarMap[str, str] + created_at: str + data: _struct_pb2.Struct + def __init__(self, type: _Optional[str] = ..., record_ref: _Optional[_Union[RecordRef, _Mapping]] = ..., annotations: _Optional[_Mapping[str, str]] = ..., created_at: _Optional[str] = ..., data: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/core/v1/record_pb2_grpc.py b/sdk/dir-py/agntcy/dir/core/v1/record_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/core/v1/record_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/core/v1/record_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.py b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.py index 43492c5da..ed97a711a 100644 --- a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.py @@ -1,50 +1,50 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/events/v1/event_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/events/v1/event_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(agntcy/dir/events/v1/event_service.proto\x12\x14\x61gntcy.dir.events.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x01\n\rListenRequest\x12@\n\x0b\x65vent_types\x18\x01 \x03(\x0e\x32\x1f.agntcy.dir.events.v1.EventTypeR\neventTypes\x12#\n\rlabel_filters\x18\x02 \x03(\tR\x0clabelFilters\x12\x1f\n\x0b\x63id_filters\x18\x03 \x03(\tR\ncidFilters\"C\n\x0eListenResponse\x12\x31\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x1b.agntcy.dir.events.v1.EventR\x05\x65vent\"\xc3\x02\n\x05\x45vent\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.events.v1.EventTypeR\x04type\x12\x38\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ttimestamp\x12\x1f\n\x0bresource_id\x18\x04 \x01(\tR\nresourceId\x12\x16\n\x06labels\x18\x05 \x03(\tR\x06labels\x12\x45\n\x08metadata\x18\x07 \x03(\x0b\x32).agntcy.dir.events.v1.Event.MetadataEntryR\x08metadata\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01*\xbc\x02\n\tEventType\x12\x1a\n\x16\x45VENT_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n\x18\x45VENT_TYPE_RECORD_PUSHED\x10\x01\x12\x1c\n\x18\x45VENT_TYPE_RECORD_PULLED\x10\x02\x12\x1d\n\x19\x45VENT_TYPE_RECORD_DELETED\x10\x03\x12\x1f\n\x1b\x45VENT_TYPE_RECORD_PUBLISHED\x10\x04\x12!\n\x1d\x45VENT_TYPE_RECORD_UNPUBLISHED\x10\x05\x12\x1b\n\x17\x45VENT_TYPE_SYNC_CREATED\x10\x06\x12\x1d\n\x19\x45VENT_TYPE_SYNC_COMPLETED\x10\x07\x12\x1a\n\x16\x45VENT_TYPE_SYNC_FAILED\x10\x08\x12\x1c\n\x18\x45VENT_TYPE_RECORD_SIGNED\x10\t2e\n\x0c\x45ventService\x12U\n\x06Listen\x12#.agntcy.dir.events.v1.ListenRequest\x1a$.agntcy.dir.events.v1.ListenResponse0\x01\x42\xc5\x01\n\x18\x63om.agntcy.dir.events.v1B\x11\x45ventServiceProtoP\x01Z#github.com/agntcy/dir/api/events/v1\xa2\x02\x03\x41\x44\x45\xaa\x02\x14\x41gntcy.Dir.Events.V1\xca\x02\x14\x41gntcy\\Dir\\Events\\V1\xe2\x02 Agntcy\\Dir\\Events\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Events::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.events.v1.event_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.events.v1B\021EventServiceProtoP\001Z#github.com/agntcy/dir/api/events/v1\242\002\003ADE\252\002\024Agntcy.Dir.Events.V1\312\002\024Agntcy\\Dir\\Events\\V1\342\002 Agntcy\\Dir\\Events\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Events::V1' - _globals['_EVENT_METADATAENTRY']._loaded_options = None - _globals['_EVENT_METADATAENTRY']._serialized_options = b'8\001' - _globals['_EVENTTYPE']._serialized_start=649 - _globals['_EVENTTYPE']._serialized_end=965 - _globals['_LISTENREQUEST']._serialized_start=100 - _globals['_LISTENREQUEST']._serialized_end=251 - _globals['_LISTENRESPONSE']._serialized_start=253 - _globals['_LISTENRESPONSE']._serialized_end=320 - _globals['_EVENT']._serialized_start=323 - _globals['_EVENT']._serialized_end=646 - _globals['_EVENT_METADATAENTRY']._serialized_start=587 - _globals['_EVENT_METADATAENTRY']._serialized_end=646 - _globals['_EVENTSERVICE']._serialized_start=967 - _globals['_EVENTSERVICE']._serialized_end=1068 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/events/v1/event_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/events/v1/event_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(agntcy/dir/events/v1/event_service.proto\x12\x14\x61gntcy.dir.events.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x01\n\rListenRequest\x12@\n\x0b\x65vent_types\x18\x01 \x03(\x0e\x32\x1f.agntcy.dir.events.v1.EventTypeR\neventTypes\x12#\n\rlabel_filters\x18\x02 \x03(\tR\x0clabelFilters\x12\x1f\n\x0b\x63id_filters\x18\x03 \x03(\tR\ncidFilters\"C\n\x0eListenResponse\x12\x31\n\x05\x65vent\x18\x01 \x01(\x0b\x32\x1b.agntcy.dir.events.v1.EventR\x05\x65vent\"\xc3\x02\n\x05\x45vent\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.events.v1.EventTypeR\x04type\x12\x38\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ttimestamp\x12\x1f\n\x0bresource_id\x18\x04 \x01(\tR\nresourceId\x12\x16\n\x06labels\x18\x05 \x03(\tR\x06labels\x12\x45\n\x08metadata\x18\x07 \x03(\x0b\x32).agntcy.dir.events.v1.Event.MetadataEntryR\x08metadata\x1a;\n\rMetadataEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 
\x01(\tR\x05value:\x02\x38\x01*\xbc\x02\n\tEventType\x12\x1a\n\x16\x45VENT_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n\x18\x45VENT_TYPE_RECORD_PUSHED\x10\x01\x12\x1c\n\x18\x45VENT_TYPE_RECORD_PULLED\x10\x02\x12\x1d\n\x19\x45VENT_TYPE_RECORD_DELETED\x10\x03\x12\x1f\n\x1b\x45VENT_TYPE_RECORD_PUBLISHED\x10\x04\x12!\n\x1d\x45VENT_TYPE_RECORD_UNPUBLISHED\x10\x05\x12\x1b\n\x17\x45VENT_TYPE_SYNC_CREATED\x10\x06\x12\x1d\n\x19\x45VENT_TYPE_SYNC_COMPLETED\x10\x07\x12\x1a\n\x16\x45VENT_TYPE_SYNC_FAILED\x10\x08\x12\x1c\n\x18\x45VENT_TYPE_RECORD_SIGNED\x10\t2e\n\x0c\x45ventService\x12U\n\x06Listen\x12#.agntcy.dir.events.v1.ListenRequest\x1a$.agntcy.dir.events.v1.ListenResponse0\x01\x42\xc5\x01\n\x18\x63om.agntcy.dir.events.v1B\x11\x45ventServiceProtoP\x01Z#github.com/agntcy/dir/api/events/v1\xa2\x02\x03\x41\x44\x45\xaa\x02\x14\x41gntcy.Dir.Events.V1\xca\x02\x14\x41gntcy\\Dir\\Events\\V1\xe2\x02 Agntcy\\Dir\\Events\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Events::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.events.v1.event_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.events.v1B\021EventServiceProtoP\001Z#github.com/agntcy/dir/api/events/v1\242\002\003ADE\252\002\024Agntcy.Dir.Events.V1\312\002\024Agntcy\\Dir\\Events\\V1\342\002 Agntcy\\Dir\\Events\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Events::V1' + _globals['_EVENT_METADATAENTRY']._loaded_options = None + _globals['_EVENT_METADATAENTRY']._serialized_options = b'8\001' + _globals['_EVENTTYPE']._serialized_start=649 + _globals['_EVENTTYPE']._serialized_end=965 + _globals['_LISTENREQUEST']._serialized_start=100 + _globals['_LISTENREQUEST']._serialized_end=251 + _globals['_LISTENRESPONSE']._serialized_start=253 + _globals['_LISTENRESPONSE']._serialized_end=320 + _globals['_EVENT']._serialized_start=323 + _globals['_EVENT']._serialized_end=646 + _globals['_EVENT_METADATAENTRY']._serialized_start=587 + _globals['_EVENT_METADATAENTRY']._serialized_end=646 + _globals['_EVENTSERVICE']._serialized_start=967 + _globals['_EVENTSERVICE']._serialized_end=1068 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.pyi b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.pyi index 7dc21d9d0..5d928e42b 100644 --- a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2.pyi @@ -1,70 +1,70 @@ -from google.protobuf import timestamp_pb2 as _timestamp_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class EventType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - EVENT_TYPE_UNSPECIFIED: _ClassVar[EventType] - EVENT_TYPE_RECORD_PUSHED: _ClassVar[EventType] - EVENT_TYPE_RECORD_PULLED: _ClassVar[EventType] - EVENT_TYPE_RECORD_DELETED: _ClassVar[EventType] - EVENT_TYPE_RECORD_PUBLISHED: _ClassVar[EventType] - EVENT_TYPE_RECORD_UNPUBLISHED: _ClassVar[EventType] - EVENT_TYPE_SYNC_CREATED: _ClassVar[EventType] - EVENT_TYPE_SYNC_COMPLETED: 
_ClassVar[EventType] - EVENT_TYPE_SYNC_FAILED: _ClassVar[EventType] - EVENT_TYPE_RECORD_SIGNED: _ClassVar[EventType] -EVENT_TYPE_UNSPECIFIED: EventType -EVENT_TYPE_RECORD_PUSHED: EventType -EVENT_TYPE_RECORD_PULLED: EventType -EVENT_TYPE_RECORD_DELETED: EventType -EVENT_TYPE_RECORD_PUBLISHED: EventType -EVENT_TYPE_RECORD_UNPUBLISHED: EventType -EVENT_TYPE_SYNC_CREATED: EventType -EVENT_TYPE_SYNC_COMPLETED: EventType -EVENT_TYPE_SYNC_FAILED: EventType -EVENT_TYPE_RECORD_SIGNED: EventType - -class ListenRequest(_message.Message): - __slots__ = ("event_types", "label_filters", "cid_filters") - EVENT_TYPES_FIELD_NUMBER: _ClassVar[int] - LABEL_FILTERS_FIELD_NUMBER: _ClassVar[int] - CID_FILTERS_FIELD_NUMBER: _ClassVar[int] - event_types: _containers.RepeatedScalarFieldContainer[EventType] - label_filters: _containers.RepeatedScalarFieldContainer[str] - cid_filters: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, event_types: _Optional[_Iterable[_Union[EventType, str]]] = ..., label_filters: _Optional[_Iterable[str]] = ..., cid_filters: _Optional[_Iterable[str]] = ...) -> None: ... - -class ListenResponse(_message.Message): - __slots__ = ("event",) - EVENT_FIELD_NUMBER: _ClassVar[int] - event: Event - def __init__(self, event: _Optional[_Union[Event, _Mapping]] = ...) -> None: ... - -class Event(_message.Message): - __slots__ = ("id", "type", "timestamp", "resource_id", "labels", "metadata") - class MetadataEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... - ID_FIELD_NUMBER: _ClassVar[int] - TYPE_FIELD_NUMBER: _ClassVar[int] - TIMESTAMP_FIELD_NUMBER: _ClassVar[int] - RESOURCE_ID_FIELD_NUMBER: _ClassVar[int] - LABELS_FIELD_NUMBER: _ClassVar[int] - METADATA_FIELD_NUMBER: _ClassVar[int] - id: str - type: EventType - timestamp: _timestamp_pb2.Timestamp - resource_id: str - labels: _containers.RepeatedScalarFieldContainer[str] - metadata: _containers.ScalarMap[str, str] - def __init__(self, id: _Optional[str] = ..., type: _Optional[_Union[EventType, str]] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., resource_id: _Optional[str] = ..., labels: _Optional[_Iterable[str]] = ..., metadata: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+from google.protobuf import timestamp_pb2 as _timestamp_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class EventType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + EVENT_TYPE_UNSPECIFIED: _ClassVar[EventType] + EVENT_TYPE_RECORD_PUSHED: _ClassVar[EventType] + EVENT_TYPE_RECORD_PULLED: _ClassVar[EventType] + EVENT_TYPE_RECORD_DELETED: _ClassVar[EventType] + EVENT_TYPE_RECORD_PUBLISHED: _ClassVar[EventType] + EVENT_TYPE_RECORD_UNPUBLISHED: _ClassVar[EventType] + EVENT_TYPE_SYNC_CREATED: _ClassVar[EventType] + EVENT_TYPE_SYNC_COMPLETED: _ClassVar[EventType] + EVENT_TYPE_SYNC_FAILED: _ClassVar[EventType] + EVENT_TYPE_RECORD_SIGNED: _ClassVar[EventType] +EVENT_TYPE_UNSPECIFIED: EventType +EVENT_TYPE_RECORD_PUSHED: EventType +EVENT_TYPE_RECORD_PULLED: EventType +EVENT_TYPE_RECORD_DELETED: EventType +EVENT_TYPE_RECORD_PUBLISHED: EventType +EVENT_TYPE_RECORD_UNPUBLISHED: EventType +EVENT_TYPE_SYNC_CREATED: EventType +EVENT_TYPE_SYNC_COMPLETED: EventType +EVENT_TYPE_SYNC_FAILED: EventType +EVENT_TYPE_RECORD_SIGNED: EventType + +class ListenRequest(_message.Message): + __slots__ = ("event_types", "label_filters", "cid_filters") + EVENT_TYPES_FIELD_NUMBER: _ClassVar[int] + LABEL_FILTERS_FIELD_NUMBER: _ClassVar[int] + CID_FILTERS_FIELD_NUMBER: _ClassVar[int] + event_types: _containers.RepeatedScalarFieldContainer[EventType] + label_filters: _containers.RepeatedScalarFieldContainer[str] + cid_filters: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, event_types: _Optional[_Iterable[_Union[EventType, str]]] = ..., label_filters: _Optional[_Iterable[str]] = ..., cid_filters: _Optional[_Iterable[str]] = ...) -> None: ... + +class ListenResponse(_message.Message): + __slots__ = ("event",) + EVENT_FIELD_NUMBER: _ClassVar[int] + event: Event + def __init__(self, event: _Optional[_Union[Event, _Mapping]] = ...) -> None: ... + +class Event(_message.Message): + __slots__ = ("id", "type", "timestamp", "resource_id", "labels", "metadata") + class MetadataEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + ID_FIELD_NUMBER: _ClassVar[int] + TYPE_FIELD_NUMBER: _ClassVar[int] + TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + RESOURCE_ID_FIELD_NUMBER: _ClassVar[int] + LABELS_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + id: str + type: EventType + timestamp: _timestamp_pb2.Timestamp + resource_id: str + labels: _containers.RepeatedScalarFieldContainer[str] + metadata: _containers.ScalarMap[str, str] + def __init__(self, id: _Optional[str] = ..., type: _Optional[_Union[EventType, str]] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., resource_id: _Optional[str] = ..., labels: _Optional[_Iterable[str]] = ..., metadata: _Optional[_Mapping[str, str]] = ...) -> None: ... 
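The regenerated event stubs above keep the same wire contract: ListenRequest selects event types plus optional label and CID filters, and the server streams back ListenResponse messages wrapping an Event. A minimal client sketch follows; it is illustrative only — the server address, the plaintext channel, and the choice of filter are assumptions, not part of the generated code:

import grpc

from agntcy.dir.events.v1 import event_service_pb2, event_service_pb2_grpc


def watch_record_pushes(address: str = "localhost:8888") -> None:
    # Plaintext channel for brevity; a real deployment would likely use
    # TLS credentials (the address here is a placeholder, not a default).
    with grpc.insecure_channel(address) as channel:
        stub = event_service_pb2_grpc.EventServiceStub(channel)
        request = event_service_pb2.ListenRequest(
            event_types=[event_service_pb2.EVENT_TYPE_RECORD_PUSHED],
        )
        # Listen is a server-streaming RPC: per the service docstrings in the
        # companion grpc module below, events are delivered only from
        # subscription time forward and are not replayed after a disconnect.
        for response in stub.Listen(request):
            event = response.event
            print(event.id, event.resource_id, dict(event.metadata))


if __name__ == "__main__":
    watch_record_pushes()

Because delivery starts at subscription time with no history, a caller that needs durable event handling must persist events itself and tolerate gaps across reconnects.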
diff --git a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2_grpc.py index 7f59cbc83..779afadf5 100644 --- a/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/events/v1/event_service_pb2_grpc.py @@ -1,89 +1,89 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.events.v1 import event_service_pb2 as agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2 - - -class EventServiceStub(object): - """EventService provides real-time event streaming for all system operations. - Events are delivered from subscription time forward with no history or replay. - This service enables external applications to react to system changes in real-time. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Listen = channel.unary_stream( - '/agntcy.dir.events.v1.EventService/Listen', - request_serializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.FromString, - _registered_method=True) - - -class EventServiceServicer(object): - """EventService provides real-time event streaming for all system operations. - Events are delivered from subscription time forward with no history or replay. - This service enables external applications to react to system changes in real-time. - """ - - def Listen(self, request, context): - """Listen establishes a streaming connection to receive events. - Events are only delivered while the stream is active. - On disconnect, missed events are not recoverable. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_EventServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Listen': grpc.unary_stream_rpc_method_handler( - servicer.Listen, - request_deserializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.FromString, - response_serializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.events.v1.EventService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.events.v1.EventService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class EventService(object): - """EventService provides real-time event streaming for all system operations. - Events are delivered from subscription time forward with no history or replay. - This service enables external applications to react to system changes in real-time. 
- """ - - @staticmethod - def Listen(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.events.v1.EventService/Listen', - agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.SerializeToString, - agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.events.v1 import event_service_pb2 as agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2 + + +class EventServiceStub(object): + """EventService provides real-time event streaming for all system operations. + Events are delivered from subscription time forward with no history or replay. + This service enables external applications to react to system changes in real-time. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Listen = channel.unary_stream( + '/agntcy.dir.events.v1.EventService/Listen', + request_serializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.FromString, + _registered_method=True) + + +class EventServiceServicer(object): + """EventService provides real-time event streaming for all system operations. + Events are delivered from subscription time forward with no history or replay. + This service enables external applications to react to system changes in real-time. + """ + + def Listen(self, request, context): + """Listen establishes a streaming connection to receive events. + Events are only delivered while the stream is active. + On disconnect, missed events are not recoverable. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_EventServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Listen': grpc.unary_stream_rpc_method_handler( + servicer.Listen, + request_deserializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.FromString, + response_serializer=agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.events.v1.EventService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.events.v1.EventService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class EventService(object): + """EventService provides real-time event streaming for all system operations. + Events are delivered from subscription time forward with no history or replay. + This service enables external applications to react to system changes in real-time. 
+ """ + + @staticmethod + def Listen(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.events.v1.EventService/Listen', + agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenRequest.SerializeToString, + agntcy_dot_dir_dot_events_dot_v1_dot_event__service__pb2.ListenResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.py b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.py index af17fe206..05b784fd9 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.py @@ -1,43 +1,43 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/routing/v1/peer.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/routing/v1/peer.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n agntcy/dir/routing/v1/peer.proto\x12\x15\x61gntcy.dir.routing.v1\"\x87\x02\n\x04Peer\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n\x05\x61\x64\x64rs\x18\x02 \x03(\tR\x05\x61\x64\x64rs\x12N\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32,.agntcy.dir.routing.v1.Peer.AnnotationsEntryR\x0b\x61nnotations\x12I\n\nconnection\x18\x04 \x01(\x0e\x32).agntcy.dir.routing.v1.PeerConnectionTypeR\nconnection\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01*\xaf\x01\n\x12PeerConnectionType\x12&\n\"PEER_CONNECTION_TYPE_NOT_CONNECTED\x10\x00\x12\"\n\x1ePEER_CONNECTION_TYPE_CONNECTED\x10\x01\x12$\n PEER_CONNECTION_TYPE_CAN_CONNECT\x10\x02\x12\'\n#PEER_CONNECTION_TYPE_CANNOT_CONNECT\x10\x03\x42\xc3\x01\n\x19\x63om.agntcy.dir.routing.v1B\tPeerProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.peer_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\tPeerProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' - _globals['_PEER_ANNOTATIONSENTRY']._loaded_options = None - _globals['_PEER_ANNOTATIONSENTRY']._serialized_options = b'8\001' - 
_globals['_PEERCONNECTIONTYPE']._serialized_start=326 - _globals['_PEERCONNECTIONTYPE']._serialized_end=501 - _globals['_PEER']._serialized_start=60 - _globals['_PEER']._serialized_end=323 - _globals['_PEER_ANNOTATIONSENTRY']._serialized_start=261 - _globals['_PEER_ANNOTATIONSENTRY']._serialized_end=323 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/routing/v1/peer.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/routing/v1/peer.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n agntcy/dir/routing/v1/peer.proto\x12\x15\x61gntcy.dir.routing.v1\"\x87\x02\n\x04Peer\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n\x05\x61\x64\x64rs\x18\x02 \x03(\tR\x05\x61\x64\x64rs\x12N\n\x0b\x61nnotations\x18\x03 \x03(\x0b\x32,.agntcy.dir.routing.v1.Peer.AnnotationsEntryR\x0b\x61nnotations\x12I\n\nconnection\x18\x04 \x01(\x0e\x32).agntcy.dir.routing.v1.PeerConnectionTypeR\nconnection\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01*\xaf\x01\n\x12PeerConnectionType\x12&\n\"PEER_CONNECTION_TYPE_NOT_CONNECTED\x10\x00\x12\"\n\x1ePEER_CONNECTION_TYPE_CONNECTED\x10\x01\x12$\n PEER_CONNECTION_TYPE_CAN_CONNECT\x10\x02\x12\'\n#PEER_CONNECTION_TYPE_CANNOT_CONNECT\x10\x03\x42\xc3\x01\n\x19\x63om.agntcy.dir.routing.v1B\tPeerProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.peer_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\tPeerProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' + _globals['_PEER_ANNOTATIONSENTRY']._loaded_options = None + _globals['_PEER_ANNOTATIONSENTRY']._serialized_options = b'8\001' + _globals['_PEERCONNECTIONTYPE']._serialized_start=326 + _globals['_PEERCONNECTIONTYPE']._serialized_end=501 + _globals['_PEER']._serialized_start=60 + _globals['_PEER']._serialized_end=323 + _globals['_PEER_ANNOTATIONSENTRY']._serialized_start=261 + _globals['_PEER_ANNOTATIONSENTRY']._serialized_end=323 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.pyi b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.pyi index ca5314a13..f3bf108d6 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2.pyi @@ -1,37 +1,37 @@ 
-from google.protobuf.internal import containers as _containers -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class PeerConnectionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - PEER_CONNECTION_TYPE_NOT_CONNECTED: _ClassVar[PeerConnectionType] - PEER_CONNECTION_TYPE_CONNECTED: _ClassVar[PeerConnectionType] - PEER_CONNECTION_TYPE_CAN_CONNECT: _ClassVar[PeerConnectionType] - PEER_CONNECTION_TYPE_CANNOT_CONNECT: _ClassVar[PeerConnectionType] -PEER_CONNECTION_TYPE_NOT_CONNECTED: PeerConnectionType -PEER_CONNECTION_TYPE_CONNECTED: PeerConnectionType -PEER_CONNECTION_TYPE_CAN_CONNECT: PeerConnectionType -PEER_CONNECTION_TYPE_CANNOT_CONNECT: PeerConnectionType - -class Peer(_message.Message): - __slots__ = ("id", "addrs", "annotations", "connection") - class AnnotationsEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... - ID_FIELD_NUMBER: _ClassVar[int] - ADDRS_FIELD_NUMBER: _ClassVar[int] - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - CONNECTION_FIELD_NUMBER: _ClassVar[int] - id: str - addrs: _containers.RepeatedScalarFieldContainer[str] - annotations: _containers.ScalarMap[str, str] - connection: PeerConnectionType - def __init__(self, id: _Optional[str] = ..., addrs: _Optional[_Iterable[str]] = ..., annotations: _Optional[_Mapping[str, str]] = ..., connection: _Optional[_Union[PeerConnectionType, str]] = ...) -> None: ... +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class PeerConnectionType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + PEER_CONNECTION_TYPE_NOT_CONNECTED: _ClassVar[PeerConnectionType] + PEER_CONNECTION_TYPE_CONNECTED: _ClassVar[PeerConnectionType] + PEER_CONNECTION_TYPE_CAN_CONNECT: _ClassVar[PeerConnectionType] + PEER_CONNECTION_TYPE_CANNOT_CONNECT: _ClassVar[PeerConnectionType] +PEER_CONNECTION_TYPE_NOT_CONNECTED: PeerConnectionType +PEER_CONNECTION_TYPE_CONNECTED: PeerConnectionType +PEER_CONNECTION_TYPE_CAN_CONNECT: PeerConnectionType +PEER_CONNECTION_TYPE_CANNOT_CONNECT: PeerConnectionType + +class Peer(_message.Message): + __slots__ = ("id", "addrs", "annotations", "connection") + class AnnotationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
+ ID_FIELD_NUMBER: _ClassVar[int] + ADDRS_FIELD_NUMBER: _ClassVar[int] + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + CONNECTION_FIELD_NUMBER: _ClassVar[int] + id: str + addrs: _containers.RepeatedScalarFieldContainer[str] + annotations: _containers.ScalarMap[str, str] + connection: PeerConnectionType + def __init__(self, id: _Optional[str] = ..., addrs: _Optional[_Iterable[str]] = ..., annotations: _Optional[_Mapping[str, str]] = ..., connection: _Optional[_Union[PeerConnectionType, str]] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2_grpc.py b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/peer_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.py b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.py index 6dde3cb36..80a17011f 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.py @@ -1,50 +1,50 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/routing/v1/publication_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/routing/v1/publication_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/agntcy/dir/routing/v1/publication_service.proto\x12\x15\x61gntcy.dir.routing.v1\x1a+agntcy/dir/routing/v1/routing_service.proto\"B\n\x19\x43reatePublicationResponse\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\"f\n\x17ListPublicationsRequest\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\xcc\x01\n\x14ListPublicationsItem\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\x12@\n\x06status\x18\x02 \x01(\x0e\x32(.agntcy.dir.routing.v1.PublicationStatusR\x06status\x12!\n\x0c\x63reated_time\x18\x03 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x04 \x01(\tR\x0elastUpdateTime\">\n\x15GetPublicationRequest\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\"\xce\x01\n\x16GetPublicationResponse\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\x12@\n\x06status\x18\x02 \x01(\x0e\x32(.agntcy.dir.routing.v1.PublicationStatusR\x06status\x12!\n\x0c\x63reated_time\x18\x03 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x04 
\x01(\tR\x0elastUpdateTime*\xbc\x01\n\x11PublicationStatus\x12\"\n\x1ePUBLICATION_STATUS_UNSPECIFIED\x10\x00\x12\x1e\n\x1aPUBLICATION_STATUS_PENDING\x10\x01\x12\"\n\x1ePUBLICATION_STATUS_IN_PROGRESS\x10\x02\x12 \n\x1cPUBLICATION_STATUS_COMPLETED\x10\x03\x12\x1d\n\x19PUBLICATION_STATUS_FAILED\x10\x04\x32\xe4\x02\n\x12PublicationService\x12l\n\x11\x43reatePublication\x12%.agntcy.dir.routing.v1.PublishRequest\x1a\x30.agntcy.dir.routing.v1.CreatePublicationResponse\x12q\n\x10ListPublications\x12..agntcy.dir.routing.v1.ListPublicationsRequest\x1a+.agntcy.dir.routing.v1.ListPublicationsItem0\x01\x12m\n\x0eGetPublication\x12,.agntcy.dir.routing.v1.GetPublicationRequest\x1a-.agntcy.dir.routing.v1.GetPublicationResponseB\xd1\x01\n\x19\x63om.agntcy.dir.routing.v1B\x17PublicationServiceProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.publication_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\027PublicationServiceProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' - _globals['_PUBLICATIONSTATUS']._serialized_start=772 - _globals['_PUBLICATIONSTATUS']._serialized_end=960 - _globals['_CREATEPUBLICATIONRESPONSE']._serialized_start=119 - _globals['_CREATEPUBLICATIONRESPONSE']._serialized_end=185 - _globals['_LISTPUBLICATIONSREQUEST']._serialized_start=187 - _globals['_LISTPUBLICATIONSREQUEST']._serialized_end=289 - _globals['_LISTPUBLICATIONSITEM']._serialized_start=292 - _globals['_LISTPUBLICATIONSITEM']._serialized_end=496 - _globals['_GETPUBLICATIONREQUEST']._serialized_start=498 - _globals['_GETPUBLICATIONREQUEST']._serialized_end=560 - _globals['_GETPUBLICATIONRESPONSE']._serialized_start=563 - _globals['_GETPUBLICATIONRESPONSE']._serialized_end=769 - _globals['_PUBLICATIONSERVICE']._serialized_start=963 - _globals['_PUBLICATIONSERVICE']._serialized_end=1319 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/routing/v1/publication_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/routing/v1/publication_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n/agntcy/dir/routing/v1/publication_service.proto\x12\x15\x61gntcy.dir.routing.v1\x1a+agntcy/dir/routing/v1/routing_service.proto\"B\n\x19\x43reatePublicationResponse\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\"f\n\x17ListPublicationsRequest\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\xcc\x01\n\x14ListPublicationsItem\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\x12@\n\x06status\x18\x02 \x01(\x0e\x32(.agntcy.dir.routing.v1.PublicationStatusR\x06status\x12!\n\x0c\x63reated_time\x18\x03 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x04 \x01(\tR\x0elastUpdateTime\">\n\x15GetPublicationRequest\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\"\xce\x01\n\x16GetPublicationResponse\x12%\n\x0epublication_id\x18\x01 \x01(\tR\rpublicationId\x12@\n\x06status\x18\x02 \x01(\x0e\x32(.agntcy.dir.routing.v1.PublicationStatusR\x06status\x12!\n\x0c\x63reated_time\x18\x03 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x04 \x01(\tR\x0elastUpdateTime*\xbc\x01\n\x11PublicationStatus\x12\"\n\x1ePUBLICATION_STATUS_UNSPECIFIED\x10\x00\x12\x1e\n\x1aPUBLICATION_STATUS_PENDING\x10\x01\x12\"\n\x1ePUBLICATION_STATUS_IN_PROGRESS\x10\x02\x12 \n\x1cPUBLICATION_STATUS_COMPLETED\x10\x03\x12\x1d\n\x19PUBLICATION_STATUS_FAILED\x10\x04\x32\xe4\x02\n\x12PublicationService\x12l\n\x11\x43reatePublication\x12%.agntcy.dir.routing.v1.PublishRequest\x1a\x30.agntcy.dir.routing.v1.CreatePublicationResponse\x12q\n\x10ListPublications\x12..agntcy.dir.routing.v1.ListPublicationsRequest\x1a+.agntcy.dir.routing.v1.ListPublicationsItem0\x01\x12m\n\x0eGetPublication\x12,.agntcy.dir.routing.v1.GetPublicationRequest\x1a-.agntcy.dir.routing.v1.GetPublicationResponseB\xd1\x01\n\x19\x63om.agntcy.dir.routing.v1B\x17PublicationServiceProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.publication_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = 
b'\n\031com.agntcy.dir.routing.v1B\027PublicationServiceProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' + _globals['_PUBLICATIONSTATUS']._serialized_start=772 + _globals['_PUBLICATIONSTATUS']._serialized_end=960 + _globals['_CREATEPUBLICATIONRESPONSE']._serialized_start=119 + _globals['_CREATEPUBLICATIONRESPONSE']._serialized_end=185 + _globals['_LISTPUBLICATIONSREQUEST']._serialized_start=187 + _globals['_LISTPUBLICATIONSREQUEST']._serialized_end=289 + _globals['_LISTPUBLICATIONSITEM']._serialized_start=292 + _globals['_LISTPUBLICATIONSITEM']._serialized_end=496 + _globals['_GETPUBLICATIONREQUEST']._serialized_start=498 + _globals['_GETPUBLICATIONREQUEST']._serialized_end=560 + _globals['_GETPUBLICATIONRESPONSE']._serialized_start=563 + _globals['_GETPUBLICATIONRESPONSE']._serialized_end=769 + _globals['_PUBLICATIONSERVICE']._serialized_start=963 + _globals['_PUBLICATIONSERVICE']._serialized_end=1319 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.pyi b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.pyi index d5bfdbd70..eba6e79f0 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2.pyi @@ -1,64 +1,64 @@ -from agntcy.dir.routing.v1 import routing_service_pb2 as _routing_service_pb2 -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class PublicationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - PUBLICATION_STATUS_UNSPECIFIED: _ClassVar[PublicationStatus] - PUBLICATION_STATUS_PENDING: _ClassVar[PublicationStatus] - PUBLICATION_STATUS_IN_PROGRESS: _ClassVar[PublicationStatus] - PUBLICATION_STATUS_COMPLETED: _ClassVar[PublicationStatus] - PUBLICATION_STATUS_FAILED: _ClassVar[PublicationStatus] -PUBLICATION_STATUS_UNSPECIFIED: PublicationStatus -PUBLICATION_STATUS_PENDING: PublicationStatus -PUBLICATION_STATUS_IN_PROGRESS: PublicationStatus -PUBLICATION_STATUS_COMPLETED: PublicationStatus -PUBLICATION_STATUS_FAILED: PublicationStatus - -class CreatePublicationResponse(_message.Message): - __slots__ = ("publication_id",) - PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] - publication_id: str - def __init__(self, publication_id: _Optional[str] = ...) -> None: ... - -class ListPublicationsRequest(_message.Message): - __slots__ = ("limit", "offset") - LIMIT_FIELD_NUMBER: _ClassVar[int] - OFFSET_FIELD_NUMBER: _ClassVar[int] - limit: int - offset: int - def __init__(self, limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... 
- -class ListPublicationsItem(_message.Message): - __slots__ = ("publication_id", "status", "created_time", "last_update_time") - PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - CREATED_TIME_FIELD_NUMBER: _ClassVar[int] - LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] - publication_id: str - status: PublicationStatus - created_time: str - last_update_time: str - def __init__(self, publication_id: _Optional[str] = ..., status: _Optional[_Union[PublicationStatus, str]] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... - -class GetPublicationRequest(_message.Message): - __slots__ = ("publication_id",) - PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] - publication_id: str - def __init__(self, publication_id: _Optional[str] = ...) -> None: ... - -class GetPublicationResponse(_message.Message): - __slots__ = ("publication_id", "status", "created_time", "last_update_time") - PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - CREATED_TIME_FIELD_NUMBER: _ClassVar[int] - LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] - publication_id: str - status: PublicationStatus - created_time: str - last_update_time: str - def __init__(self, publication_id: _Optional[str] = ..., status: _Optional[_Union[PublicationStatus, str]] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... +from agntcy.dir.routing.v1 import routing_service_pb2 as _routing_service_pb2 +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class PublicationStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + PUBLICATION_STATUS_UNSPECIFIED: _ClassVar[PublicationStatus] + PUBLICATION_STATUS_PENDING: _ClassVar[PublicationStatus] + PUBLICATION_STATUS_IN_PROGRESS: _ClassVar[PublicationStatus] + PUBLICATION_STATUS_COMPLETED: _ClassVar[PublicationStatus] + PUBLICATION_STATUS_FAILED: _ClassVar[PublicationStatus] +PUBLICATION_STATUS_UNSPECIFIED: PublicationStatus +PUBLICATION_STATUS_PENDING: PublicationStatus +PUBLICATION_STATUS_IN_PROGRESS: PublicationStatus +PUBLICATION_STATUS_COMPLETED: PublicationStatus +PUBLICATION_STATUS_FAILED: PublicationStatus + +class CreatePublicationResponse(_message.Message): + __slots__ = ("publication_id",) + PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] + publication_id: str + def __init__(self, publication_id: _Optional[str] = ...) -> None: ... + +class ListPublicationsRequest(_message.Message): + __slots__ = ("limit", "offset") + LIMIT_FIELD_NUMBER: _ClassVar[int] + OFFSET_FIELD_NUMBER: _ClassVar[int] + limit: int + offset: int + def __init__(self, limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... 
+ +class ListPublicationsItem(_message.Message): + __slots__ = ("publication_id", "status", "created_time", "last_update_time") + PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + CREATED_TIME_FIELD_NUMBER: _ClassVar[int] + LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] + publication_id: str + status: PublicationStatus + created_time: str + last_update_time: str + def __init__(self, publication_id: _Optional[str] = ..., status: _Optional[_Union[PublicationStatus, str]] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... + +class GetPublicationRequest(_message.Message): + __slots__ = ("publication_id",) + PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] + publication_id: str + def __init__(self, publication_id: _Optional[str] = ...) -> None: ... + +class GetPublicationResponse(_message.Message): + __slots__ = ("publication_id", "status", "created_time", "last_update_time") + PUBLICATION_ID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + CREATED_TIME_FIELD_NUMBER: _ClassVar[int] + LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] + publication_id: str + status: PublicationStatus + created_time: str + last_update_time: str + def __init__(self, publication_id: _Optional[str] = ..., status: _Optional[_Union[PublicationStatus, str]] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2_grpc.py index c1409fbf8..83c14562f 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/publication_service_pb2_grpc.py @@ -1,194 +1,194 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.routing.v1 import publication_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2 -from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 - - -class PublicationServiceStub(object): - """PublicationService manages publication requests for announcing records to the DHT. - - Publications are stored in the database and processed by a worker that runs every hour. - The publication workflow: - 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records - 2. Publication requests are added to the database - 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published - 4. PublicationWorker announces the records with these CIDs to the DHT - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreatePublication = channel.unary_unary( - '/agntcy.dir.routing.v1.PublicationService/CreatePublication', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.FromString, - _registered_method=True) - self.ListPublications = channel.unary_stream( - '/agntcy.dir.routing.v1.PublicationService/ListPublications', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.FromString, - _registered_method=True) - self.GetPublication = channel.unary_unary( - '/agntcy.dir.routing.v1.PublicationService/GetPublication', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.FromString, - _registered_method=True) - - -class PublicationServiceServicer(object): - """PublicationService manages publication requests for announcing records to the DHT. - - Publications are stored in the database and processed by a worker that runs every hour. - The publication workflow: - 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records - 2. Publication requests are added to the database - 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published - 4. PublicationWorker announces the records with these CIDs to the DHT - """ - - def CreatePublication(self, request, context): - """CreatePublication creates a new publication request that will be processed by the PublicationWorker. - The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListPublications(self, request, context): - """ListPublications returns a stream of all publication requests in the system. - This allows monitoring of pending, processing, and completed publication requests. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetPublication(self, request, context): - """GetPublication retrieves details of a specific publication request by its identifier. - This includes the current status and any associated metadata. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_PublicationServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreatePublication': grpc.unary_unary_rpc_method_handler( - servicer.CreatePublication, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.FromString, - response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.SerializeToString, - ), - 'ListPublications': grpc.unary_stream_rpc_method_handler( - servicer.ListPublications, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.FromString, - response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.SerializeToString, - ), - 'GetPublication': grpc.unary_unary_rpc_method_handler( - servicer.GetPublication, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.FromString, - response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.routing.v1.PublicationService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.routing.v1.PublicationService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class PublicationService(object): - """PublicationService manages publication requests for announcing records to the DHT. - - Publications are stored in the database and processed by a worker that runs every hour. - The publication workflow: - 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records - 2. Publication requests are added to the database - 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published - 4. 
PublicationWorker announces the records with these CIDs to the DHT - """ - - @staticmethod - def CreatePublication(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.routing.v1.PublicationService/CreatePublication', - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, - agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def ListPublications(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.routing.v1.PublicationService/ListPublications', - agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.SerializeToString, - agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def GetPublication(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.routing.v1.PublicationService/GetPublication', - agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.SerializeToString, - agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.routing.v1 import publication_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2 +from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 + + +class PublicationServiceStub(object): + """PublicationService manages publication requests for announcing records to the DHT. + + Publications are stored in the database and processed by a worker that runs every hour. + The publication workflow: + 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records + 2. Publication requests are added to the database + 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published + 4. PublicationWorker announces the records with these CIDs to the DHT + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreatePublication = channel.unary_unary( + '/agntcy.dir.routing.v1.PublicationService/CreatePublication', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.FromString, + _registered_method=True) + self.ListPublications = channel.unary_stream( + '/agntcy.dir.routing.v1.PublicationService/ListPublications', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.FromString, + _registered_method=True) + self.GetPublication = channel.unary_unary( + '/agntcy.dir.routing.v1.PublicationService/GetPublication', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.FromString, + _registered_method=True) + + +class PublicationServiceServicer(object): + """PublicationService manages publication requests for announcing records to the DHT. + + Publications are stored in the database and processed by a worker that runs every hour. + The publication workflow: + 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records + 2. Publication requests are added to the database + 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published + 4. PublicationWorker announces the records with these CIDs to the DHT + """ + + def CreatePublication(self, request, context): + """CreatePublication creates a new publication request that will be processed by the PublicationWorker. + The publication request can specify either a query, a list of specific CIDs, or all records to be announced to the DHT. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListPublications(self, request, context): + """ListPublications returns a stream of all publication requests in the system. + This allows monitoring of pending, processing, and completed publication requests. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPublication(self, request, context): + """GetPublication retrieves details of a specific publication request by its identifier. + This includes the current status and any associated metadata. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_PublicationServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreatePublication': grpc.unary_unary_rpc_method_handler( + servicer.CreatePublication, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.FromString, + response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.SerializeToString, + ), + 'ListPublications': grpc.unary_stream_rpc_method_handler( + servicer.ListPublications, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.FromString, + response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.SerializeToString, + ), + 'GetPublication': grpc.unary_unary_rpc_method_handler( + servicer.GetPublication, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.FromString, + response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.routing.v1.PublicationService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.routing.v1.PublicationService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class PublicationService(object): + """PublicationService manages publication requests for announcing records to the DHT. + + Publications are stored in the database and processed by a worker that runs every hour. + The publication workflow: + 1. Publications are created via routing's Publish RPC by specifying either a query, a list of CIDs, or all records + 2. Publication requests are added to the database + 3. PublicationWorker queries the data using the publication request from the database to get the list of CIDs to be published + 4. 
PublicationWorker announces the records with these CIDs to the DHT + """ + + @staticmethod + def CreatePublication(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.routing.v1.PublicationService/CreatePublication', + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, + agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.CreatePublicationResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ListPublications(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.routing.v1.PublicationService/ListPublications', + agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsRequest.SerializeToString, + agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.ListPublicationsItem.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetPublication(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.routing.v1.PublicationService/GetPublication', + agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationRequest.SerializeToString, + agntcy_dot_dir_dot_routing_dot_v1_dot_publication__service__pb2.GetPublicationResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.py b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.py index f5c7f54c2..5f3e55b98 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.py @@ -1,39 +1,39 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
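As an aside on the experimental PublicationService class that closes the file above: it wraps the grpc.experimental one-shot helpers, so a caller can skip channel management entirely. A hedged sketch (target address and publication id are illustrative; the API is marked EXPERIMENTAL upstream):

from agntcy.dir.routing.v1 import publication_service_pb2, publication_service_pb2_grpc

# One-shot unary call; insecure=True selects a plaintext channel, which is
# only appropriate for local experimentation.
info = publication_service_pb2_grpc.PublicationService.GetPublication(
    publication_service_pb2.GetPublicationRequest(publication_id="pub-123"),
    target="localhost:8888",
    insecure=True,
)

# ListPublications is unary-stream, so the helper returns an iterator.
for item in publication_service_pb2_grpc.PublicationService.ListPublications(
    publication_service_pb2.ListPublicationsRequest(),
    target="localhost:8888",
    insecure=True,
):
    print(item.publication_id, item.status, item.last_update_time)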
-# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/routing/v1/record_query.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/routing/v1/record_query.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(agntcy/dir/routing/v1/record_query.proto\x12\x15\x61gntcy.dir.routing.v1\"_\n\x0bRecordQuery\x12:\n\x04type\x18\x01 \x01(\x0e\x32&.agntcy.dir.routing.v1.RecordQueryTypeR\x04type\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value*\xac\x01\n\x0fRecordQueryType\x12!\n\x1dRECORD_QUERY_TYPE_UNSPECIFIED\x10\x00\x12\x1b\n\x17RECORD_QUERY_TYPE_SKILL\x10\x01\x12\x1d\n\x19RECORD_QUERY_TYPE_LOCATOR\x10\x02\x12\x1c\n\x18RECORD_QUERY_TYPE_DOMAIN\x10\x03\x12\x1c\n\x18RECORD_QUERY_TYPE_MODULE\x10\x04\x42\xca\x01\n\x19\x63om.agntcy.dir.routing.v1B\x10RecordQueryProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.record_query_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\020RecordQueryProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' - _globals['_RECORDQUERYTYPE']._serialized_start=165 - _globals['_RECORDQUERYTYPE']._serialized_end=337 - _globals['_RECORDQUERY']._serialized_start=67 - _globals['_RECORDQUERY']._serialized_end=162 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/routing/v1/record_query.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/routing/v1/record_query.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(agntcy/dir/routing/v1/record_query.proto\x12\x15\x61gntcy.dir.routing.v1\"_\n\x0bRecordQuery\x12:\n\x04type\x18\x01 \x01(\x0e\x32&.agntcy.dir.routing.v1.RecordQueryTypeR\x04type\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value*\xac\x01\n\x0fRecordQueryType\x12!\n\x1dRECORD_QUERY_TYPE_UNSPECIFIED\x10\x00\x12\x1b\n\x17RECORD_QUERY_TYPE_SKILL\x10\x01\x12\x1d\n\x19RECORD_QUERY_TYPE_LOCATOR\x10\x02\x12\x1c\n\x18RECORD_QUERY_TYPE_DOMAIN\x10\x03\x12\x1c\n\x18RECORD_QUERY_TYPE_MODULE\x10\x04\x42\xca\x01\n\x19\x63om.agntcy.dir.routing.v1B\x10RecordQueryProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.record_query_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\020RecordQueryProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' + _globals['_RECORDQUERYTYPE']._serialized_start=165 + _globals['_RECORDQUERYTYPE']._serialized_end=337 + _globals['_RECORDQUERY']._serialized_start=67 + _globals['_RECORDQUERY']._serialized_end=162 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.pyi b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.pyi index ed1b99312..ad76da076 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2.pyi @@ -1,27 +1,27 @@ -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class RecordQueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - RECORD_QUERY_TYPE_UNSPECIFIED: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_SKILL: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_LOCATOR: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_DOMAIN: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_MODULE: _ClassVar[RecordQueryType] -RECORD_QUERY_TYPE_UNSPECIFIED: RecordQueryType -RECORD_QUERY_TYPE_SKILL: RecordQueryType -RECORD_QUERY_TYPE_LOCATOR: RecordQueryType -RECORD_QUERY_TYPE_DOMAIN: RecordQueryType 
-RECORD_QUERY_TYPE_MODULE: RecordQueryType - -class RecordQuery(_message.Message): - __slots__ = ("type", "value") - TYPE_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - type: RecordQueryType - value: str - def __init__(self, type: _Optional[_Union[RecordQueryType, str]] = ..., value: _Optional[str] = ...) -> None: ... +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RecordQueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + RECORD_QUERY_TYPE_UNSPECIFIED: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_SKILL: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_LOCATOR: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_DOMAIN: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_MODULE: _ClassVar[RecordQueryType] +RECORD_QUERY_TYPE_UNSPECIFIED: RecordQueryType +RECORD_QUERY_TYPE_SKILL: RecordQueryType +RECORD_QUERY_TYPE_LOCATOR: RecordQueryType +RECORD_QUERY_TYPE_DOMAIN: RecordQueryType +RECORD_QUERY_TYPE_MODULE: RecordQueryType + +class RecordQuery(_message.Message): + __slots__ = ("type", "value") + TYPE_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + type: RecordQueryType + value: str + def __init__(self, type: _Optional[_Union[RecordQueryType, str]] = ..., value: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2_grpc.py b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/record_query_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.py b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.py index 24d58faa6..3e4c50733 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.py @@ -1,58 +1,58 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/routing/v1/routing_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/routing/v1/routing_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 -from agntcy.dir.routing.v1 import peer_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_peer__pb2 -from agntcy.dir.routing.v1 import record_query_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_record__query__pb2 -from agntcy.dir.search.v1 import record_query_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_record__query__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+agntcy/dir/routing/v1/routing_service.proto\x12\x15\x61gntcy.dir.routing.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a agntcy/dir/routing/v1/peer.proto\x1a(agntcy/dir/routing/v1/record_query.proto\x1a\'agntcy/dir/search/v1/record_query.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xa3\x01\n\x0ePublishRequest\x12\x44\n\x0brecord_refs\x18\x01 \x01(\x0b\x32!.agntcy.dir.routing.v1.RecordRefsH\x00R\nrecordRefs\x12@\n\x07queries\x18\x02 \x01(\x0b\x32$.agntcy.dir.routing.v1.RecordQueriesH\x00R\x07queriesB\t\n\x07request\"\xa5\x01\n\x10UnpublishRequest\x12\x44\n\x0brecord_refs\x18\x01 \x01(\x0b\x32!.agntcy.dir.routing.v1.RecordRefsH\x00R\nrecordRefs\x12@\n\x07queries\x18\x02 \x01(\x0b\x32$.agntcy.dir.routing.v1.RecordQueriesH\x00R\x07queriesB\t\n\x07request\"?\n\nRecordRefs\x12\x31\n\x04refs\x18\x01 \x03(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\x04refs\"L\n\rRecordQueries\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\"\xb3\x01\n\rSearchRequest\x12<\n\x07queries\x18\x01 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x07queries\x12+\n\x0fmin_match_score\x18\x02 \x01(\rH\x00R\rminMatchScore\x88\x01\x01\x12\x19\n\x05limit\x18\x03 \x01(\rH\x01R\x05limit\x88\x01\x01\x42\x12\n\x10_min_match_scoreB\x08\n\x06_limit\"\xe9\x01\n\x0eSearchResponse\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12/\n\x04peer\x18\x02 \x01(\x0b\x32\x1b.agntcy.dir.routing.v1.PeerR\x04peer\x12G\n\rmatch_queries\x18\x03 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x0cmatchQueries\x12\x1f\n\x0bmatch_score\x18\x04 \x01(\rR\nmatchScore\"p\n\x0bListRequest\x12<\n\x07queries\x18\x01 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x42\x08\n\x06_limit\"d\n\x0cListResponse\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12\x16\n\x06labels\x18\x02 
\x03(\tR\x06labels2\xd4\x02\n\x0eRoutingService\x12H\n\x07Publish\x12%.agntcy.dir.routing.v1.PublishRequest\x1a\x16.google.protobuf.Empty\x12L\n\tUnpublish\x12\'.agntcy.dir.routing.v1.UnpublishRequest\x1a\x16.google.protobuf.Empty\x12W\n\x06Search\x12$.agntcy.dir.routing.v1.SearchRequest\x1a%.agntcy.dir.routing.v1.SearchResponse0\x01\x12Q\n\x04List\x12\".agntcy.dir.routing.v1.ListRequest\x1a#.agntcy.dir.routing.v1.ListResponse0\x01\x42\xcd\x01\n\x19\x63om.agntcy.dir.routing.v1B\x13RoutingServiceProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.routing_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\023RoutingServiceProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' - _globals['_PUBLISHREQUEST']._serialized_start=250 - _globals['_PUBLISHREQUEST']._serialized_end=413 - _globals['_UNPUBLISHREQUEST']._serialized_start=416 - _globals['_UNPUBLISHREQUEST']._serialized_end=581 - _globals['_RECORDREFS']._serialized_start=583 - _globals['_RECORDREFS']._serialized_end=646 - _globals['_RECORDQUERIES']._serialized_start=648 - _globals['_RECORDQUERIES']._serialized_end=724 - _globals['_SEARCHREQUEST']._serialized_start=727 - _globals['_SEARCHREQUEST']._serialized_end=906 - _globals['_SEARCHRESPONSE']._serialized_start=909 - _globals['_SEARCHRESPONSE']._serialized_end=1142 - _globals['_LISTREQUEST']._serialized_start=1144 - _globals['_LISTREQUEST']._serialized_end=1256 - _globals['_LISTRESPONSE']._serialized_start=1258 - _globals['_LISTRESPONSE']._serialized_end=1358 - _globals['_ROUTINGSERVICE']._serialized_start=1361 - _globals['_ROUTINGSERVICE']._serialized_end=1701 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/routing/v1/routing_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/routing/v1/routing_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 +from agntcy.dir.routing.v1 import peer_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_peer__pb2 +from agntcy.dir.routing.v1 import record_query_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_record__query__pb2 +from agntcy.dir.search.v1 import record_query_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_record__query__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+agntcy/dir/routing/v1/routing_service.proto\x12\x15\x61gntcy.dir.routing.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a agntcy/dir/routing/v1/peer.proto\x1a(agntcy/dir/routing/v1/record_query.proto\x1a\'agntcy/dir/search/v1/record_query.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xa3\x01\n\x0ePublishRequest\x12\x44\n\x0brecord_refs\x18\x01 \x01(\x0b\x32!.agntcy.dir.routing.v1.RecordRefsH\x00R\nrecordRefs\x12@\n\x07queries\x18\x02 \x01(\x0b\x32$.agntcy.dir.routing.v1.RecordQueriesH\x00R\x07queriesB\t\n\x07request\"\xa5\x01\n\x10UnpublishRequest\x12\x44\n\x0brecord_refs\x18\x01 \x01(\x0b\x32!.agntcy.dir.routing.v1.RecordRefsH\x00R\nrecordRefs\x12@\n\x07queries\x18\x02 \x01(\x0b\x32$.agntcy.dir.routing.v1.RecordQueriesH\x00R\x07queriesB\t\n\x07request\"?\n\nRecordRefs\x12\x31\n\x04refs\x18\x01 \x03(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\x04refs\"L\n\rRecordQueries\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\"\xb3\x01\n\rSearchRequest\x12<\n\x07queries\x18\x01 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x07queries\x12+\n\x0fmin_match_score\x18\x02 \x01(\rH\x00R\rminMatchScore\x88\x01\x01\x12\x19\n\x05limit\x18\x03 \x01(\rH\x01R\x05limit\x88\x01\x01\x42\x12\n\x10_min_match_scoreB\x08\n\x06_limit\"\xe9\x01\n\x0eSearchResponse\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12/\n\x04peer\x18\x02 \x01(\x0b\x32\x1b.agntcy.dir.routing.v1.PeerR\x04peer\x12G\n\rmatch_queries\x18\x03 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x0cmatchQueries\x12\x1f\n\x0bmatch_score\x18\x04 \x01(\rR\nmatchScore\"p\n\x0bListRequest\x12<\n\x07queries\x18\x01 \x03(\x0b\x32\".agntcy.dir.routing.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x42\x08\n\x06_limit\"d\n\x0cListResponse\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12\x16\n\x06labels\x18\x02 
\x03(\tR\x06labels2\xd4\x02\n\x0eRoutingService\x12H\n\x07Publish\x12%.agntcy.dir.routing.v1.PublishRequest\x1a\x16.google.protobuf.Empty\x12L\n\tUnpublish\x12\'.agntcy.dir.routing.v1.UnpublishRequest\x1a\x16.google.protobuf.Empty\x12W\n\x06Search\x12$.agntcy.dir.routing.v1.SearchRequest\x1a%.agntcy.dir.routing.v1.SearchResponse0\x01\x12Q\n\x04List\x12\".agntcy.dir.routing.v1.ListRequest\x1a#.agntcy.dir.routing.v1.ListResponse0\x01\x42\xcd\x01\n\x19\x63om.agntcy.dir.routing.v1B\x13RoutingServiceProtoP\x01Z$github.com/agntcy/dir/api/routing/v1\xa2\x02\x03\x41\x44R\xaa\x02\x15\x41gntcy.Dir.Routing.V1\xca\x02\x15\x41gntcy\\Dir\\Routing\\V1\xe2\x02!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\xea\x02\x18\x41gntcy::Dir::Routing::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.routing.v1.routing_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\031com.agntcy.dir.routing.v1B\023RoutingServiceProtoP\001Z$github.com/agntcy/dir/api/routing/v1\242\002\003ADR\252\002\025Agntcy.Dir.Routing.V1\312\002\025Agntcy\\Dir\\Routing\\V1\342\002!Agntcy\\Dir\\Routing\\V1\\GPBMetadata\352\002\030Agntcy::Dir::Routing::V1' + _globals['_PUBLISHREQUEST']._serialized_start=250 + _globals['_PUBLISHREQUEST']._serialized_end=413 + _globals['_UNPUBLISHREQUEST']._serialized_start=416 + _globals['_UNPUBLISHREQUEST']._serialized_end=581 + _globals['_RECORDREFS']._serialized_start=583 + _globals['_RECORDREFS']._serialized_end=646 + _globals['_RECORDQUERIES']._serialized_start=648 + _globals['_RECORDQUERIES']._serialized_end=724 + _globals['_SEARCHREQUEST']._serialized_start=727 + _globals['_SEARCHREQUEST']._serialized_end=906 + _globals['_SEARCHRESPONSE']._serialized_start=909 + _globals['_SEARCHRESPONSE']._serialized_end=1142 + _globals['_LISTREQUEST']._serialized_start=1144 + _globals['_LISTREQUEST']._serialized_end=1256 + _globals['_LISTRESPONSE']._serialized_start=1258 + _globals['_LISTRESPONSE']._serialized_end=1358 + _globals['_ROUTINGSERVICE']._serialized_start=1361 + _globals['_ROUTINGSERVICE']._serialized_end=1701 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.pyi b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.pyi index 0f6dade26..f7c42fd8c 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2.pyi @@ -1,77 +1,77 @@ -from agntcy.dir.core.v1 import record_pb2 as _record_pb2 -from agntcy.dir.routing.v1 import peer_pb2 as _peer_pb2 -from agntcy.dir.routing.v1 import record_query_pb2 as _record_query_pb2 -from agntcy.dir.search.v1 import record_query_pb2 as _record_query_pb2_1 -from google.protobuf import empty_pb2 as _empty_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class PublishRequest(_message.Message): - __slots__ = ("record_refs", "queries") - RECORD_REFS_FIELD_NUMBER: _ClassVar[int] - QUERIES_FIELD_NUMBER: _ClassVar[int] - record_refs: RecordRefs - queries: RecordQueries - def __init__(self, record_refs: _Optional[_Union[RecordRefs, _Mapping]] = ..., queries: 
_Optional[_Union[RecordQueries, _Mapping]] = ...) -> None: ... - -class UnpublishRequest(_message.Message): - __slots__ = ("record_refs", "queries") - RECORD_REFS_FIELD_NUMBER: _ClassVar[int] - QUERIES_FIELD_NUMBER: _ClassVar[int] - record_refs: RecordRefs - queries: RecordQueries - def __init__(self, record_refs: _Optional[_Union[RecordRefs, _Mapping]] = ..., queries: _Optional[_Union[RecordQueries, _Mapping]] = ...) -> None: ... - -class RecordRefs(_message.Message): - __slots__ = ("refs",) - REFS_FIELD_NUMBER: _ClassVar[int] - refs: _containers.RepeatedCompositeFieldContainer[_record_pb2.RecordRef] - def __init__(self, refs: _Optional[_Iterable[_Union[_record_pb2.RecordRef, _Mapping]]] = ...) -> None: ... - -class RecordQueries(_message.Message): - __slots__ = ("queries",) - QUERIES_FIELD_NUMBER: _ClassVar[int] - queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2_1.RecordQuery] - def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2_1.RecordQuery, _Mapping]]] = ...) -> None: ... - -class SearchRequest(_message.Message): - __slots__ = ("queries", "min_match_score", "limit") - QUERIES_FIELD_NUMBER: _ClassVar[int] - MIN_MATCH_SCORE_FIELD_NUMBER: _ClassVar[int] - LIMIT_FIELD_NUMBER: _ClassVar[int] - queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] - min_match_score: int - limit: int - def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., min_match_score: _Optional[int] = ..., limit: _Optional[int] = ...) -> None: ... - -class SearchResponse(_message.Message): - __slots__ = ("record_ref", "peer", "match_queries", "match_score") - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - PEER_FIELD_NUMBER: _ClassVar[int] - MATCH_QUERIES_FIELD_NUMBER: _ClassVar[int] - MATCH_SCORE_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - peer: _peer_pb2.Peer - match_queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] - match_score: int - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., peer: _Optional[_Union[_peer_pb2.Peer, _Mapping]] = ..., match_queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., match_score: _Optional[int] = ...) -> None: ... - -class ListRequest(_message.Message): - __slots__ = ("queries", "limit") - QUERIES_FIELD_NUMBER: _ClassVar[int] - LIMIT_FIELD_NUMBER: _ClassVar[int] - queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] - limit: int - def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ...) -> None: ... - -class ListResponse(_message.Message): - __slots__ = ("record_ref", "labels") - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - LABELS_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - labels: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., labels: _Optional[_Iterable[str]] = ...) -> None: ... 
+from agntcy.dir.core.v1 import record_pb2 as _record_pb2 +from agntcy.dir.routing.v1 import peer_pb2 as _peer_pb2 +from agntcy.dir.routing.v1 import record_query_pb2 as _record_query_pb2 +from agntcy.dir.search.v1 import record_query_pb2 as _record_query_pb2_1 +from google.protobuf import empty_pb2 as _empty_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class PublishRequest(_message.Message): + __slots__ = ("record_refs", "queries") + RECORD_REFS_FIELD_NUMBER: _ClassVar[int] + QUERIES_FIELD_NUMBER: _ClassVar[int] + record_refs: RecordRefs + queries: RecordQueries + def __init__(self, record_refs: _Optional[_Union[RecordRefs, _Mapping]] = ..., queries: _Optional[_Union[RecordQueries, _Mapping]] = ...) -> None: ... + +class UnpublishRequest(_message.Message): + __slots__ = ("record_refs", "queries") + RECORD_REFS_FIELD_NUMBER: _ClassVar[int] + QUERIES_FIELD_NUMBER: _ClassVar[int] + record_refs: RecordRefs + queries: RecordQueries + def __init__(self, record_refs: _Optional[_Union[RecordRefs, _Mapping]] = ..., queries: _Optional[_Union[RecordQueries, _Mapping]] = ...) -> None: ... + +class RecordRefs(_message.Message): + __slots__ = ("refs",) + REFS_FIELD_NUMBER: _ClassVar[int] + refs: _containers.RepeatedCompositeFieldContainer[_record_pb2.RecordRef] + def __init__(self, refs: _Optional[_Iterable[_Union[_record_pb2.RecordRef, _Mapping]]] = ...) -> None: ... + +class RecordQueries(_message.Message): + __slots__ = ("queries",) + QUERIES_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2_1.RecordQuery] + def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2_1.RecordQuery, _Mapping]]] = ...) -> None: ... + +class SearchRequest(_message.Message): + __slots__ = ("queries", "min_match_score", "limit") + QUERIES_FIELD_NUMBER: _ClassVar[int] + MIN_MATCH_SCORE_FIELD_NUMBER: _ClassVar[int] + LIMIT_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] + min_match_score: int + limit: int + def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., min_match_score: _Optional[int] = ..., limit: _Optional[int] = ...) -> None: ... + +class SearchResponse(_message.Message): + __slots__ = ("record_ref", "peer", "match_queries", "match_score") + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + PEER_FIELD_NUMBER: _ClassVar[int] + MATCH_QUERIES_FIELD_NUMBER: _ClassVar[int] + MATCH_SCORE_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + peer: _peer_pb2.Peer + match_queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] + match_score: int + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., peer: _Optional[_Union[_peer_pb2.Peer, _Mapping]] = ..., match_queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., match_score: _Optional[int] = ...) -> None: ... 
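The stubs above spell out the Publish contract: PublishRequest (and UnpublishRequest) carry a oneof named "request", so exactly one of record_refs or queries may be set. A sketch of both forms follows; the cid field on the core.v1 RecordRef is an assumption, since record.proto is not part of this diff.

from agntcy.dir.core.v1 import record_pb2
from agntcy.dir.routing.v1 import routing_service_pb2
from agntcy.dir.search.v1 import record_query_pb2 as search_query_pb2

# Form 1: announce explicitly referenced records.
by_refs = routing_service_pb2.PublishRequest(
    record_refs=routing_service_pb2.RecordRefs(
        refs=[record_pb2.RecordRef(cid="<record-cid>")],  # assumed field name
    ),
)

# Form 2: announce whatever matches a search-side query.
by_query = routing_service_pb2.PublishRequest(
    queries=routing_service_pb2.RecordQueries(queries=[
        search_query_pb2.RecordQuery(
            type=search_query_pb2.RECORD_QUERY_TYPE_NAME,
            value="my-agent",  # illustrative record name
        ),
    ]),
)

# Setting one member of the oneof clears the other.
assert by_refs.WhichOneof("request") == "record_refs"
assert by_query.WhichOneof("request") == "queries"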
+ +class ListRequest(_message.Message): + __slots__ = ("queries", "limit") + QUERIES_FIELD_NUMBER: _ClassVar[int] + LIMIT_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] + limit: int + def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ...) -> None: ... + +class ListResponse(_message.Message): + __slots__ = ("record_ref", "labels") + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + LABELS_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + labels: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., labels: _Optional[_Iterable[str]] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2_grpc.py index 0a6465f81..addf35636 100644 --- a/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/routing/v1/routing_service_pb2_grpc.py @@ -1,242 +1,242 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class RoutingServiceStub(object): - """Defines an interface for announcement and discovery - of records across interconnected network. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Publish = channel.unary_unary( - '/agntcy.dir.routing.v1.RoutingService/Publish', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) - self.Unpublish = channel.unary_unary( - '/agntcy.dir.routing.v1.RoutingService/Unpublish', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) - self.Search = channel.unary_stream( - '/agntcy.dir.routing.v1.RoutingService/Search', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.FromString, - _registered_method=True) - self.List = channel.unary_stream( - '/agntcy.dir.routing.v1.RoutingService/List', - request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.FromString, - _registered_method=True) - - -class RoutingServiceServicer(object): - """Defines an interface for announcement and discovery - of records across interconnected network. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. - """ - - def Publish(self, request, context): - """Announce to the network that this peer is providing a given record. 
- This enables other peers to discover this record and retrieve it - from this peer. Listeners can use this event to perform custom operations, - for example by cloning the record. - - Items need to be periodically republished (eg. 24h) to the network - to avoid stale data. Republication should be done in the background. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Unpublish(self, request, context): - """Stop serving this record to the network. If other peers try - to retrieve this record, the peer will refuse the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Search(self, request, context): - """Search records based on the request across the network. - This will search the network for the record with the given parameters. - - It is possible that the records are stale or that they do not exist. - Some records may be provided by multiple peers. - - Results from the search can be used as an input - to Pull operation to retrieve the records. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def List(self, request, context): - """List all records that this peer is currently providing - that match the given parameters. - This operation does not interact with the network. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_RoutingServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Publish': grpc.unary_unary_rpc_method_handler( - servicer.Publish, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'Unpublish': grpc.unary_unary_rpc_method_handler( - servicer.Unpublish, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'Search': grpc.unary_stream_rpc_method_handler( - servicer.Search, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.FromString, - response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.SerializeToString, - ), - 'List': grpc.unary_stream_rpc_method_handler( - servicer.List, - request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.FromString, - response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.routing.v1.RoutingService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.routing.v1.RoutingService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class RoutingService(object): - """Defines an interface for announcement and discovery - of records across interconnected network. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. 
- """ - - @staticmethod - def Publish(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.routing.v1.RoutingService/Publish', - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Unpublish(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.routing.v1.RoutingService/Unpublish', - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Search(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.routing.v1.RoutingService/Search', - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.SerializeToString, - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def List(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.routing.v1.RoutingService/List', - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.SerializeToString, - agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.routing.v1 import routing_service_pb2 as agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class RoutingServiceStub(object): + """Defines an interface for announcement and discovery + of records across interconnected network. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Publish = channel.unary_unary( + '/agntcy.dir.routing.v1.RoutingService/Publish', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + _registered_method=True) + self.Unpublish = channel.unary_unary( + '/agntcy.dir.routing.v1.RoutingService/Unpublish', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + _registered_method=True) + self.Search = channel.unary_stream( + '/agntcy.dir.routing.v1.RoutingService/Search', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.FromString, + _registered_method=True) + self.List = channel.unary_stream( + '/agntcy.dir.routing.v1.RoutingService/List', + request_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.FromString, + _registered_method=True) + + +class RoutingServiceServicer(object): + """Defines an interface for announcement and discovery + of records across interconnected network. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. + """ + + def Publish(self, request, context): + """Announce to the network that this peer is providing a given record. + This enables other peers to discover this record and retrieve it + from this peer. Listeners can use this event to perform custom operations, + for example by cloning the record. + + Items need to be periodically republished (eg. 24h) to the network + to avoid stale data. Republication should be done in the background. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Unpublish(self, request, context): + """Stop serving this record to the network. If other peers try + to retrieve this record, the peer will refuse the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Search(self, request, context): + """Search records based on the request across the network. + This will search the network for the record with the given parameters. + + It is possible that the records are stale or that they do not exist. + Some records may be provided by multiple peers. + + Results from the search can be used as an input + to Pull operation to retrieve the records. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def List(self, request, context): + """List all records that this peer is currently providing + that match the given parameters. + This operation does not interact with the network. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RoutingServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Publish': grpc.unary_unary_rpc_method_handler( + servicer.Publish, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'Unpublish': grpc.unary_unary_rpc_method_handler( + servicer.Unpublish, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'Search': grpc.unary_stream_rpc_method_handler( + servicer.Search, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.FromString, + response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.SerializeToString, + ), + 'List': grpc.unary_stream_rpc_method_handler( + servicer.List, + request_deserializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.FromString, + response_serializer=agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.routing.v1.RoutingService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.routing.v1.RoutingService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class RoutingService(object): + """Defines an interface for announcement and discovery + of records across interconnected network. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. 
+ """ + + @staticmethod + def Publish(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.routing.v1.RoutingService/Publish', + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.PublishRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Unpublish(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.routing.v1.RoutingService/Unpublish', + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.UnpublishRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Search(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.routing.v1.RoutingService/Search', + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchRequest.SerializeToString, + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.SearchResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def List(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.routing.v1.RoutingService/List', + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListRequest.SerializeToString, + agntcy_dot_dir_dot_routing_dot_v1_dot_routing__service__pb2.ListResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.py b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.py index f48f7d378..ff19776c8 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.py +++ b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.py @@ -1,39 +1,39 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/search/v1/record_query.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/search/v1/record_query.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'agntcy/dir/search/v1/record_query.proto\x12\x14\x61gntcy.dir.search.v1\"^\n\x0bRecordQuery\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32%.agntcy.dir.search.v1.RecordQueryTypeR\x04type\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value*\xbe\x03\n\x0fRecordQueryType\x12!\n\x1dRECORD_QUERY_TYPE_UNSPECIFIED\x10\x00\x12\x1a\n\x16RECORD_QUERY_TYPE_NAME\x10\x01\x12\x1d\n\x19RECORD_QUERY_TYPE_VERSION\x10\x02\x12\x1e\n\x1aRECORD_QUERY_TYPE_SKILL_ID\x10\x03\x12 \n\x1cRECORD_QUERY_TYPE_SKILL_NAME\x10\x04\x12\x1d\n\x19RECORD_QUERY_TYPE_LOCATOR\x10\x05\x12!\n\x1dRECORD_QUERY_TYPE_MODULE_NAME\x10\x06\x12\x1f\n\x1bRECORD_QUERY_TYPE_DOMAIN_ID\x10\x07\x12!\n\x1dRECORD_QUERY_TYPE_DOMAIN_NAME\x10\x08\x12 \n\x1cRECORD_QUERY_TYPE_CREATED_AT\x10\t\x12\x1c\n\x18RECORD_QUERY_TYPE_AUTHOR\x10\n\x12$\n RECORD_QUERY_TYPE_SCHEMA_VERSION\x10\x0b\x12\x1f\n\x1bRECORD_QUERY_TYPE_MODULE_ID\x10\x0c\x42\xc4\x01\n\x18\x63om.agntcy.dir.search.v1B\x10RecordQueryProtoP\x01Z#github.com/agntcy/dir/api/search/v1\xa2\x02\x03\x41\x44S\xaa\x02\x14\x41gntcy.Dir.Search.V1\xca\x02\x14\x41gntcy\\Dir\\Search\\V1\xe2\x02 Agntcy\\Dir\\Search\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Search::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.search.v1.record_query_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.search.v1B\020RecordQueryProtoP\001Z#github.com/agntcy/dir/api/search/v1\242\002\003ADS\252\002\024Agntcy.Dir.Search.V1\312\002\024Agntcy\\Dir\\Search\\V1\342\002 Agntcy\\Dir\\Search\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Search::V1' - _globals['_RECORDQUERYTYPE']._serialized_start=162 - _globals['_RECORDQUERYTYPE']._serialized_end=608 - _globals['_RECORDQUERY']._serialized_start=65 - _globals['_RECORDQUERY']._serialized_end=159 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/search/v1/record_query.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/search/v1/record_query.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'agntcy/dir/search/v1/record_query.proto\x12\x14\x61gntcy.dir.search.v1\"^\n\x0bRecordQuery\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32%.agntcy.dir.search.v1.RecordQueryTypeR\x04type\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value*\xbe\x03\n\x0fRecordQueryType\x12!\n\x1dRECORD_QUERY_TYPE_UNSPECIFIED\x10\x00\x12\x1a\n\x16RECORD_QUERY_TYPE_NAME\x10\x01\x12\x1d\n\x19RECORD_QUERY_TYPE_VERSION\x10\x02\x12\x1e\n\x1aRECORD_QUERY_TYPE_SKILL_ID\x10\x03\x12 \n\x1cRECORD_QUERY_TYPE_SKILL_NAME\x10\x04\x12\x1d\n\x19RECORD_QUERY_TYPE_LOCATOR\x10\x05\x12!\n\x1dRECORD_QUERY_TYPE_MODULE_NAME\x10\x06\x12\x1f\n\x1bRECORD_QUERY_TYPE_DOMAIN_ID\x10\x07\x12!\n\x1dRECORD_QUERY_TYPE_DOMAIN_NAME\x10\x08\x12 \n\x1cRECORD_QUERY_TYPE_CREATED_AT\x10\t\x12\x1c\n\x18RECORD_QUERY_TYPE_AUTHOR\x10\n\x12$\n RECORD_QUERY_TYPE_SCHEMA_VERSION\x10\x0b\x12\x1f\n\x1bRECORD_QUERY_TYPE_MODULE_ID\x10\x0c\x42\xc4\x01\n\x18\x63om.agntcy.dir.search.v1B\x10RecordQueryProtoP\x01Z#github.com/agntcy/dir/api/search/v1\xa2\x02\x03\x41\x44S\xaa\x02\x14\x41gntcy.Dir.Search.V1\xca\x02\x14\x41gntcy\\Dir\\Search\\V1\xe2\x02 Agntcy\\Dir\\Search\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Search::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.search.v1.record_query_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.search.v1B\020RecordQueryProtoP\001Z#github.com/agntcy/dir/api/search/v1\242\002\003ADS\252\002\024Agntcy.Dir.Search.V1\312\002\024Agntcy\\Dir\\Search\\V1\342\002 Agntcy\\Dir\\Search\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Search::V1' + _globals['_RECORDQUERYTYPE']._serialized_start=162 + _globals['_RECORDQUERYTYPE']._serialized_end=608 + _globals['_RECORDQUERY']._serialized_start=65 + _globals['_RECORDQUERY']._serialized_end=159 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.pyi b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.pyi index 861caf47e..6a312b242 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2.pyi @@ -1,43 +1,43 @@ -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class RecordQueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - RECORD_QUERY_TYPE_UNSPECIFIED: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_NAME: 
_ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_VERSION: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_SKILL_ID: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_SKILL_NAME: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_LOCATOR: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_MODULE_NAME: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_DOMAIN_ID: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_DOMAIN_NAME: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_CREATED_AT: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_AUTHOR: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_SCHEMA_VERSION: _ClassVar[RecordQueryType] - RECORD_QUERY_TYPE_MODULE_ID: _ClassVar[RecordQueryType] -RECORD_QUERY_TYPE_UNSPECIFIED: RecordQueryType -RECORD_QUERY_TYPE_NAME: RecordQueryType -RECORD_QUERY_TYPE_VERSION: RecordQueryType -RECORD_QUERY_TYPE_SKILL_ID: RecordQueryType -RECORD_QUERY_TYPE_SKILL_NAME: RecordQueryType -RECORD_QUERY_TYPE_LOCATOR: RecordQueryType -RECORD_QUERY_TYPE_MODULE_NAME: RecordQueryType -RECORD_QUERY_TYPE_DOMAIN_ID: RecordQueryType -RECORD_QUERY_TYPE_DOMAIN_NAME: RecordQueryType -RECORD_QUERY_TYPE_CREATED_AT: RecordQueryType -RECORD_QUERY_TYPE_AUTHOR: RecordQueryType -RECORD_QUERY_TYPE_SCHEMA_VERSION: RecordQueryType -RECORD_QUERY_TYPE_MODULE_ID: RecordQueryType - -class RecordQuery(_message.Message): - __slots__ = ("type", "value") - TYPE_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - type: RecordQueryType - value: str - def __init__(self, type: _Optional[_Union[RecordQueryType, str]] = ..., value: _Optional[str] = ...) -> None: ... +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RecordQueryType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + RECORD_QUERY_TYPE_UNSPECIFIED: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_NAME: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_VERSION: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_SKILL_ID: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_SKILL_NAME: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_LOCATOR: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_MODULE_NAME: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_DOMAIN_ID: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_DOMAIN_NAME: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_CREATED_AT: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_AUTHOR: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_SCHEMA_VERSION: _ClassVar[RecordQueryType] + RECORD_QUERY_TYPE_MODULE_ID: _ClassVar[RecordQueryType] +RECORD_QUERY_TYPE_UNSPECIFIED: RecordQueryType +RECORD_QUERY_TYPE_NAME: RecordQueryType +RECORD_QUERY_TYPE_VERSION: RecordQueryType +RECORD_QUERY_TYPE_SKILL_ID: RecordQueryType +RECORD_QUERY_TYPE_SKILL_NAME: RecordQueryType +RECORD_QUERY_TYPE_LOCATOR: RecordQueryType +RECORD_QUERY_TYPE_MODULE_NAME: RecordQueryType +RECORD_QUERY_TYPE_DOMAIN_ID: RecordQueryType +RECORD_QUERY_TYPE_DOMAIN_NAME: RecordQueryType +RECORD_QUERY_TYPE_CREATED_AT: RecordQueryType +RECORD_QUERY_TYPE_AUTHOR: RecordQueryType +RECORD_QUERY_TYPE_SCHEMA_VERSION: RecordQueryType +RECORD_QUERY_TYPE_MODULE_ID: RecordQueryType + +class RecordQuery(_message.Message): + __slots__ = ("type", "value") + TYPE_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + type: RecordQueryType + value: str + def __init__(self, type: 
_Optional[_Union[RecordQueryType, str]] = ..., value: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2_grpc.py b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/search/v1/record_query_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.py b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.py index 9560e2ad1..584838dc6 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.py @@ -1,47 +1,47 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/search/v1/search_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/search/v1/search_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 -from agntcy.dir.search.v1 import record_query_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_record__query__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)agntcy/dir/search/v1/search_service.proto\x12\x14\x61gntcy.dir.search.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\'agntcy/dir/search/v1/record_query.proto\"\x9d\x01\n\x11SearchCIDsRequest\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\xa0\x01\n\x14SearchRecordsRequest\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"3\n\x12SearchCIDsResponse\x12\x1d\n\nrecord_cid\x18\x01 \x01(\tR\trecordCid\"K\n\x15SearchRecordsResponse\x12\x32\n\x06record\x18\x01 \x01(\x0b\x32\x1a.agntcy.dir.core.v1.RecordR\x06record2\xde\x01\n\rSearchService\x12\x61\n\nSearchCIDs\x12\'.agntcy.dir.search.v1.SearchCIDsRequest\x1a(.agntcy.dir.search.v1.SearchCIDsResponse0\x01\x12j\n\rSearchRecords\x12*.agntcy.dir.search.v1.SearchRecordsRequest\x1a+.agntcy.dir.search.v1.SearchRecordsResponse0\x01\x42\xc6\x01\n\x18\x63om.agntcy.dir.search.v1B\x12SearchServiceProtoP\x01Z#github.com/agntcy/dir/api/search/v1\xa2\x02\x03\x41\x44S\xaa\x02\x14\x41gntcy.Dir.Search.V1\xca\x02\x14\x41gntcy\\Dir\\Search\\V1\xe2\x02 Agntcy\\Dir\\Search\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Search::V1b\x06proto3') 
- -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.search.v1.search_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.search.v1B\022SearchServiceProtoP\001Z#github.com/agntcy/dir/api/search/v1\242\002\003ADS\252\002\024Agntcy.Dir.Search.V1\312\002\024Agntcy\\Dir\\Search\\V1\342\002 Agntcy\\Dir\\Search\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Search::V1' - _globals['_SEARCHCIDSREQUEST']._serialized_start=142 - _globals['_SEARCHCIDSREQUEST']._serialized_end=299 - _globals['_SEARCHRECORDSREQUEST']._serialized_start=302 - _globals['_SEARCHRECORDSREQUEST']._serialized_end=462 - _globals['_SEARCHCIDSRESPONSE']._serialized_start=464 - _globals['_SEARCHCIDSRESPONSE']._serialized_end=515 - _globals['_SEARCHRECORDSRESPONSE']._serialized_start=517 - _globals['_SEARCHRECORDSRESPONSE']._serialized_end=592 - _globals['_SEARCHSERVICE']._serialized_start=595 - _globals['_SEARCHSERVICE']._serialized_end=817 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/search/v1/search_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/search/v1/search_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 +from agntcy.dir.search.v1 import record_query_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_record__query__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)agntcy/dir/search/v1/search_service.proto\x12\x14\x61gntcy.dir.search.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\'agntcy/dir/search/v1/record_query.proto\"\x9d\x01\n\x11SearchCIDsRequest\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\xa0\x01\n\x14SearchRecordsRequest\x12;\n\x07queries\x18\x01 \x03(\x0b\x32!.agntcy.dir.search.v1.RecordQueryR\x07queries\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"3\n\x12SearchCIDsResponse\x12\x1d\n\nrecord_cid\x18\x01 \x01(\tR\trecordCid\"K\n\x15SearchRecordsResponse\x12\x32\n\x06record\x18\x01 
\x01(\x0b\x32\x1a.agntcy.dir.core.v1.RecordR\x06record2\xde\x01\n\rSearchService\x12\x61\n\nSearchCIDs\x12\'.agntcy.dir.search.v1.SearchCIDsRequest\x1a(.agntcy.dir.search.v1.SearchCIDsResponse0\x01\x12j\n\rSearchRecords\x12*.agntcy.dir.search.v1.SearchRecordsRequest\x1a+.agntcy.dir.search.v1.SearchRecordsResponse0\x01\x42\xc6\x01\n\x18\x63om.agntcy.dir.search.v1B\x12SearchServiceProtoP\x01Z#github.com/agntcy/dir/api/search/v1\xa2\x02\x03\x41\x44S\xaa\x02\x14\x41gntcy.Dir.Search.V1\xca\x02\x14\x41gntcy\\Dir\\Search\\V1\xe2\x02 Agntcy\\Dir\\Search\\V1\\GPBMetadata\xea\x02\x17\x41gntcy::Dir::Search::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.search.v1.search_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\030com.agntcy.dir.search.v1B\022SearchServiceProtoP\001Z#github.com/agntcy/dir/api/search/v1\242\002\003ADS\252\002\024Agntcy.Dir.Search.V1\312\002\024Agntcy\\Dir\\Search\\V1\342\002 Agntcy\\Dir\\Search\\V1\\GPBMetadata\352\002\027Agntcy::Dir::Search::V1' + _globals['_SEARCHCIDSREQUEST']._serialized_start=142 + _globals['_SEARCHCIDSREQUEST']._serialized_end=299 + _globals['_SEARCHRECORDSREQUEST']._serialized_start=302 + _globals['_SEARCHRECORDSREQUEST']._serialized_end=462 + _globals['_SEARCHCIDSRESPONSE']._serialized_start=464 + _globals['_SEARCHCIDSRESPONSE']._serialized_end=515 + _globals['_SEARCHRECORDSRESPONSE']._serialized_start=517 + _globals['_SEARCHRECORDSRESPONSE']._serialized_end=592 + _globals['_SEARCHSERVICE']._serialized_start=595 + _globals['_SEARCHSERVICE']._serialized_end=817 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.pyi b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.pyi index 7d87a79a8..03306d526 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2.pyi @@ -1,40 +1,40 @@ -from agntcy.dir.core.v1 import record_pb2 as _record_pb2 -from agntcy.dir.search.v1 import record_query_pb2 as _record_query_pb2 -from google.protobuf.internal import containers as _containers -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class SearchCIDsRequest(_message.Message): - __slots__ = ("queries", "limit", "offset") - QUERIES_FIELD_NUMBER: _ClassVar[int] - LIMIT_FIELD_NUMBER: _ClassVar[int] - OFFSET_FIELD_NUMBER: _ClassVar[int] - queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] - limit: int - offset: int - def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... - -class SearchRecordsRequest(_message.Message): - __slots__ = ("queries", "limit", "offset") - QUERIES_FIELD_NUMBER: _ClassVar[int] - LIMIT_FIELD_NUMBER: _ClassVar[int] - OFFSET_FIELD_NUMBER: _ClassVar[int] - queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] - limit: int - offset: int - def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ..., offset: _Optional[int] = ...) 
-> None: ... - -class SearchCIDsResponse(_message.Message): - __slots__ = ("record_cid",) - RECORD_CID_FIELD_NUMBER: _ClassVar[int] - record_cid: str - def __init__(self, record_cid: _Optional[str] = ...) -> None: ... - -class SearchRecordsResponse(_message.Message): - __slots__ = ("record",) - RECORD_FIELD_NUMBER: _ClassVar[int] - record: _record_pb2.Record - def __init__(self, record: _Optional[_Union[_record_pb2.Record, _Mapping]] = ...) -> None: ... +from agntcy.dir.core.v1 import record_pb2 as _record_pb2 +from agntcy.dir.search.v1 import record_query_pb2 as _record_query_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class SearchCIDsRequest(_message.Message): + __slots__ = ("queries", "limit", "offset") + QUERIES_FIELD_NUMBER: _ClassVar[int] + LIMIT_FIELD_NUMBER: _ClassVar[int] + OFFSET_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] + limit: int + offset: int + def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... + +class SearchRecordsRequest(_message.Message): + __slots__ = ("queries", "limit", "offset") + QUERIES_FIELD_NUMBER: _ClassVar[int] + LIMIT_FIELD_NUMBER: _ClassVar[int] + OFFSET_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[_record_query_pb2.RecordQuery] + limit: int + offset: int + def __init__(self, queries: _Optional[_Iterable[_Union[_record_query_pb2.RecordQuery, _Mapping]]] = ..., limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... + +class SearchCIDsResponse(_message.Message): + __slots__ = ("record_cid",) + RECORD_CID_FIELD_NUMBER: _ClassVar[int] + record_cid: str + def __init__(self, record_cid: _Optional[str] = ...) -> None: ... + +class SearchRecordsResponse(_message.Message): + __slots__ = ("record",) + RECORD_FIELD_NUMBER: _ClassVar[int] + record: _record_pb2.Record + def __init__(self, record: _Optional[_Union[_record_pb2.Record, _Mapping]] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2_grpc.py index 45eea5003..700255635 100644 --- a/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/search/v1/search_service_pb2_grpc.py @@ -1,126 +1,126 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.search.v1 import search_service_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2 - - -class SearchServiceStub(object): - """Missing associated documentation comment in .proto file.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.SearchCIDs = channel.unary_stream( - '/agntcy.dir.search.v1.SearchService/SearchCIDs', - request_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.FromString, - _registered_method=True) - self.SearchRecords = channel.unary_stream( - '/agntcy.dir.search.v1.SearchService/SearchRecords', - request_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.FromString, - _registered_method=True) - - -class SearchServiceServicer(object): - """Missing associated documentation comment in .proto file.""" - - def SearchCIDs(self, request, context): - """Search for record CIDs that match the given parameters. - Returns only CIDs for efficient lookups and piping to other commands. - This operation does not interact with the network. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SearchRecords(self, request, context): - """Search for full records that match the given parameters. - Returns complete record data including all metadata, skills, domains, etc. - This operation does not interact with the network. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_SearchServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'SearchCIDs': grpc.unary_stream_rpc_method_handler( - servicer.SearchCIDs, - request_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.FromString, - response_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.SerializeToString, - ), - 'SearchRecords': grpc.unary_stream_rpc_method_handler( - servicer.SearchRecords, - request_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.FromString, - response_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.search.v1.SearchService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.search.v1.SearchService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. 
-class SearchService(object): - """Missing associated documentation comment in .proto file.""" - - @staticmethod - def SearchCIDs(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.search.v1.SearchService/SearchCIDs', - agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.SerializeToString, - agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def SearchRecords(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.search.v1.SearchService/SearchRecords', - agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.SerializeToString, - agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.search.v1 import search_service_pb2 as agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2 + + +class SearchServiceStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.SearchCIDs = channel.unary_stream( + '/agntcy.dir.search.v1.SearchService/SearchCIDs', + request_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.FromString, + _registered_method=True) + self.SearchRecords = channel.unary_stream( + '/agntcy.dir.search.v1.SearchService/SearchRecords', + request_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.FromString, + _registered_method=True) + + +class SearchServiceServicer(object): + """Missing associated documentation comment in .proto file.""" + + def SearchCIDs(self, request, context): + """Search for record CIDs that match the given parameters. + Returns only CIDs for efficient lookups and piping to other commands. + This operation does not interact with the network. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SearchRecords(self, request, context): + """Search for full records that match the given parameters. + Returns complete record data including all metadata, skills, domains, etc. + This operation does not interact with the network. 
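+
+        A minimal servicer sketch (illustrative; my_store.find is a
+        hypothetical lookup helper, not part of the generated code):
+
+            from agntcy.dir.search.v1 import search_service_pb2
+
+            class MySearchService(SearchServiceServicer):
+                def SearchRecords(self, request, context):
+                    # Stream one response per matching record.
+                    for record in my_store.find(request.queries):
+                        yield search_service_pb2.SearchRecordsResponse(record=record)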
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_SearchServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SearchCIDs': grpc.unary_stream_rpc_method_handler( + servicer.SearchCIDs, + request_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.FromString, + response_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.SerializeToString, + ), + 'SearchRecords': grpc.unary_stream_rpc_method_handler( + servicer.SearchRecords, + request_deserializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.FromString, + response_serializer=agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.search.v1.SearchService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.search.v1.SearchService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class SearchService(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def SearchCIDs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.search.v1.SearchService/SearchCIDs', + agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsRequest.SerializeToString, + agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchCIDsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def SearchRecords(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.search.v1.SearchService/SearchRecords', + agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsRequest.SerializeToString, + agntcy_dot_dir_dot_search_dot_v1_dot_search__service__pb2.SearchRecordsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.py b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.py index 7e700e367..b6fbbc05e 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.py @@ -1,37 +1,37 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/sign/v1/public_key.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/sign/v1/public_key.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#agntcy/dir/sign/v1/public_key.proto\x12\x12\x61gntcy.dir.sign.v1\"\x1d\n\tPublicKey\x12\x10\n\x03key\x18\x01 \x01(\tR\x03keyB\xb6\x01\n\x16\x63om.agntcy.dir.sign.v1B\x0ePublicKeyProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.public_key_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.sign.v1B\016PublicKeyProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' - _globals['_PUBLICKEY']._serialized_start=59 - _globals['_PUBLICKEY']._serialized_end=88 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/sign/v1/public_key.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/sign/v1/public_key.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#agntcy/dir/sign/v1/public_key.proto\x12\x12\x61gntcy.dir.sign.v1\"\x1d\n\tPublicKey\x12\x10\n\x03key\x18\x01 \x01(\tR\x03keyB\xb6\x01\n\x16\x63om.agntcy.dir.sign.v1B\x0ePublicKeyProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.public_key_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.sign.v1B\016PublicKeyProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' + _globals['_PUBLICKEY']._serialized_start=59 + _globals['_PUBLICKEY']._serialized_end=88 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.pyi b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.pyi index aa499f995..efb835796 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2.pyi @@ -1,11 +1,11 @@ -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Optional as _Optional - -DESCRIPTOR: _descriptor.FileDescriptor - -class PublicKey(_message.Message): - __slots__ = ("key",) - KEY_FIELD_NUMBER: _ClassVar[int] - key: str - def __init__(self, key: _Optional[str] = ...) -> None: ... +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class PublicKey(_message.Message): + __slots__ = ("key",) + KEY_FIELD_NUMBER: _ClassVar[int] + key: str + def __init__(self, key: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2_grpc.py b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/public_key_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.py b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.py index 242e12c31..3bfea6b8d 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.py @@ -1,55 +1,55 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/sign/v1/sign_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/sign/v1/sign_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 -from agntcy.dir.sign.v1 import signature_pb2 as agntcy_dot_dir_dot_sign_dot_v1_dot_signature__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%agntcy/dir/sign/v1/sign_service.proto\x12\x12\x61gntcy.dir.sign.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\"agntcy/dir/sign/v1/signature.proto\"\x90\x01\n\x0bSignRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12\x43\n\x08provider\x18\x02 \x01(\x0b\x32\'.agntcy.dir.sign.v1.SignRequestProviderR\x08provider\"\x8d\x01\n\x13SignRequestProvider\x12\x36\n\x04oidc\x18\x01 \x01(\x0b\x32 .agntcy.dir.sign.v1.SignWithOIDCH\x00R\x04oidc\x12\x33\n\x03key\x18\x02 \x01(\x0b\x32\x1f.agntcy.dir.sign.v1.SignWithKeyH\x00R\x03keyB\t\n\x07request\"\xe1\x02\n\x0cSignWithOIDC\x12\x19\n\x08id_token\x18\x01 \x01(\tR\x07idToken\x12\x43\n\x07options\x18\x02 \x01(\x0b\x32).agntcy.dir.sign.v1.SignWithOIDC.SignOptsR\x07options\x1a\xf0\x01\n\x08SignOpts\x12\"\n\nfulcio_url\x18\x01 \x01(\tH\x00R\tfulcioUrl\x88\x01\x01\x12 \n\trekor_url\x18\x02 \x01(\tH\x01R\x08rekorUrl\x88\x01\x01\x12(\n\rtimestamp_url\x18\x03 \x01(\tH\x02R\x0ctimestampUrl\x88\x01\x01\x12/\n\x11oidc_provider_url\x18\x04 \x01(\tH\x03R\x0foidcProviderUrl\x88\x01\x01\x42\r\n\x0b_fulcio_urlB\x0c\n\n_rekor_urlB\x10\n\x0e_timestamp_urlB\x14\n\x12_oidc_provider_url\"\\\n\x0bSignWithKey\x12\x1f\n\x0bprivate_key\x18\x01 \x01(\x0cR\nprivateKey\x12\x1f\n\x08password\x18\x02 \x01(\x0cH\x00R\x08password\x88\x01\x01\x42\x0b\n\t_password\"K\n\x0cSignResponse\x12;\n\tsignature\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.sign.v1.SignatureR\tsignature\"M\n\rVerifyRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\"f\n\x0eVerifyResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12(\n\rerror_message\x18\x02 \x01(\tH\x00R\x0c\x65rrorMessage\x88\x01\x01\x42\x10\n\x0e_error_message2\xa9\x01\n\x0bSignService\x12I\n\x04Sign\x12\x1f.agntcy.dir.sign.v1.SignRequest\x1a 
.agntcy.dir.sign.v1.SignResponse\x12O\n\x06Verify\x12!.agntcy.dir.sign.v1.VerifyRequest\x1a\".agntcy.dir.sign.v1.VerifyResponseB\xb8\x01\n\x16\x63om.agntcy.dir.sign.v1B\x10SignServiceProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.sign_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.sign.v1B\020SignServiceProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' - _globals['_SIGNREQUEST']._serialized_start=131 - _globals['_SIGNREQUEST']._serialized_end=275 - _globals['_SIGNREQUESTPROVIDER']._serialized_start=278 - _globals['_SIGNREQUESTPROVIDER']._serialized_end=419 - _globals['_SIGNWITHOIDC']._serialized_start=422 - _globals['_SIGNWITHOIDC']._serialized_end=775 - _globals['_SIGNWITHOIDC_SIGNOPTS']._serialized_start=535 - _globals['_SIGNWITHOIDC_SIGNOPTS']._serialized_end=775 - _globals['_SIGNWITHKEY']._serialized_start=777 - _globals['_SIGNWITHKEY']._serialized_end=869 - _globals['_SIGNRESPONSE']._serialized_start=871 - _globals['_SIGNRESPONSE']._serialized_end=946 - _globals['_VERIFYREQUEST']._serialized_start=948 - _globals['_VERIFYREQUEST']._serialized_end=1025 - _globals['_VERIFYRESPONSE']._serialized_start=1027 - _globals['_VERIFYRESPONSE']._serialized_end=1129 - _globals['_SIGNSERVICE']._serialized_start=1132 - _globals['_SIGNSERVICE']._serialized_end=1301 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/sign/v1/sign_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/sign/v1/sign_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 +from agntcy.dir.sign.v1 import signature_pb2 as agntcy_dot_dir_dot_sign_dot_v1_dot_signature__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%agntcy/dir/sign/v1/sign_service.proto\x12\x12\x61gntcy.dir.sign.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\"agntcy/dir/sign/v1/signature.proto\"\x90\x01\n\x0bSignRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12\x43\n\x08provider\x18\x02 \x01(\x0b\x32\'.agntcy.dir.sign.v1.SignRequestProviderR\x08provider\"\x8d\x01\n\x13SignRequestProvider\x12\x36\n\x04oidc\x18\x01 \x01(\x0b\x32 .agntcy.dir.sign.v1.SignWithOIDCH\x00R\x04oidc\x12\x33\n\x03key\x18\x02 \x01(\x0b\x32\x1f.agntcy.dir.sign.v1.SignWithKeyH\x00R\x03keyB\t\n\x07request\"\xe1\x02\n\x0cSignWithOIDC\x12\x19\n\x08id_token\x18\x01 \x01(\tR\x07idToken\x12\x43\n\x07options\x18\x02 \x01(\x0b\x32).agntcy.dir.sign.v1.SignWithOIDC.SignOptsR\x07options\x1a\xf0\x01\n\x08SignOpts\x12\"\n\nfulcio_url\x18\x01 \x01(\tH\x00R\tfulcioUrl\x88\x01\x01\x12 \n\trekor_url\x18\x02 \x01(\tH\x01R\x08rekorUrl\x88\x01\x01\x12(\n\rtimestamp_url\x18\x03 \x01(\tH\x02R\x0ctimestampUrl\x88\x01\x01\x12/\n\x11oidc_provider_url\x18\x04 \x01(\tH\x03R\x0foidcProviderUrl\x88\x01\x01\x42\r\n\x0b_fulcio_urlB\x0c\n\n_rekor_urlB\x10\n\x0e_timestamp_urlB\x14\n\x12_oidc_provider_url\"\\\n\x0bSignWithKey\x12\x1f\n\x0bprivate_key\x18\x01 \x01(\x0cR\nprivateKey\x12\x1f\n\x08password\x18\x02 \x01(\x0cH\x00R\x08password\x88\x01\x01\x42\x0b\n\t_password\"K\n\x0cSignResponse\x12;\n\tsignature\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.sign.v1.SignatureR\tsignature\"M\n\rVerifyRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\"f\n\x0eVerifyResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12(\n\rerror_message\x18\x02 \x01(\tH\x00R\x0c\x65rrorMessage\x88\x01\x01\x42\x10\n\x0e_error_message2\xa9\x01\n\x0bSignService\x12I\n\x04Sign\x12\x1f.agntcy.dir.sign.v1.SignRequest\x1a .agntcy.dir.sign.v1.SignResponse\x12O\n\x06Verify\x12!.agntcy.dir.sign.v1.VerifyRequest\x1a\".agntcy.dir.sign.v1.VerifyResponseB\xb8\x01\n\x16\x63om.agntcy.dir.sign.v1B\x10SignServiceProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.sign_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = 
b'\n\026com.agntcy.dir.sign.v1B\020SignServiceProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' + _globals['_SIGNREQUEST']._serialized_start=131 + _globals['_SIGNREQUEST']._serialized_end=275 + _globals['_SIGNREQUESTPROVIDER']._serialized_start=278 + _globals['_SIGNREQUESTPROVIDER']._serialized_end=419 + _globals['_SIGNWITHOIDC']._serialized_start=422 + _globals['_SIGNWITHOIDC']._serialized_end=775 + _globals['_SIGNWITHOIDC_SIGNOPTS']._serialized_start=535 + _globals['_SIGNWITHOIDC_SIGNOPTS']._serialized_end=775 + _globals['_SIGNWITHKEY']._serialized_start=777 + _globals['_SIGNWITHKEY']._serialized_end=869 + _globals['_SIGNRESPONSE']._serialized_start=871 + _globals['_SIGNRESPONSE']._serialized_end=946 + _globals['_VERIFYREQUEST']._serialized_start=948 + _globals['_VERIFYREQUEST']._serialized_end=1025 + _globals['_VERIFYRESPONSE']._serialized_start=1027 + _globals['_VERIFYRESPONSE']._serialized_end=1129 + _globals['_SIGNSERVICE']._serialized_start=1132 + _globals['_SIGNSERVICE']._serialized_end=1301 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.pyi b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.pyi index 167354727..a65081ef4 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2.pyi @@ -1,70 +1,70 @@ -from agntcy.dir.core.v1 import record_pb2 as _record_pb2 -from agntcy.dir.sign.v1 import signature_pb2 as _signature_pb2 -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class SignRequest(_message.Message): - __slots__ = ("record_ref", "provider") - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - PROVIDER_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - provider: SignRequestProvider - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., provider: _Optional[_Union[SignRequestProvider, _Mapping]] = ...) -> None: ... - -class SignRequestProvider(_message.Message): - __slots__ = ("oidc", "key") - OIDC_FIELD_NUMBER: _ClassVar[int] - KEY_FIELD_NUMBER: _ClassVar[int] - oidc: SignWithOIDC - key: SignWithKey - def __init__(self, oidc: _Optional[_Union[SignWithOIDC, _Mapping]] = ..., key: _Optional[_Union[SignWithKey, _Mapping]] = ...) -> None: ... - -class SignWithOIDC(_message.Message): - __slots__ = ("id_token", "options") - class SignOpts(_message.Message): - __slots__ = ("fulcio_url", "rekor_url", "timestamp_url", "oidc_provider_url") - FULCIO_URL_FIELD_NUMBER: _ClassVar[int] - REKOR_URL_FIELD_NUMBER: _ClassVar[int] - TIMESTAMP_URL_FIELD_NUMBER: _ClassVar[int] - OIDC_PROVIDER_URL_FIELD_NUMBER: _ClassVar[int] - fulcio_url: str - rekor_url: str - timestamp_url: str - oidc_provider_url: str - def __init__(self, fulcio_url: _Optional[str] = ..., rekor_url: _Optional[str] = ..., timestamp_url: _Optional[str] = ..., oidc_provider_url: _Optional[str] = ...) -> None: ... - ID_TOKEN_FIELD_NUMBER: _ClassVar[int] - OPTIONS_FIELD_NUMBER: _ClassVar[int] - id_token: str - options: SignWithOIDC.SignOpts - def __init__(self, id_token: _Optional[str] = ..., options: _Optional[_Union[SignWithOIDC.SignOpts, _Mapping]] = ...) -> None: ... 
- -class SignWithKey(_message.Message): - __slots__ = ("private_key", "password") - PRIVATE_KEY_FIELD_NUMBER: _ClassVar[int] - PASSWORD_FIELD_NUMBER: _ClassVar[int] - private_key: bytes - password: bytes - def __init__(self, private_key: _Optional[bytes] = ..., password: _Optional[bytes] = ...) -> None: ... - -class SignResponse(_message.Message): - __slots__ = ("signature",) - SIGNATURE_FIELD_NUMBER: _ClassVar[int] - signature: _signature_pb2.Signature - def __init__(self, signature: _Optional[_Union[_signature_pb2.Signature, _Mapping]] = ...) -> None: ... - -class VerifyRequest(_message.Message): - __slots__ = ("record_ref",) - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ...) -> None: ... - -class VerifyResponse(_message.Message): - __slots__ = ("success", "error_message") - SUCCESS_FIELD_NUMBER: _ClassVar[int] - ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] - success: bool - error_message: str - def __init__(self, success: bool = ..., error_message: _Optional[str] = ...) -> None: ... +from agntcy.dir.core.v1 import record_pb2 as _record_pb2 +from agntcy.dir.sign.v1 import signature_pb2 as _signature_pb2 +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class SignRequest(_message.Message): + __slots__ = ("record_ref", "provider") + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + PROVIDER_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + provider: SignRequestProvider + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., provider: _Optional[_Union[SignRequestProvider, _Mapping]] = ...) -> None: ... + +class SignRequestProvider(_message.Message): + __slots__ = ("oidc", "key") + OIDC_FIELD_NUMBER: _ClassVar[int] + KEY_FIELD_NUMBER: _ClassVar[int] + oidc: SignWithOIDC + key: SignWithKey + def __init__(self, oidc: _Optional[_Union[SignWithOIDC, _Mapping]] = ..., key: _Optional[_Union[SignWithKey, _Mapping]] = ...) -> None: ... + +class SignWithOIDC(_message.Message): + __slots__ = ("id_token", "options") + class SignOpts(_message.Message): + __slots__ = ("fulcio_url", "rekor_url", "timestamp_url", "oidc_provider_url") + FULCIO_URL_FIELD_NUMBER: _ClassVar[int] + REKOR_URL_FIELD_NUMBER: _ClassVar[int] + TIMESTAMP_URL_FIELD_NUMBER: _ClassVar[int] + OIDC_PROVIDER_URL_FIELD_NUMBER: _ClassVar[int] + fulcio_url: str + rekor_url: str + timestamp_url: str + oidc_provider_url: str + def __init__(self, fulcio_url: _Optional[str] = ..., rekor_url: _Optional[str] = ..., timestamp_url: _Optional[str] = ..., oidc_provider_url: _Optional[str] = ...) -> None: ... + ID_TOKEN_FIELD_NUMBER: _ClassVar[int] + OPTIONS_FIELD_NUMBER: _ClassVar[int] + id_token: str + options: SignWithOIDC.SignOpts + def __init__(self, id_token: _Optional[str] = ..., options: _Optional[_Union[SignWithOIDC.SignOpts, _Mapping]] = ...) -> None: ... + +class SignWithKey(_message.Message): + __slots__ = ("private_key", "password") + PRIVATE_KEY_FIELD_NUMBER: _ClassVar[int] + PASSWORD_FIELD_NUMBER: _ClassVar[int] + private_key: bytes + password: bytes + def __init__(self, private_key: _Optional[bytes] = ..., password: _Optional[bytes] = ...) -> None: ... 
+ +class SignResponse(_message.Message): + __slots__ = ("signature",) + SIGNATURE_FIELD_NUMBER: _ClassVar[int] + signature: _signature_pb2.Signature + def __init__(self, signature: _Optional[_Union[_signature_pb2.Signature, _Mapping]] = ...) -> None: ... + +class VerifyRequest(_message.Message): + __slots__ = ("record_ref",) + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ...) -> None: ... + +class VerifyResponse(_message.Message): + __slots__ = ("success", "error_message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + error_message: str + def __init__(self, success: bool = ..., error_message: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2_grpc.py index 6f29b521e..7f0ea6513 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/sign_service_pb2_grpc.py @@ -1,125 +1,125 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.sign.v1 import sign_service_pb2 as agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2 - - -class SignServiceStub(object): - """SignService provides methods to sign and verify records. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Sign = channel.unary_unary( - '/agntcy.dir.sign.v1.SignService/Sign', - request_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.FromString, - _registered_method=True) - self.Verify = channel.unary_unary( - '/agntcy.dir.sign.v1.SignService/Verify', - request_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.FromString, - _registered_method=True) - - -class SignServiceServicer(object): - """SignService provides methods to sign and verify records. 
- """ - - def Sign(self, request, context): - """Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Verify(self, request, context): - """Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_SignServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Sign': grpc.unary_unary_rpc_method_handler( - servicer.Sign, - request_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.FromString, - response_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.SerializeToString, - ), - 'Verify': grpc.unary_unary_rpc_method_handler( - servicer.Verify, - request_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.FromString, - response_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.sign.v1.SignService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.sign.v1.SignService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class SignService(object): - """SignService provides methods to sign and verify records. - """ - - @staticmethod - def Sign(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.sign.v1.SignService/Sign', - agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.SerializeToString, - agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Verify(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.sign.v1.SignService/Verify', - agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.SerializeToString, - agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.sign.v1 import sign_service_pb2 as agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2 + + +class SignServiceStub(object): + """SignService provides methods to sign and verify records. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
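+
+        Example (illustrative; the address is an assumption):
+
+            channel = grpc.insecure_channel('localhost:8888')
+            stub = SignServiceStub(channel)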
+ """ + self.Sign = channel.unary_unary( + '/agntcy.dir.sign.v1.SignService/Sign', + request_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.FromString, + _registered_method=True) + self.Verify = channel.unary_unary( + '/agntcy.dir.sign.v1.SignService/Verify', + request_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.FromString, + _registered_method=True) + + +class SignServiceServicer(object): + """SignService provides methods to sign and verify records. + """ + + def Sign(self, request, context): + """Sign record using keyless OIDC based provider or using PEM-encoded private key with an optional passphrase + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Verify(self, request, context): + """Verify signed record using keyless OIDC based provider or using PEM-encoded formatted PEM public key encrypted + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_SignServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Sign': grpc.unary_unary_rpc_method_handler( + servicer.Sign, + request_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.FromString, + response_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.SerializeToString, + ), + 'Verify': grpc.unary_unary_rpc_method_handler( + servicer.Verify, + request_deserializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.FromString, + response_serializer=agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.sign.v1.SignService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.sign.v1.SignService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class SignService(object): + """SignService provides methods to sign and verify records. 
+ """ + + @staticmethod + def Sign(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.sign.v1.SignService/Sign', + agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignRequest.SerializeToString, + agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.SignResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Verify(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.sign.v1.SignService/Verify', + agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyRequest.SerializeToString, + agntcy_dot_dir_dot_sign_dot_v1_dot_sign__service__pb2.VerifyResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.py b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.py index 68d53a8e9..863a37df6 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.py @@ -1,41 +1,41 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/sign/v1/signature.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/sign/v1/signature.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"agntcy/dir/sign/v1/signature.proto\x12\x12\x61gntcy.dir.sign.v1\"\xe2\x02\n\tSignature\x12P\n\x0b\x61nnotations\x18\x01 \x03(\x0b\x32..agntcy.dir.sign.v1.Signature.AnnotationsEntryR\x0b\x61nnotations\x12\x1b\n\tsigned_at\x18\x02 \x01(\tR\x08signedAt\x12\x1c\n\talgorithm\x18\x03 \x01(\tR\talgorithm\x12\x1c\n\tsignature\x18\x04 \x01(\tR\tsignature\x12 \n\x0b\x63\x65rtificate\x18\x05 \x01(\tR\x0b\x63\x65rtificate\x12!\n\x0c\x63ontent_type\x18\x06 \x01(\tR\x0b\x63ontentType\x12%\n\x0e\x63ontent_bundle\x18\x07 \x01(\tR\rcontentBundle\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\xb6\x01\n\x16\x63om.agntcy.dir.sign.v1B\x0eSignatureProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.signature_pb2', _globals) -if not 
_descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.sign.v1B\016SignatureProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' - _globals['_SIGNATURE_ANNOTATIONSENTRY']._loaded_options = None - _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_options = b'8\001' - _globals['_SIGNATURE']._serialized_start=59 - _globals['_SIGNATURE']._serialized_end=413 - _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_start=351 - _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_end=413 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/sign/v1/signature.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/sign/v1/signature.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"agntcy/dir/sign/v1/signature.proto\x12\x12\x61gntcy.dir.sign.v1\"\xe2\x02\n\tSignature\x12P\n\x0b\x61nnotations\x18\x01 \x03(\x0b\x32..agntcy.dir.sign.v1.Signature.AnnotationsEntryR\x0b\x61nnotations\x12\x1b\n\tsigned_at\x18\x02 \x01(\tR\x08signedAt\x12\x1c\n\talgorithm\x18\x03 \x01(\tR\talgorithm\x12\x1c\n\tsignature\x18\x04 \x01(\tR\tsignature\x12 \n\x0b\x63\x65rtificate\x18\x05 \x01(\tR\x0b\x63\x65rtificate\x12!\n\x0c\x63ontent_type\x18\x06 \x01(\tR\x0b\x63ontentType\x12%\n\x0e\x63ontent_bundle\x18\x07 \x01(\tR\rcontentBundle\x1a>\n\x10\x41nnotationsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\x42\xb6\x01\n\x16\x63om.agntcy.dir.sign.v1B\x0eSignatureProtoP\x01Z!github.com/agntcy/dir/api/sign/v1\xa2\x02\x03\x41\x44S\xaa\x02\x12\x41gntcy.Dir.Sign.V1\xca\x02\x12\x41gntcy\\Dir\\Sign\\V1\xe2\x02\x1e\x41gntcy\\Dir\\Sign\\V1\\GPBMetadata\xea\x02\x15\x41gntcy::Dir::Sign::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.sign.v1.signature_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.agntcy.dir.sign.v1B\016SignatureProtoP\001Z!github.com/agntcy/dir/api/sign/v1\242\002\003ADS\252\002\022Agntcy.Dir.Sign.V1\312\002\022Agntcy\\Dir\\Sign\\V1\342\002\036Agntcy\\Dir\\Sign\\V1\\GPBMetadata\352\002\025Agntcy::Dir::Sign::V1' + _globals['_SIGNATURE_ANNOTATIONSENTRY']._loaded_options = None + _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_options = b'8\001' + _globals['_SIGNATURE']._serialized_start=59 + _globals['_SIGNATURE']._serialized_end=413 + _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_start=351 + _globals['_SIGNATURE_ANNOTATIONSENTRY']._serialized_end=413 +# @@protoc_insertion_point(module_scope) diff --git 
a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.pyi b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.pyi index 9a241204c..261c8143c 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2.pyi @@ -1,31 +1,31 @@ -from google.protobuf.internal import containers as _containers -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional - -DESCRIPTOR: _descriptor.FileDescriptor - -class Signature(_message.Message): - __slots__ = ("annotations", "signed_at", "algorithm", "signature", "certificate", "content_type", "content_bundle") - class AnnotationsEntry(_message.Message): - __slots__ = ("key", "value") - KEY_FIELD_NUMBER: _ClassVar[int] - VALUE_FIELD_NUMBER: _ClassVar[int] - key: str - value: str - def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... - ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] - SIGNED_AT_FIELD_NUMBER: _ClassVar[int] - ALGORITHM_FIELD_NUMBER: _ClassVar[int] - SIGNATURE_FIELD_NUMBER: _ClassVar[int] - CERTIFICATE_FIELD_NUMBER: _ClassVar[int] - CONTENT_TYPE_FIELD_NUMBER: _ClassVar[int] - CONTENT_BUNDLE_FIELD_NUMBER: _ClassVar[int] - annotations: _containers.ScalarMap[str, str] - signed_at: str - algorithm: str - signature: str - certificate: str - content_type: str - content_bundle: str - def __init__(self, annotations: _Optional[_Mapping[str, str]] = ..., signed_at: _Optional[str] = ..., algorithm: _Optional[str] = ..., signature: _Optional[str] = ..., certificate: _Optional[str] = ..., content_type: _Optional[str] = ..., content_bundle: _Optional[str] = ...) -> None: ... +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class Signature(_message.Message): + __slots__ = ("annotations", "signed_at", "algorithm", "signature", "certificate", "content_type", "content_bundle") + class AnnotationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + ANNOTATIONS_FIELD_NUMBER: _ClassVar[int] + SIGNED_AT_FIELD_NUMBER: _ClassVar[int] + ALGORITHM_FIELD_NUMBER: _ClassVar[int] + SIGNATURE_FIELD_NUMBER: _ClassVar[int] + CERTIFICATE_FIELD_NUMBER: _ClassVar[int] + CONTENT_TYPE_FIELD_NUMBER: _ClassVar[int] + CONTENT_BUNDLE_FIELD_NUMBER: _ClassVar[int] + annotations: _containers.ScalarMap[str, str] + signed_at: str + algorithm: str + signature: str + certificate: str + content_type: str + content_bundle: str + def __init__(self, annotations: _Optional[_Mapping[str, str]] = ..., signed_at: _Optional[str] = ..., algorithm: _Optional[str] = ..., signature: _Optional[str] = ..., certificate: _Optional[str] = ..., content_type: _Optional[str] = ..., content_bundle: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2_grpc.py b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2_grpc.py index 2daafffeb..910a4354e 100644 --- a/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/sign/v1/signature_pb2_grpc.py @@ -1,4 +1,4 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
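
For orientation, a hypothetical usage sketch of the generated SignService stub and Signature message (not part of this diff): the server address is assumed, SignRequest is constructed empty because its fields are defined elsewhere in the SDK, and only fields that appear in this diff (Signature.algorithm, Signature.signed_at, VerifyResponse.success, VerifyResponse.error_message) are read.

```python
import grpc

from agntcy.dir.core.v1 import record_pb2
from agntcy.dir.sign.v1 import sign_service_pb2, sign_service_pb2_grpc

channel = grpc.insecure_channel("localhost:8888")  # assumed address
stub = sign_service_pb2_grpc.SignServiceStub(channel)

# Sign: SignRequest fields are not shown in this diff, so an empty message
# stands in as a placeholder. The response carries a Signature.
sign_resp = stub.Sign(sign_service_pb2.SignRequest())
print(sign_resp.signature.algorithm, sign_resp.signature.signed_at)

# Verify: takes a RecordRef (placeholder here) and reports success plus
# an optional error_message.
verify_resp = stub.Verify(
    sign_service_pb2.VerifyRequest(record_ref=record_pb2.RecordRef())
)
if not verify_resp.success:
    print("verification failed:", verify_resp.error_message)
```
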
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.py b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.py index 3fd07c2b4..d6ab1910d 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.py @@ -1,47 +1,47 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/store/v1/store_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/store/v1/store_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'agntcy/dir/store/v1/store_service.proto\x12\x13\x61gntcy.dir.store.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x93\x01\n\x13PushReferrerRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12>\n\x08referrer\x18\x02 \x01(\x0b\x32\".agntcy.dir.core.v1.RecordReferrerR\x08referrer\"l\n\x14PushReferrerResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12(\n\rerror_message\x18\x02 \x01(\tH\x00R\x0c\x65rrorMessage\x88\x01\x01\x42\x10\n\x0e_error_message\"\x8f\x01\n\x13PullReferrerRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12(\n\rreferrer_type\x18\x02 \x01(\tH\x00R\x0creferrerType\x88\x01\x01\x42\x10\n\x0e_referrer_type\"V\n\x14PullReferrerResponse\x12>\n\x08referrer\x18\x01 \x01(\x0b\x32\".agntcy.dir.core.v1.RecordReferrerR\x08referrer2\xfe\x03\n\x0cStoreService\x12\x45\n\x04Push\x12\x1a.agntcy.dir.core.v1.Record\x1a\x1d.agntcy.dir.core.v1.RecordRef(\x01\x30\x01\x12\x45\n\x04Pull\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x1a.agntcy.dir.core.v1.Record(\x01\x30\x01\x12K\n\x06Lookup\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x1e.agntcy.dir.core.v1.RecordMeta(\x01\x30\x01\x12\x41\n\x06\x44\x65lete\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x16.google.protobuf.Empty(\x01\x12g\n\x0cPushReferrer\x12(.agntcy.dir.store.v1.PushReferrerRequest\x1a).agntcy.dir.store.v1.PushReferrerResponse(\x01\x30\x01\x12g\n\x0cPullReferrer\x12(.agntcy.dir.store.v1.PullReferrerRequest\x1a).agntcy.dir.store.v1.PullReferrerResponse(\x01\x30\x01\x42\xbf\x01\n\x17\x63om.agntcy.dir.store.v1B\x11StoreServiceProtoP\x01Z\"github.com/agntcy/dir/api/store/v1\xa2\x02\x03\x41\x44S\xaa\x02\x13\x41gntcy.Dir.Store.V1\xca\x02\x13\x41gntcy\\Dir\\Store\\V1\xe2\x02\x1f\x41gntcy\\Dir\\Store\\V1\\GPBMetadata\xea\x02\x16\x41gntcy::Dir::Store::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) 
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.store.v1.store_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\027com.agntcy.dir.store.v1B\021StoreServiceProtoP\001Z\"github.com/agntcy/dir/api/store/v1\242\002\003ADS\252\002\023Agntcy.Dir.Store.V1\312\002\023Agntcy\\Dir\\Store\\V1\342\002\037Agntcy\\Dir\\Store\\V1\\GPBMetadata\352\002\026Agntcy::Dir::Store::V1' - _globals['_PUSHREFERRERREQUEST']._serialized_start=127 - _globals['_PUSHREFERRERREQUEST']._serialized_end=274 - _globals['_PUSHREFERRERRESPONSE']._serialized_start=276 - _globals['_PUSHREFERRERRESPONSE']._serialized_end=384 - _globals['_PULLREFERRERREQUEST']._serialized_start=387 - _globals['_PULLREFERRERREQUEST']._serialized_end=530 - _globals['_PULLREFERRERRESPONSE']._serialized_start=532 - _globals['_PULLREFERRERRESPONSE']._serialized_end=618 - _globals['_STORESERVICE']._serialized_start=621 - _globals['_STORESERVICE']._serialized_end=1131 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/store/v1/store_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/store/v1/store_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'agntcy/dir/store/v1/store_service.proto\x12\x13\x61gntcy.dir.store.v1\x1a\x1f\x61gntcy/dir/core/v1/record.proto\x1a\x1bgoogle/protobuf/empty.proto\"\x93\x01\n\x13PushReferrerRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12>\n\x08referrer\x18\x02 \x01(\x0b\x32\".agntcy.dir.core.v1.RecordReferrerR\x08referrer\"l\n\x14PushReferrerResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12(\n\rerror_message\x18\x02 \x01(\tH\x00R\x0c\x65rrorMessage\x88\x01\x01\x42\x10\n\x0e_error_message\"\x8f\x01\n\x13PullReferrerRequest\x12<\n\nrecord_ref\x18\x01 \x01(\x0b\x32\x1d.agntcy.dir.core.v1.RecordRefR\trecordRef\x12(\n\rreferrer_type\x18\x02 \x01(\tH\x00R\x0creferrerType\x88\x01\x01\x42\x10\n\x0e_referrer_type\"V\n\x14PullReferrerResponse\x12>\n\x08referrer\x18\x01 
\x01(\x0b\x32\".agntcy.dir.core.v1.RecordReferrerR\x08referrer2\xfe\x03\n\x0cStoreService\x12\x45\n\x04Push\x12\x1a.agntcy.dir.core.v1.Record\x1a\x1d.agntcy.dir.core.v1.RecordRef(\x01\x30\x01\x12\x45\n\x04Pull\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x1a.agntcy.dir.core.v1.Record(\x01\x30\x01\x12K\n\x06Lookup\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x1e.agntcy.dir.core.v1.RecordMeta(\x01\x30\x01\x12\x41\n\x06\x44\x65lete\x12\x1d.agntcy.dir.core.v1.RecordRef\x1a\x16.google.protobuf.Empty(\x01\x12g\n\x0cPushReferrer\x12(.agntcy.dir.store.v1.PushReferrerRequest\x1a).agntcy.dir.store.v1.PushReferrerResponse(\x01\x30\x01\x12g\n\x0cPullReferrer\x12(.agntcy.dir.store.v1.PullReferrerRequest\x1a).agntcy.dir.store.v1.PullReferrerResponse(\x01\x30\x01\x42\xbf\x01\n\x17\x63om.agntcy.dir.store.v1B\x11StoreServiceProtoP\x01Z\"github.com/agntcy/dir/api/store/v1\xa2\x02\x03\x41\x44S\xaa\x02\x13\x41gntcy.Dir.Store.V1\xca\x02\x13\x41gntcy\\Dir\\Store\\V1\xe2\x02\x1f\x41gntcy\\Dir\\Store\\V1\\GPBMetadata\xea\x02\x16\x41gntcy::Dir::Store::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.store.v1.store_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\027com.agntcy.dir.store.v1B\021StoreServiceProtoP\001Z\"github.com/agntcy/dir/api/store/v1\242\002\003ADS\252\002\023Agntcy.Dir.Store.V1\312\002\023Agntcy\\Dir\\Store\\V1\342\002\037Agntcy\\Dir\\Store\\V1\\GPBMetadata\352\002\026Agntcy::Dir::Store::V1' + _globals['_PUSHREFERRERREQUEST']._serialized_start=127 + _globals['_PUSHREFERRERREQUEST']._serialized_end=274 + _globals['_PUSHREFERRERRESPONSE']._serialized_start=276 + _globals['_PUSHREFERRERRESPONSE']._serialized_end=384 + _globals['_PULLREFERRERREQUEST']._serialized_start=387 + _globals['_PULLREFERRERREQUEST']._serialized_end=530 + _globals['_PULLREFERRERRESPONSE']._serialized_start=532 + _globals['_PULLREFERRERRESPONSE']._serialized_end=618 + _globals['_STORESERVICE']._serialized_start=621 + _globals['_STORESERVICE']._serialized_end=1131 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.pyi b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.pyi index 9604c4c06..aeb1b6966 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2.pyi @@ -1,37 +1,37 @@ -from agntcy.dir.core.v1 import record_pb2 as _record_pb2 -from google.protobuf import empty_pb2 as _empty_pb2 -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class PushReferrerRequest(_message.Message): - __slots__ = ("record_ref", "referrer") - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - REFERRER_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - referrer: _record_pb2.RecordReferrer - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., referrer: _Optional[_Union[_record_pb2.RecordReferrer, _Mapping]] = ...) -> None: ... 
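
The referrer messages above pair a RecordRef with a RecordReferrer, and PullReferrerRequest.referrer_type uses proto3 explicit presence (note the synthetic _referrer_type oneof in the serialized descriptor). A hypothetical construction sketch; the ref and referrer are left empty because their fields live in record_pb2, outside this excerpt:

```python
from agntcy.dir.core.v1 import record_pb2
from agntcy.dir.store.v1 import store_service_pb2

push_req = store_service_pb2.PushReferrerRequest(
    record_ref=record_pb2.RecordRef(),     # placeholder ref
    referrer=record_pb2.RecordReferrer(),  # placeholder referrer
)

# referrer_type is an optional field, so presence is trackable via HasField().
pull_req = store_service_pb2.PullReferrerRequest(record_ref=record_pb2.RecordRef())
assert not pull_req.HasField("referrer_type")
pull_req.referrer_type = "signature"  # hypothetical referrer type
assert pull_req.HasField("referrer_type")
```
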
- -class PushReferrerResponse(_message.Message): - __slots__ = ("success", "error_message") - SUCCESS_FIELD_NUMBER: _ClassVar[int] - ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] - success: bool - error_message: str - def __init__(self, success: bool = ..., error_message: _Optional[str] = ...) -> None: ... - -class PullReferrerRequest(_message.Message): - __slots__ = ("record_ref", "referrer_type") - RECORD_REF_FIELD_NUMBER: _ClassVar[int] - REFERRER_TYPE_FIELD_NUMBER: _ClassVar[int] - record_ref: _record_pb2.RecordRef - referrer_type: str - def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., referrer_type: _Optional[str] = ...) -> None: ... - -class PullReferrerResponse(_message.Message): - __slots__ = ("referrer",) - REFERRER_FIELD_NUMBER: _ClassVar[int] - referrer: _record_pb2.RecordReferrer - def __init__(self, referrer: _Optional[_Union[_record_pb2.RecordReferrer, _Mapping]] = ...) -> None: ... +from agntcy.dir.core.v1 import record_pb2 as _record_pb2 +from google.protobuf import empty_pb2 as _empty_pb2 +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class PushReferrerRequest(_message.Message): + __slots__ = ("record_ref", "referrer") + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + REFERRER_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + referrer: _record_pb2.RecordReferrer + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., referrer: _Optional[_Union[_record_pb2.RecordReferrer, _Mapping]] = ...) -> None: ... + +class PushReferrerResponse(_message.Message): + __slots__ = ("success", "error_message") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] + success: bool + error_message: str + def __init__(self, success: bool = ..., error_message: _Optional[str] = ...) -> None: ... + +class PullReferrerRequest(_message.Message): + __slots__ = ("record_ref", "referrer_type") + RECORD_REF_FIELD_NUMBER: _ClassVar[int] + REFERRER_TYPE_FIELD_NUMBER: _ClassVar[int] + record_ref: _record_pb2.RecordRef + referrer_type: str + def __init__(self, record_ref: _Optional[_Union[_record_pb2.RecordRef, _Mapping]] = ..., referrer_type: _Optional[str] = ...) -> None: ... + +class PullReferrerResponse(_message.Message): + __slots__ = ("referrer",) + REFERRER_FIELD_NUMBER: _ClassVar[int] + referrer: _record_pb2.RecordReferrer + def __init__(self, referrer: _Optional[_Union[_record_pb2.RecordReferrer, _Mapping]] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2_grpc.py index c5586ddd9..4eb04db0a 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/store/v1/store_service_pb2_grpc.py @@ -1,345 +1,345 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 -from agntcy.dir.store.v1 import store_service_pb2 as agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class StoreServiceStub(object): - """Defines an interface for content-addressable storage - service for objects. 
- - Max object size: 4MB (to fully fit in a single request) - Max metadata size: 100KB - - Store service can be implemented by various storage backends, - such as local file system, OCI registry, etc. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. - - Each operation is performed sequentially, meaning that - for the N-th request, N-th response will be returned. - If an error occurs, the stream will be cancelled. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.Push = channel.stream_stream( - '/agntcy.dir.store.v1.StoreService/Push', - request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, - _registered_method=True) - self.Pull = channel.stream_stream( - '/agntcy.dir.store.v1.StoreService/Pull', - request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, - _registered_method=True) - self.Lookup = channel.stream_stream( - '/agntcy.dir.store.v1.StoreService/Lookup', - request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.FromString, - _registered_method=True) - self.Delete = channel.stream_unary( - '/agntcy.dir.store.v1.StoreService/Delete', - request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - _registered_method=True) - self.PushReferrer = channel.stream_stream( - '/agntcy.dir.store.v1.StoreService/PushReferrer', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.FromString, - _registered_method=True) - self.PullReferrer = channel.stream_stream( - '/agntcy.dir.store.v1.StoreService/PullReferrer', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.FromString, - _registered_method=True) - - -class StoreServiceServicer(object): - """Defines an interface for content-addressable storage - service for objects. - - Max object size: 4MB (to fully fit in a single request) - Max metadata size: 100KB - - Store service can be implemented by various storage backends, - such as local file system, OCI registry, etc. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. - - Each operation is performed sequentially, meaning that - for the N-th request, N-th response will be returned. - If an error occurs, the stream will be cancelled. - """ - - def Push(self, request_iterator, context): - """Push performs write operation for given records. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Pull(self, request_iterator, context): - """Pull performs read operation for given records. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Lookup(self, request_iterator, context): - """Lookup resolves basic metadata for the records. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Delete(self, request_iterator, context): - """Remove performs delete operation for the records. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PushReferrer(self, request_iterator, context): - """PushReferrer performs write operation for record referrers. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PullReferrer(self, request_iterator, context): - """PullReferrer performs read operation for record referrers. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_StoreServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'Push': grpc.stream_stream_rpc_method_handler( - servicer.Push, - request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, - response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - ), - 'Pull': grpc.stream_stream_rpc_method_handler( - servicer.Pull, - request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, - response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, - ), - 'Lookup': grpc.stream_stream_rpc_method_handler( - servicer.Lookup, - request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, - response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.SerializeToString, - ), - 'Delete': grpc.stream_unary_rpc_method_handler( - servicer.Delete, - request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'PushReferrer': grpc.stream_stream_rpc_method_handler( - servicer.PushReferrer, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.SerializeToString, - ), - 'PullReferrer': grpc.stream_stream_rpc_method_handler( - servicer.PullReferrer, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.store.v1.StoreService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.store.v1.StoreService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class StoreService(object): - """Defines an interface for content-addressable storage - service for objects. 
- - Max object size: 4MB (to fully fit in a single request) - Max metadata size: 100KB - - Store service can be implemented by various storage backends, - such as local file system, OCI registry, etc. - - Middleware should be used to control who can perform these RPCs. - Policies for the middleware can be handled via separate service. - - Each operation is performed sequentially, meaning that - for the N-th request, N-th response will be returned. - If an error occurs, the stream will be cancelled. - """ - - @staticmethod - def Push(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/Push', - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Pull(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/Pull', - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Lookup(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/Lookup', - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def Delete(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_unary( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/Delete', - agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def PushReferrer(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/PushReferrer', - agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.SerializeToString, - 
agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def PullReferrer(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream( - request_iterator, - target, - '/agntcy.dir.store.v1.StoreService/PullReferrer', - agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.core.v1 import record_pb2 as agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2 +from agntcy.dir.store.v1 import store_service_pb2 as agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class StoreServiceStub(object): + """Defines an interface for content-addressable storage + service for objects. + + Max object size: 4MB (to fully fit in a single request) + Max metadata size: 100KB + + Store service can be implemented by various storage backends, + such as local file system, OCI registry, etc. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. + + Each operation is performed sequentially, meaning that + for the N-th request, N-th response will be returned. + If an error occurs, the stream will be cancelled. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
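
The StoreService comment promises ordered streaming: the N-th response answers the N-th request. A hypothetical client sketch built only on what this diff shows (Push/Pull/Lookup are stream-stream, Delete is stream-unary); the address and record contents are assumptions:

```python
import grpc

from agntcy.dir.core.v1 import record_pb2
from agntcy.dir.store.v1 import store_service_pb2_grpc

channel = grpc.insecure_channel("localhost:8888")  # assumed address
stub = store_service_pb2_grpc.StoreServiceStub(channel)

def records():
    # Record fields are defined in record_pb2, outside this excerpt;
    # empty messages stand in (real payloads must fit the 4MB limit).
    for _ in range(3):
        yield record_pb2.Record()

# Push is bidirectional streaming; because responses preserve request
# order, the i-th RecordRef identifies the i-th pushed Record.
refs = list(stub.Push(records()))

# Lookup streams RecordMeta back in the same order.
for meta in stub.Lookup(iter(refs)):
    print(meta)

# Delete is stream-unary: stream refs in, receive a single Empty back.
stub.Delete(iter(refs))
```
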
+ """ + self.Push = channel.stream_stream( + '/agntcy.dir.store.v1.StoreService/Push', + request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, + _registered_method=True) + self.Pull = channel.stream_stream( + '/agntcy.dir.store.v1.StoreService/Pull', + request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, + _registered_method=True) + self.Lookup = channel.stream_stream( + '/agntcy.dir.store.v1.StoreService/Lookup', + request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.FromString, + _registered_method=True) + self.Delete = channel.stream_unary( + '/agntcy.dir.store.v1.StoreService/Delete', + request_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + _registered_method=True) + self.PushReferrer = channel.stream_stream( + '/agntcy.dir.store.v1.StoreService/PushReferrer', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.FromString, + _registered_method=True) + self.PullReferrer = channel.stream_stream( + '/agntcy.dir.store.v1.StoreService/PullReferrer', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.FromString, + _registered_method=True) + + +class StoreServiceServicer(object): + """Defines an interface for content-addressable storage + service for objects. + + Max object size: 4MB (to fully fit in a single request) + Max metadata size: 100KB + + Store service can be implemented by various storage backends, + such as local file system, OCI registry, etc. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. + + Each operation is performed sequentially, meaning that + for the N-th request, N-th response will be returned. + If an error occurs, the stream will be cancelled. + """ + + def Push(self, request_iterator, context): + """Push performs write operation for given records. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Pull(self, request_iterator, context): + """Pull performs read operation for given records. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Lookup(self, request_iterator, context): + """Lookup resolves basic metadata for the records. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Delete(self, request_iterator, context): + """Remove performs delete operation for the records. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PushReferrer(self, request_iterator, context): + """PushReferrer performs write operation for record referrers. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PullReferrer(self, request_iterator, context): + """PullReferrer performs read operation for record referrers. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_StoreServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Push': grpc.stream_stream_rpc_method_handler( + servicer.Push, + request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, + response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + ), + 'Pull': grpc.stream_stream_rpc_method_handler( + servicer.Pull, + request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, + response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, + ), + 'Lookup': grpc.stream_stream_rpc_method_handler( + servicer.Lookup, + request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, + response_serializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.SerializeToString, + ), + 'Delete': grpc.stream_unary_rpc_method_handler( + servicer.Delete, + request_deserializer=agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'PushReferrer': grpc.stream_stream_rpc_method_handler( + servicer.PushReferrer, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.SerializeToString, + ), + 'PullReferrer': grpc.stream_stream_rpc_method_handler( + servicer.PullReferrer, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.store.v1.StoreService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.store.v1.StoreService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class StoreService(object): + """Defines an interface for content-addressable storage + service for objects. + + Max object size: 4MB (to fully fit in a single request) + Max metadata size: 100KB + + Store service can be implemented by various storage backends, + such as local file system, OCI registry, etc. + + Middleware should be used to control who can perform these RPCs. + Policies for the middleware can be handled via separate service. + + Each operation is performed sequentially, meaning that + for the N-th request, N-th response will be returned. + If an error occurs, the stream will be cancelled. 
+ """ + + @staticmethod + def Push(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/agntcy.dir.store.v1.StoreService/Push', + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.SerializeToString, + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Pull(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/agntcy.dir.store.v1.StoreService/Pull', + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.Record.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Lookup(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/agntcy.dir.store.v1.StoreService/Lookup', + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordMeta.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def Delete(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_unary( + request_iterator, + target, + '/agntcy.dir.store.v1.StoreService/Delete', + agntcy_dot_dir_dot_core_dot_v1_dot_record__pb2.RecordRef.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PushReferrer(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + '/agntcy.dir.store.v1.StoreService/PushReferrer', + agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PushReferrerResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def PullReferrer(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream( + request_iterator, + target, + 
'/agntcy.dir.store.v1.StoreService/PullReferrer', + agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_store__service__pb2.PullReferrerResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.py b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.py index 0e936eb11..9b12d5624 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.py +++ b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.py @@ -1,61 +1,61 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: agntcy/dir/store/v1/sync_service.proto -# Protobuf Python Version: 6.32.1 -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, - 6, - 32, - 1, - '', - 'agntcy/dir/store/v1/sync_service.proto' -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&agntcy/dir/store/v1/sync_service.proto\x12\x13\x61gntcy.dir.store.v1\"Y\n\x11\x43reateSyncRequest\x12\x30\n\x14remote_directory_url\x18\x01 \x01(\tR\x12remoteDirectoryUrl\x12\x12\n\x04\x63ids\x18\x02 \x03(\tR\x04\x63ids\"-\n\x12\x43reateSyncResponse\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"_\n\x10ListSyncsRequest\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\x93\x01\n\rListSyncsItem\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.store.v1.SyncStatusR\x06status\x12\x30\n\x14remote_directory_url\x18\x03 \x01(\tR\x12remoteDirectoryUrl\")\n\x0eGetSyncRequest\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"\xe2\x01\n\x0fGetSyncResponse\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.store.v1.SyncStatusR\x06status\x12\x30\n\x14remote_directory_url\x18\x03 \x01(\tR\x12remoteDirectoryUrl\x12!\n\x0c\x63reated_time\x18\x04 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x05 \x01(\tR\x0elastUpdateTime\",\n\x11\x44\x65leteSyncRequest\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"\x14\n\x12\x44\x65leteSyncResponse\"Q\n!RequestRegistryCredentialsRequest\x12,\n\x12requesting_node_id\x18\x01 \x01(\tR\x10requestingNodeId\"\xee\x01\n\"RequestRegistryCredentialsResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12#\n\rerror_message\x18\x02 \x01(\tR\x0c\x65rrorMessage\x12.\n\x13remote_registry_url\x18\x03 \x01(\tR\x11remoteRegistryUrl\x12J\n\nbasic_auth\x18\x04 \x01(\x0b\x32).agntcy.dir.store.v1.BasicAuthCredentialsH\x00R\tbasicAuthB\r\n\x0b\x63redentials\"N\n\x14\x42\x61sicAuthCredentials\x12\x1a\n\x08username\x18\x01 \x01(\tR\x08username\x12\x1a\n\x08password\x18\x02 
\x01(\tR\x08password*\xb0\x01\n\nSyncStatus\x12\x1b\n\x17SYNC_STATUS_UNSPECIFIED\x10\x00\x12\x17\n\x13SYNC_STATUS_PENDING\x10\x01\x12\x1b\n\x17SYNC_STATUS_IN_PROGRESS\x10\x02\x12\x16\n\x12SYNC_STATUS_FAILED\x10\x03\x12\x1e\n\x1aSYNC_STATUS_DELETE_PENDING\x10\x04\x12\x17\n\x13SYNC_STATUS_DELETED\x10\x05\x32\x8b\x04\n\x0bSyncService\x12]\n\nCreateSync\x12&.agntcy.dir.store.v1.CreateSyncRequest\x1a\'.agntcy.dir.store.v1.CreateSyncResponse\x12X\n\tListSyncs\x12%.agntcy.dir.store.v1.ListSyncsRequest\x1a\".agntcy.dir.store.v1.ListSyncsItem0\x01\x12T\n\x07GetSync\x12#.agntcy.dir.store.v1.GetSyncRequest\x1a$.agntcy.dir.store.v1.GetSyncResponse\x12]\n\nDeleteSync\x12&.agntcy.dir.store.v1.DeleteSyncRequest\x1a\'.agntcy.dir.store.v1.DeleteSyncResponse\x12\x8d\x01\n\x1aRequestRegistryCredentials\x12\x36.agntcy.dir.store.v1.RequestRegistryCredentialsRequest\x1a\x37.agntcy.dir.store.v1.RequestRegistryCredentialsResponseB\xbe\x01\n\x17\x63om.agntcy.dir.store.v1B\x10SyncServiceProtoP\x01Z\"github.com/agntcy/dir/api/store/v1\xa2\x02\x03\x41\x44S\xaa\x02\x13\x41gntcy.Dir.Store.V1\xca\x02\x13\x41gntcy\\Dir\\Store\\V1\xe2\x02\x1f\x41gntcy\\Dir\\Store\\V1\\GPBMetadata\xea\x02\x16\x41gntcy::Dir::Store::V1b\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.store.v1.sync_service_pb2', _globals) -if not _descriptor._USE_C_DESCRIPTORS: - _globals['DESCRIPTOR']._loaded_options = None - _globals['DESCRIPTOR']._serialized_options = b'\n\027com.agntcy.dir.store.v1B\020SyncServiceProtoP\001Z\"github.com/agntcy/dir/api/store/v1\242\002\003ADS\252\002\023Agntcy.Dir.Store.V1\312\002\023Agntcy\\Dir\\Store\\V1\342\002\037Agntcy\\Dir\\Store\\V1\\GPBMetadata\352\002\026Agntcy::Dir::Store::V1' - _globals['_SYNCSTATUS']._serialized_start=1193 - _globals['_SYNCSTATUS']._serialized_end=1369 - _globals['_CREATESYNCREQUEST']._serialized_start=63 - _globals['_CREATESYNCREQUEST']._serialized_end=152 - _globals['_CREATESYNCRESPONSE']._serialized_start=154 - _globals['_CREATESYNCRESPONSE']._serialized_end=199 - _globals['_LISTSYNCSREQUEST']._serialized_start=201 - _globals['_LISTSYNCSREQUEST']._serialized_end=296 - _globals['_LISTSYNCSITEM']._serialized_start=299 - _globals['_LISTSYNCSITEM']._serialized_end=446 - _globals['_GETSYNCREQUEST']._serialized_start=448 - _globals['_GETSYNCREQUEST']._serialized_end=489 - _globals['_GETSYNCRESPONSE']._serialized_start=492 - _globals['_GETSYNCRESPONSE']._serialized_end=718 - _globals['_DELETESYNCREQUEST']._serialized_start=720 - _globals['_DELETESYNCREQUEST']._serialized_end=764 - _globals['_DELETESYNCRESPONSE']._serialized_start=766 - _globals['_DELETESYNCRESPONSE']._serialized_end=786 - _globals['_REQUESTREGISTRYCREDENTIALSREQUEST']._serialized_start=788 - _globals['_REQUESTREGISTRYCREDENTIALSREQUEST']._serialized_end=869 - _globals['_REQUESTREGISTRYCREDENTIALSRESPONSE']._serialized_start=872 - _globals['_REQUESTREGISTRYCREDENTIALSRESPONSE']._serialized_end=1110 - _globals['_BASICAUTHCREDENTIALS']._serialized_start=1112 - _globals['_BASICAUTHCREDENTIALS']._serialized_end=1190 - _globals['_SYNCSERVICE']._serialized_start=1372 - _globals['_SYNCSERVICE']._serialized_end=1895 -# @@protoc_insertion_point(module_scope) +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE +# source: agntcy/dir/store/v1/sync_service.proto +# Protobuf Python Version: 6.32.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 6, + 32, + 1, + '', + 'agntcy/dir/store/v1/sync_service.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&agntcy/dir/store/v1/sync_service.proto\x12\x13\x61gntcy.dir.store.v1\"Y\n\x11\x43reateSyncRequest\x12\x30\n\x14remote_directory_url\x18\x01 \x01(\tR\x12remoteDirectoryUrl\x12\x12\n\x04\x63ids\x18\x02 \x03(\tR\x04\x63ids\"-\n\x12\x43reateSyncResponse\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"_\n\x10ListSyncsRequest\x12\x19\n\x05limit\x18\x02 \x01(\rH\x00R\x05limit\x88\x01\x01\x12\x1b\n\x06offset\x18\x03 \x01(\rH\x01R\x06offset\x88\x01\x01\x42\x08\n\x06_limitB\t\n\x07_offset\"\x93\x01\n\rListSyncsItem\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.store.v1.SyncStatusR\x06status\x12\x30\n\x14remote_directory_url\x18\x03 \x01(\tR\x12remoteDirectoryUrl\")\n\x0eGetSyncRequest\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"\xe2\x01\n\x0fGetSyncResponse\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\x12\x37\n\x06status\x18\x02 \x01(\x0e\x32\x1f.agntcy.dir.store.v1.SyncStatusR\x06status\x12\x30\n\x14remote_directory_url\x18\x03 \x01(\tR\x12remoteDirectoryUrl\x12!\n\x0c\x63reated_time\x18\x04 \x01(\tR\x0b\x63reatedTime\x12(\n\x10last_update_time\x18\x05 \x01(\tR\x0elastUpdateTime\",\n\x11\x44\x65leteSyncRequest\x12\x17\n\x07sync_id\x18\x01 \x01(\tR\x06syncId\"\x14\n\x12\x44\x65leteSyncResponse\"Q\n!RequestRegistryCredentialsRequest\x12,\n\x12requesting_node_id\x18\x01 \x01(\tR\x10requestingNodeId\"\xee\x01\n\"RequestRegistryCredentialsResponse\x12\x18\n\x07success\x18\x01 \x01(\x08R\x07success\x12#\n\rerror_message\x18\x02 \x01(\tR\x0c\x65rrorMessage\x12.\n\x13remote_registry_url\x18\x03 \x01(\tR\x11remoteRegistryUrl\x12J\n\nbasic_auth\x18\x04 \x01(\x0b\x32).agntcy.dir.store.v1.BasicAuthCredentialsH\x00R\tbasicAuthB\r\n\x0b\x63redentials\"N\n\x14\x42\x61sicAuthCredentials\x12\x1a\n\x08username\x18\x01 \x01(\tR\x08username\x12\x1a\n\x08password\x18\x02 
\x01(\tR\x08password*\xb0\x01\n\nSyncStatus\x12\x1b\n\x17SYNC_STATUS_UNSPECIFIED\x10\x00\x12\x17\n\x13SYNC_STATUS_PENDING\x10\x01\x12\x1b\n\x17SYNC_STATUS_IN_PROGRESS\x10\x02\x12\x16\n\x12SYNC_STATUS_FAILED\x10\x03\x12\x1e\n\x1aSYNC_STATUS_DELETE_PENDING\x10\x04\x12\x17\n\x13SYNC_STATUS_DELETED\x10\x05\x32\x8b\x04\n\x0bSyncService\x12]\n\nCreateSync\x12&.agntcy.dir.store.v1.CreateSyncRequest\x1a\'.agntcy.dir.store.v1.CreateSyncResponse\x12X\n\tListSyncs\x12%.agntcy.dir.store.v1.ListSyncsRequest\x1a\".agntcy.dir.store.v1.ListSyncsItem0\x01\x12T\n\x07GetSync\x12#.agntcy.dir.store.v1.GetSyncRequest\x1a$.agntcy.dir.store.v1.GetSyncResponse\x12]\n\nDeleteSync\x12&.agntcy.dir.store.v1.DeleteSyncRequest\x1a\'.agntcy.dir.store.v1.DeleteSyncResponse\x12\x8d\x01\n\x1aRequestRegistryCredentials\x12\x36.agntcy.dir.store.v1.RequestRegistryCredentialsRequest\x1a\x37.agntcy.dir.store.v1.RequestRegistryCredentialsResponseB\xbe\x01\n\x17\x63om.agntcy.dir.store.v1B\x10SyncServiceProtoP\x01Z\"github.com/agntcy/dir/api/store/v1\xa2\x02\x03\x41\x44S\xaa\x02\x13\x41gntcy.Dir.Store.V1\xca\x02\x13\x41gntcy\\Dir\\Store\\V1\xe2\x02\x1f\x41gntcy\\Dir\\Store\\V1\\GPBMetadata\xea\x02\x16\x41gntcy::Dir::Store::V1b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'agntcy.dir.store.v1.sync_service_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\027com.agntcy.dir.store.v1B\020SyncServiceProtoP\001Z\"github.com/agntcy/dir/api/store/v1\242\002\003ADS\252\002\023Agntcy.Dir.Store.V1\312\002\023Agntcy\\Dir\\Store\\V1\342\002\037Agntcy\\Dir\\Store\\V1\\GPBMetadata\352\002\026Agntcy::Dir::Store::V1' + _globals['_SYNCSTATUS']._serialized_start=1193 + _globals['_SYNCSTATUS']._serialized_end=1369 + _globals['_CREATESYNCREQUEST']._serialized_start=63 + _globals['_CREATESYNCREQUEST']._serialized_end=152 + _globals['_CREATESYNCRESPONSE']._serialized_start=154 + _globals['_CREATESYNCRESPONSE']._serialized_end=199 + _globals['_LISTSYNCSREQUEST']._serialized_start=201 + _globals['_LISTSYNCSREQUEST']._serialized_end=296 + _globals['_LISTSYNCSITEM']._serialized_start=299 + _globals['_LISTSYNCSITEM']._serialized_end=446 + _globals['_GETSYNCREQUEST']._serialized_start=448 + _globals['_GETSYNCREQUEST']._serialized_end=489 + _globals['_GETSYNCRESPONSE']._serialized_start=492 + _globals['_GETSYNCRESPONSE']._serialized_end=718 + _globals['_DELETESYNCREQUEST']._serialized_start=720 + _globals['_DELETESYNCREQUEST']._serialized_end=764 + _globals['_DELETESYNCRESPONSE']._serialized_start=766 + _globals['_DELETESYNCRESPONSE']._serialized_end=786 + _globals['_REQUESTREGISTRYCREDENTIALSREQUEST']._serialized_start=788 + _globals['_REQUESTREGISTRYCREDENTIALSREQUEST']._serialized_end=869 + _globals['_REQUESTREGISTRYCREDENTIALSRESPONSE']._serialized_start=872 + _globals['_REQUESTREGISTRYCREDENTIALSRESPONSE']._serialized_end=1110 + _globals['_BASICAUTHCREDENTIALS']._serialized_start=1112 + _globals['_BASICAUTHCREDENTIALS']._serialized_end=1190 + _globals['_SYNCSERVICE']._serialized_start=1372 + _globals['_SYNCSERVICE']._serialized_end=1895 +# @@protoc_insertion_point(module_scope) diff --git a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.pyi b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.pyi index 9c838d1a8..2c5c501ab 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.pyi +++ b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2.pyi @@ 
-1,110 +1,110 @@ -from google.protobuf.internal import containers as _containers -from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union - -DESCRIPTOR: _descriptor.FileDescriptor - -class SyncStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): - __slots__ = () - SYNC_STATUS_UNSPECIFIED: _ClassVar[SyncStatus] - SYNC_STATUS_PENDING: _ClassVar[SyncStatus] - SYNC_STATUS_IN_PROGRESS: _ClassVar[SyncStatus] - SYNC_STATUS_FAILED: _ClassVar[SyncStatus] - SYNC_STATUS_DELETE_PENDING: _ClassVar[SyncStatus] - SYNC_STATUS_DELETED: _ClassVar[SyncStatus] -SYNC_STATUS_UNSPECIFIED: SyncStatus -SYNC_STATUS_PENDING: SyncStatus -SYNC_STATUS_IN_PROGRESS: SyncStatus -SYNC_STATUS_FAILED: SyncStatus -SYNC_STATUS_DELETE_PENDING: SyncStatus -SYNC_STATUS_DELETED: SyncStatus - -class CreateSyncRequest(_message.Message): - __slots__ = ("remote_directory_url", "cids") - REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] - CIDS_FIELD_NUMBER: _ClassVar[int] - remote_directory_url: str - cids: _containers.RepeatedScalarFieldContainer[str] - def __init__(self, remote_directory_url: _Optional[str] = ..., cids: _Optional[_Iterable[str]] = ...) -> None: ... - -class CreateSyncResponse(_message.Message): - __slots__ = ("sync_id",) - SYNC_ID_FIELD_NUMBER: _ClassVar[int] - sync_id: str - def __init__(self, sync_id: _Optional[str] = ...) -> None: ... - -class ListSyncsRequest(_message.Message): - __slots__ = ("limit", "offset") - LIMIT_FIELD_NUMBER: _ClassVar[int] - OFFSET_FIELD_NUMBER: _ClassVar[int] - limit: int - offset: int - def __init__(self, limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... - -class ListSyncsItem(_message.Message): - __slots__ = ("sync_id", "status", "remote_directory_url") - SYNC_ID_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] - sync_id: str - status: SyncStatus - remote_directory_url: str - def __init__(self, sync_id: _Optional[str] = ..., status: _Optional[_Union[SyncStatus, str]] = ..., remote_directory_url: _Optional[str] = ...) -> None: ... - -class GetSyncRequest(_message.Message): - __slots__ = ("sync_id",) - SYNC_ID_FIELD_NUMBER: _ClassVar[int] - sync_id: str - def __init__(self, sync_id: _Optional[str] = ...) -> None: ... - -class GetSyncResponse(_message.Message): - __slots__ = ("sync_id", "status", "remote_directory_url", "created_time", "last_update_time") - SYNC_ID_FIELD_NUMBER: _ClassVar[int] - STATUS_FIELD_NUMBER: _ClassVar[int] - REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] - CREATED_TIME_FIELD_NUMBER: _ClassVar[int] - LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] - sync_id: str - status: SyncStatus - remote_directory_url: str - created_time: str - last_update_time: str - def __init__(self, sync_id: _Optional[str] = ..., status: _Optional[_Union[SyncStatus, str]] = ..., remote_directory_url: _Optional[str] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... - -class DeleteSyncRequest(_message.Message): - __slots__ = ("sync_id",) - SYNC_ID_FIELD_NUMBER: _ClassVar[int] - sync_id: str - def __init__(self, sync_id: _Optional[str] = ...) -> None: ... - -class DeleteSyncResponse(_message.Message): - __slots__ = () - def __init__(self) -> None: ... 
- -class RequestRegistryCredentialsRequest(_message.Message): - __slots__ = ("requesting_node_id",) - REQUESTING_NODE_ID_FIELD_NUMBER: _ClassVar[int] - requesting_node_id: str - def __init__(self, requesting_node_id: _Optional[str] = ...) -> None: ... - -class RequestRegistryCredentialsResponse(_message.Message): - __slots__ = ("success", "error_message", "remote_registry_url", "basic_auth") - SUCCESS_FIELD_NUMBER: _ClassVar[int] - ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] - REMOTE_REGISTRY_URL_FIELD_NUMBER: _ClassVar[int] - BASIC_AUTH_FIELD_NUMBER: _ClassVar[int] - success: bool - error_message: str - remote_registry_url: str - basic_auth: BasicAuthCredentials - def __init__(self, success: bool = ..., error_message: _Optional[str] = ..., remote_registry_url: _Optional[str] = ..., basic_auth: _Optional[_Union[BasicAuthCredentials, _Mapping]] = ...) -> None: ... - -class BasicAuthCredentials(_message.Message): - __slots__ = ("username", "password") - USERNAME_FIELD_NUMBER: _ClassVar[int] - PASSWORD_FIELD_NUMBER: _ClassVar[int] - username: str - password: str - def __init__(self, username: _Optional[str] = ..., password: _Optional[str] = ...) -> None: ... +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class SyncStatus(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + SYNC_STATUS_UNSPECIFIED: _ClassVar[SyncStatus] + SYNC_STATUS_PENDING: _ClassVar[SyncStatus] + SYNC_STATUS_IN_PROGRESS: _ClassVar[SyncStatus] + SYNC_STATUS_FAILED: _ClassVar[SyncStatus] + SYNC_STATUS_DELETE_PENDING: _ClassVar[SyncStatus] + SYNC_STATUS_DELETED: _ClassVar[SyncStatus] +SYNC_STATUS_UNSPECIFIED: SyncStatus +SYNC_STATUS_PENDING: SyncStatus +SYNC_STATUS_IN_PROGRESS: SyncStatus +SYNC_STATUS_FAILED: SyncStatus +SYNC_STATUS_DELETE_PENDING: SyncStatus +SYNC_STATUS_DELETED: SyncStatus + +class CreateSyncRequest(_message.Message): + __slots__ = ("remote_directory_url", "cids") + REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] + CIDS_FIELD_NUMBER: _ClassVar[int] + remote_directory_url: str + cids: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, remote_directory_url: _Optional[str] = ..., cids: _Optional[_Iterable[str]] = ...) -> None: ... + +class CreateSyncResponse(_message.Message): + __slots__ = ("sync_id",) + SYNC_ID_FIELD_NUMBER: _ClassVar[int] + sync_id: str + def __init__(self, sync_id: _Optional[str] = ...) -> None: ... + +class ListSyncsRequest(_message.Message): + __slots__ = ("limit", "offset") + LIMIT_FIELD_NUMBER: _ClassVar[int] + OFFSET_FIELD_NUMBER: _ClassVar[int] + limit: int + offset: int + def __init__(self, limit: _Optional[int] = ..., offset: _Optional[int] = ...) -> None: ... + +class ListSyncsItem(_message.Message): + __slots__ = ("sync_id", "status", "remote_directory_url") + SYNC_ID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] + sync_id: str + status: SyncStatus + remote_directory_url: str + def __init__(self, sync_id: _Optional[str] = ..., status: _Optional[_Union[SyncStatus, str]] = ..., remote_directory_url: _Optional[str] = ...) -> None: ... 
+ +class GetSyncRequest(_message.Message): + __slots__ = ("sync_id",) + SYNC_ID_FIELD_NUMBER: _ClassVar[int] + sync_id: str + def __init__(self, sync_id: _Optional[str] = ...) -> None: ... + +class GetSyncResponse(_message.Message): + __slots__ = ("sync_id", "status", "remote_directory_url", "created_time", "last_update_time") + SYNC_ID_FIELD_NUMBER: _ClassVar[int] + STATUS_FIELD_NUMBER: _ClassVar[int] + REMOTE_DIRECTORY_URL_FIELD_NUMBER: _ClassVar[int] + CREATED_TIME_FIELD_NUMBER: _ClassVar[int] + LAST_UPDATE_TIME_FIELD_NUMBER: _ClassVar[int] + sync_id: str + status: SyncStatus + remote_directory_url: str + created_time: str + last_update_time: str + def __init__(self, sync_id: _Optional[str] = ..., status: _Optional[_Union[SyncStatus, str]] = ..., remote_directory_url: _Optional[str] = ..., created_time: _Optional[str] = ..., last_update_time: _Optional[str] = ...) -> None: ... + +class DeleteSyncRequest(_message.Message): + __slots__ = ("sync_id",) + SYNC_ID_FIELD_NUMBER: _ClassVar[int] + sync_id: str + def __init__(self, sync_id: _Optional[str] = ...) -> None: ... + +class DeleteSyncResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class RequestRegistryCredentialsRequest(_message.Message): + __slots__ = ("requesting_node_id",) + REQUESTING_NODE_ID_FIELD_NUMBER: _ClassVar[int] + requesting_node_id: str + def __init__(self, requesting_node_id: _Optional[str] = ...) -> None: ... + +class RequestRegistryCredentialsResponse(_message.Message): + __slots__ = ("success", "error_message", "remote_registry_url", "basic_auth") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + ERROR_MESSAGE_FIELD_NUMBER: _ClassVar[int] + REMOTE_REGISTRY_URL_FIELD_NUMBER: _ClassVar[int] + BASIC_AUTH_FIELD_NUMBER: _ClassVar[int] + success: bool + error_message: str + remote_registry_url: str + basic_auth: BasicAuthCredentials + def __init__(self, success: bool = ..., error_message: _Optional[str] = ..., remote_registry_url: _Optional[str] = ..., basic_auth: _Optional[_Union[BasicAuthCredentials, _Mapping]] = ...) -> None: ... + +class BasicAuthCredentials(_message.Message): + __slots__ = ("username", "password") + USERNAME_FIELD_NUMBER: _ClassVar[int] + PASSWORD_FIELD_NUMBER: _ClassVar[int] + username: str + password: str + def __init__(self, username: _Optional[str] = ..., password: _Optional[str] = ...) -> None: ... diff --git a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2_grpc.py b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2_grpc.py index 7e4e911f3..603f8e156 100644 --- a/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2_grpc.py +++ b/sdk/dir-py/agntcy/dir/store/v1/sync_service_pb2_grpc.py @@ -1,277 +1,277 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from agntcy.dir.store.v1 import sync_service_pb2 as agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2 - - -class SyncServiceStub(object): - """SyncService provides functionality for synchronizing objects between Directory nodes. - - This service enables one-way synchronization from a remote Directory node to the local node, - allowing distributed Directory instances to share and replicate objects. The service supports - both on-demand synchronization and tracking of sync operations through their lifecycle. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateSync = channel.unary_unary( - '/agntcy.dir.store.v1.SyncService/CreateSync', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.FromString, - _registered_method=True) - self.ListSyncs = channel.unary_stream( - '/agntcy.dir.store.v1.SyncService/ListSyncs', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.FromString, - _registered_method=True) - self.GetSync = channel.unary_unary( - '/agntcy.dir.store.v1.SyncService/GetSync', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.FromString, - _registered_method=True) - self.DeleteSync = channel.unary_unary( - '/agntcy.dir.store.v1.SyncService/DeleteSync', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.FromString, - _registered_method=True) - self.RequestRegistryCredentials = channel.unary_unary( - '/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials', - request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.SerializeToString, - response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.FromString, - _registered_method=True) - - -class SyncServiceServicer(object): - """SyncService provides functionality for synchronizing objects between Directory nodes. - - This service enables one-way synchronization from a remote Directory node to the local node, - allowing distributed Directory instances to share and replicate objects. The service supports - both on-demand synchronization and tracking of sync operations through their lifecycle. - """ - - def CreateSync(self, request, context): - """CreateSync initiates a new synchronization operation from a remote Directory node. - - The operation is non-blocking and returns immediately with a sync ID that can be used - to track progress and manage the sync operation. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListSyncs(self, request, context): - """ListSyncs returns a stream of all sync operations known to the system. - - This includes active, completed, and failed synchronizations. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetSync(self, request, context): - """GetSync retrieves detailed status information for a specific synchronization. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteSync(self, request, context): - """DeleteSync removes a synchronization operation from the system. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def RequestRegistryCredentials(self, request, context): - """RequestRegistryCredentials requests registry credentials between two Directory nodes. - - This RPC allows a requesting node to authenticate with this node and obtain - temporary registry credentials for secure Zot-based synchronization. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_SyncServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateSync': grpc.unary_unary_rpc_method_handler( - servicer.CreateSync, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.SerializeToString, - ), - 'ListSyncs': grpc.unary_stream_rpc_method_handler( - servicer.ListSyncs, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.SerializeToString, - ), - 'GetSync': grpc.unary_unary_rpc_method_handler( - servicer.GetSync, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.SerializeToString, - ), - 'DeleteSync': grpc.unary_unary_rpc_method_handler( - servicer.DeleteSync, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.SerializeToString, - ), - 'RequestRegistryCredentials': grpc.unary_unary_rpc_method_handler( - servicer.RequestRegistryCredentials, - request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.FromString, - response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'agntcy.dir.store.v1.SyncService', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - server.add_registered_method_handlers('agntcy.dir.store.v1.SyncService', rpc_method_handlers) - - - # This class is part of an EXPERIMENTAL API. -class SyncService(object): - """SyncService provides functionality for synchronizing objects between Directory nodes. - - This service enables one-way synchronization from a remote Directory node to the local node, - allowing distributed Directory instances to share and replicate objects. The service supports - both on-demand synchronization and tracking of sync operations through their lifecycle. 
- """ - - @staticmethod - def CreateSync(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.store.v1.SyncService/CreateSync', - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def ListSyncs(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_stream( - request, - target, - '/agntcy.dir.store.v1.SyncService/ListSyncs', - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def GetSync(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.store.v1.SyncService/GetSync', - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def DeleteSync(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.store.v1.SyncService/DeleteSync', - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) - - @staticmethod - def RequestRegistryCredentials(request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.unary_unary( - request, - target, - '/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials', - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.SerializeToString, - agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.FromString, - options, - channel_credentials, - insecure, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - _registered_method=True) +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from agntcy.dir.store.v1 import sync_service_pb2 as agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2 + + +class SyncServiceStub(object): + """SyncService provides functionality for synchronizing objects between Directory nodes. + + This service enables one-way synchronization from a remote Directory node to the local node, + allowing distributed Directory instances to share and replicate objects. The service supports + both on-demand synchronization and tracking of sync operations through their lifecycle. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateSync = channel.unary_unary( + '/agntcy.dir.store.v1.SyncService/CreateSync', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.FromString, + _registered_method=True) + self.ListSyncs = channel.unary_stream( + '/agntcy.dir.store.v1.SyncService/ListSyncs', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.FromString, + _registered_method=True) + self.GetSync = channel.unary_unary( + '/agntcy.dir.store.v1.SyncService/GetSync', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.FromString, + _registered_method=True) + self.DeleteSync = channel.unary_unary( + '/agntcy.dir.store.v1.SyncService/DeleteSync', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.FromString, + _registered_method=True) + self.RequestRegistryCredentials = channel.unary_unary( + '/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials', + request_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.SerializeToString, + response_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.FromString, + _registered_method=True) + + +class SyncServiceServicer(object): + """SyncService provides functionality for synchronizing objects between Directory nodes. + + This service enables one-way synchronization from a remote Directory node to the local node, + allowing distributed Directory instances to share and replicate objects. The service supports + both on-demand synchronization and tracking of sync operations through their lifecycle. + """ + + def CreateSync(self, request, context): + """CreateSync initiates a new synchronization operation from a remote Directory node. + + The operation is non-blocking and returns immediately with a sync ID that can be used + to track progress and manage the sync operation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListSyncs(self, request, context): + """ListSyncs returns a stream of all sync operations known to the system. + + This includes active, completed, and failed synchronizations. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetSync(self, request, context): + """GetSync retrieves detailed status information for a specific synchronization. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteSync(self, request, context): + """DeleteSync removes a synchronization operation from the system. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RequestRegistryCredentials(self, request, context): + """RequestRegistryCredentials requests registry credentials between two Directory nodes. + + This RPC allows a requesting node to authenticate with this node and obtain + temporary registry credentials for secure Zot-based synchronization. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_SyncServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateSync': grpc.unary_unary_rpc_method_handler( + servicer.CreateSync, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.SerializeToString, + ), + 'ListSyncs': grpc.unary_stream_rpc_method_handler( + servicer.ListSyncs, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.SerializeToString, + ), + 'GetSync': grpc.unary_unary_rpc_method_handler( + servicer.GetSync, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.SerializeToString, + ), + 'DeleteSync': grpc.unary_unary_rpc_method_handler( + servicer.DeleteSync, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.SerializeToString, + ), + 'RequestRegistryCredentials': grpc.unary_unary_rpc_method_handler( + servicer.RequestRegistryCredentials, + request_deserializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.FromString, + response_serializer=agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'agntcy.dir.store.v1.SyncService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('agntcy.dir.store.v1.SyncService', rpc_method_handlers) + + + # This class is part of an EXPERIMENTAL API. +class SyncService(object): + """SyncService provides functionality for synchronizing objects between Directory nodes. + + This service enables one-way synchronization from a remote Directory node to the local node, + allowing distributed Directory instances to share and replicate objects. The service supports + both on-demand synchronization and tracking of sync operations through their lifecycle. 
+ """ + + @staticmethod + def CreateSync(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.store.v1.SyncService/CreateSync', + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.CreateSyncResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ListSyncs(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/agntcy.dir.store.v1.SyncService/ListSyncs', + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.ListSyncsItem.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def GetSync(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.store.v1.SyncService/GetSync', + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.GetSyncResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def DeleteSync(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.store.v1.SyncService/DeleteSync', + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.DeleteSyncResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def RequestRegistryCredentials(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/agntcy.dir.store.v1.SyncService/RequestRegistryCredentials', + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsRequest.SerializeToString, + agntcy_dot_dir_dot_store_dot_v1_dot_sync__service__pb2.RequestRegistryCredentialsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/sdk/dir-py/agntcy/dir_sdk/client/__init__.py b/sdk/dir-py/agntcy/dir_sdk/client/__init__.py index ea2520f6a..ffe175c82 100644 --- a/sdk/dir-py/agntcy/dir_sdk/client/__init__.py +++ 
b/sdk/dir-py/agntcy/dir_sdk/client/__init__.py @@ -1,5 +1,5 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -from agntcy.dir_sdk.client.client import Client as Client -from agntcy.dir_sdk.client.config import Config as Config +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +from agntcy.dir_sdk.client.client import Client as Client +from agntcy.dir_sdk.client.config import Config as Config diff --git a/sdk/dir-py/agntcy/dir_sdk/client/client.py b/sdk/dir-py/agntcy/dir_sdk/client/client.py index 40949e704..a23d0d727 100644 --- a/sdk/dir-py/agntcy/dir_sdk/client/client.py +++ b/sdk/dir-py/agntcy/dir_sdk/client/client.py @@ -1,1264 +1,1264 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -"""Client module for the AGNTCY Directory service. - -This module provides a high-level Python client for interacting with the AGNTCY -Directory services including routing, search, store, and signing operations. -""" - -import builtins -import logging -import os -from typing import List -import subprocess -import tempfile -from collections.abc import Sequence - -import grpc -from cryptography.hazmat.primitives import serialization -from spiffe import WorkloadApiClient, X509Source - -from agntcy.dir_sdk.client.config import Config -from agntcy.dir_sdk.models import ( - core_v1, - events_v1, - routing_v1, - search_v1, - sign_v1, - store_v1, -) - -logger = logging.getLogger("client") - - -class JWTAuthInterceptor(grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor, - grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor): - """gRPC interceptor that adds JWT-SVID authentication to requests.""" - - def __init__(self, socket_path: str, audience: str) -> None: - """Initialize the JWT auth interceptor. - - Args: - socket_path: Path to the SPIFFE Workload API socket - audience: JWT audience claim for token validation - - """ - self.socket_path = socket_path - self.audience = audience - self._workload_client = WorkloadApiClient(socket_path=socket_path) - - def _get_jwt_token(self) -> str: - """Fetch a JWT-SVID from the SPIRE Workload API. 
- - Returns: - JWT token string - - Raises: - RuntimeError: If unable to fetch JWT-SVID - - """ - try: - # Fetch JWT-SVID with the configured audience - jwt_svid = self._workload_client.fetch_jwt_svid(audience=[self.audience]) - if jwt_svid and jwt_svid.token: - return jwt_svid.token - msg = "Failed to fetch JWT-SVID: empty token" - raise RuntimeError(msg) - except Exception as e: - msg = f"Failed to fetch JWT-SVID: {e}" - raise RuntimeError(msg) from e - - def _add_jwt_metadata(self, client_call_details): - """Add JWT token to request metadata.""" - token = self._get_jwt_token() - metadata = [] - if client_call_details.metadata is not None: - metadata = list(client_call_details.metadata) - metadata.append(("authorization", f"Bearer {token}")) - - return grpc._interceptor._ClientCallDetails( - method=client_call_details.method, - timeout=client_call_details.timeout, - metadata=metadata, - credentials=client_call_details.credentials, - wait_for_ready=client_call_details.wait_for_ready, - compression=client_call_details.compression, - ) - - def intercept_unary_unary(self, continuation, client_call_details, request): - """Intercept unary-unary RPC calls.""" - new_details = self._add_jwt_metadata(client_call_details) - return continuation(new_details, request) - - def intercept_unary_stream(self, continuation, client_call_details, request): - """Intercept unary-stream RPC calls.""" - new_details = self._add_jwt_metadata(client_call_details) - return continuation(new_details, request) - - def intercept_stream_unary(self, continuation, client_call_details, request_iterator): - """Intercept stream-unary RPC calls.""" - new_details = self._add_jwt_metadata(client_call_details) - return continuation(new_details, request_iterator) - - def intercept_stream_stream(self, continuation, client_call_details, request_iterator): - """Intercept stream-stream RPC calls.""" - new_details = self._add_jwt_metadata(client_call_details) - return continuation(new_details, request_iterator) - - -class Client: - """High-level client for interacting with AGNTCY Directory services. - - This client provides a unified interface for operations across Dir API. - It handles gRPC communication and provides convenient methods for common operations. - - Example: - >>> config = Config.load_from_env() - >>> client = Client(config) - >>> # Use client for operations... - - """ - - def __init__(self, config: Config | None = None) -> None: - """Initialize the client with the given configuration. - - Args: - config: Optional client configuration. If None, loads from environment - variables using Config.load_from_env(). 
- - Raises: - grpc.RpcError: If unable to establish connection to the server - ValueError: If configuration is invalid - - """ - # Load config if unset - if config is None: - config = Config.load_from_env() - self.config = config - - # Create gRPC channel - channel = self.__create_grpc_channel() - - # Initialize service clients - self.store_client = store_v1.StoreServiceStub(channel) - self.routing_client = routing_v1.RoutingServiceStub(channel) - self.publication_client = routing_v1.PublicationServiceStub(channel) - self.search_client = search_v1.SearchServiceStub(channel) - self.sign_client = sign_v1.SignServiceStub(channel) - self.sync_client = store_v1.SyncServiceStub(channel) - self.event_client = events_v1.EventServiceStub(channel) - - def __create_grpc_channel(self) -> grpc.Channel: - # Handle different authentication modes - if self.config.auth_mode == "": - return grpc.insecure_channel(self.config.server_address) - elif self.config.auth_mode == "jwt": - return self.__create_jwt_channel() - elif self.config.auth_mode == "x509": - return self.__create_x509_channel() - elif self.config.auth_mode == "tls": - return self.__create_tls_channel() - else: - msg = f"Unsupported auth mode: {self.config.auth_mode}" - raise ValueError(msg) - - def __create_x509_channel(self) -> grpc.Channel: - """Create a secure gRPC channel using SPIFFE X.509.""" - if self.config.spiffe_socket_path == "": - msg = "SPIFFE socket path is required for X.509 authentication" - raise ValueError(msg) - - # Create secure gRPC channel using SPIFFE X.509 - workload_client = WorkloadApiClient(socket_path=self.config.spiffe_socket_path) - x509_src = X509Source( - workload_api_client=workload_client, - socket_path=self.config.spiffe_socket_path, - timeout_in_seconds=60, - ) - - root_ca = b"" - for b in x509_src.bundles: - for a in b.x509_authorities: - root_ca += a.public_bytes(encoding=serialization.Encoding.PEM) - - private_key = x509_src.svid.private_key.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.PKCS8, - encryption_algorithm=serialization.NoEncryption(), - ) - - public_leaf = x509_src.svid.leaf.public_bytes( - encoding=serialization.Encoding.PEM - ) - - credentials = grpc.ssl_channel_credentials( - root_certificates=root_ca, - private_key=private_key, - certificate_chain=public_leaf, - ) - - channel = grpc.secure_channel( - target=self.config.server_address, - credentials=credentials, - ) - - return channel - - def __create_jwt_channel(self) -> grpc.Channel: - """Create a gRPC channel with JWT authentication.""" - if self.config.spiffe_socket_path == "": - msg = "SPIFFE socket path is required for JWT authentication" - raise ValueError(msg) - - if self.config.jwt_audience == "": - msg = "JWT audience is required for JWT authentication" - raise ValueError(msg) - - # Create X509Source to get the SPIFFE bundle for TLS verification - # In JWT mode, the server presents its X.509-SVID via TLS for transport security - # The X509Source will handle fetching the bundle from the Workload API - workload_client = WorkloadApiClient(socket_path=self.config.spiffe_socket_path) - x509_source = X509Source( - workload_api_client=workload_client, - socket_path=self.config.spiffe_socket_path, - timeout_in_seconds=60, - ) - - # Extract the CA certificates from all bundles - root_ca = b"" - for bundle in x509_source.bundles: - for authority in bundle.x509_authorities: - root_ca += authority.public_bytes(encoding=serialization.Encoding.PEM) - - if not root_ca: - msg = "Failed to fetch X.509 
bundle from SPIRE: no bundles returned" - raise RuntimeError(msg) - - # Create JWT interceptor - jwt_interceptor = JWTAuthInterceptor( - socket_path=self.config.spiffe_socket_path, - audience=self.config.jwt_audience - ) - - # Create secure channel with JWT interceptor and TLS using SPIFFE bundle - # For JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID - credentials = grpc.ssl_channel_credentials(root_certificates=root_ca) - channel = grpc.secure_channel( - target=self.config.server_address, - credentials=credentials, - ) - channel = grpc.intercept_channel(channel, jwt_interceptor) - - # Close the X509Source since we only needed it to get the bundle - x509_source.close() - - return channel - - def __create_tls_channel(self) -> grpc.Channel: - if not self.config.tls_ca_file: - msg = "TLS CA file is required for TLS authentication" - raise ValueError(msg) - if not self.config.tls_cert_file: - msg = "TLS certificate file is required for TLS authentication" - raise ValueError(msg) - if not self.config.tls_key_file: - msg = "TLS key file is required for TLS authentication" - raise ValueError(msg) - - try: - with open(self.config.tls_ca_file, "rb") as f: - root_ca = f.read() - with open(self.config.tls_cert_file, "rb") as f: - cert_chain = f.read() - with open(self.config.tls_key_file, "rb") as f: - private_key = f.read() - except OSError as e: - msg = f"Failed to read TLS files: {e}" - raise RuntimeError(msg) from e - - credentials = grpc.ssl_channel_credentials( - root_certificates=root_ca, - private_key=private_key, - certificate_chain=cert_chain, - ) - - channel = grpc.secure_channel( - target=self.config.server_address, - credentials=credentials, - ) - - return channel - - def publish( - self, - req: routing_v1.PublishRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> None: - """Publish objects to the Routing API matching the specified criteria. - - Makes the specified objects available for discovery and retrieval by other - clients in the network. The objects must already exist in the store before - they can be published. - - Args: - req: Publish request containing the query for the objects to publish - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the object is not found or cannot be published - - Example: - >>> ref = routing_v1.RecordRef(cid="QmExample123") - >>> req = routing_v1.PublishRequest(record_refs=[ref]) - >>> client.publish(req) - - """ - try: - self.routing_client.Publish(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during publish: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during publish: %s", e) - msg = f"Failed to publish object: {e}" - raise RuntimeError(msg) from e - - def list( - self, - req: routing_v1.ListRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> list[routing_v1.ListResponse]: - """List objects from the Routing API matching the specified criteria. - - Returns a list of objects that match the filtering and - query criteria specified in the request. - - Args: - req: List request specifying filtering criteria, pagination, etc. - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[routing_v1.ListResponse]: List of items matching the criteria - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) 
- RuntimeError: If the list operation fails - - Example: - >>> req = routing_v1.ListRequest(limit=10) - >>> responses = client.list(req) - >>> for response in responses: - ... print(f"Found object: {response.cid}") - - """ - results: list[routing_v1.ListResponse] = [] - - try: - stream = self.routing_client.List(req, metadata=metadata) - results.extend(stream) - except grpc.RpcError as e: - logger.exception("gRPC error during list: %s", e) - raise - except Exception as e: - logger.exception("Error receiving objects: %s", e) - msg = f"Failed to list objects: {e}" - raise RuntimeError(msg) from e - - return results - - def search_cids( - self, - req: search_v1.SearchCIDsRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[search_v1.SearchCIDsResponse]: - """Search for record CIDs matching the specified queries. - - Performs a search across the storage using the provided search queries - and returns a list of matching CIDs. This is efficient for lookups - where only the CIDs are needed. - - Args: - req: Search request containing queries, filters, and search options - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[search_v1.SearchCIDsResponse]: List of CIDs matching the queries - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the search operation fails - - Example: - >>> req = search_v1.SearchCIDsRequest(queries=[query], limit=10) - >>> responses = client.search_cids(req) - >>> for response in responses: - ... print(f"Found CID: {response.record_cid}") - - """ - results: list[search_v1.SearchCIDsResponse] = [] - - try: - stream = self.search_client.SearchCIDs(req, metadata=metadata) - results.extend(stream) - except grpc.RpcError as e: - logger.exception("gRPC error during search: %s", e) - raise - except Exception as e: - logger.exception("Error receiving search results: %s", e) - msg = f"Failed to search CIDs: {e}" - raise RuntimeError(msg) from e - - return results - - def search_records( - self, - req: search_v1.SearchRecordsRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[search_v1.SearchRecordsResponse]: - """Search for full records matching the specified queries. - - Performs a search across the storage using the provided search queries - and returns a list of full records with all metadata. - - Args: - req: Search request containing queries, filters, and search options - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[search_v1.SearchRecordsResponse]: List of records matching the queries - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the search operation fails - - Example: - >>> req = search_v1.SearchRecordsRequest(queries=[query], limit=10) - >>> responses = client.search_records(req) - >>> for response in responses: - ... 
print(f"Found: {response.record.name}") - - """ - results: list[search_v1.SearchRecordsResponse] = [] - - try: - stream = self.search_client.SearchRecords(req, metadata=metadata) - results.extend(stream) - except grpc.RpcError as e: - logger.exception("gRPC error during search: %s", e) - raise - except Exception as e: - logger.exception("Error receiving search results: %s", e) - msg = f"Failed to search records: {e}" - raise RuntimeError(msg) from e - - return results - - def unpublish( - self, - req: routing_v1.UnpublishRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> None: - """Unpublish objects from the Routing API matching the specified criteria. - - Removes the specified objects from the public network, making them no - longer discoverable by other clients. The objects remain in the local - store but are not available for network discovery. - - Args: - req: Unpublish request containing the query for the objects to unpublish - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the objects cannot be unpublished - - Example: - >>> ref = routing_v1.RecordRef(cid="QmExample123") - >>> req = routing_v1.UnpublishRequest(record_refs=[ref]) - >>> client.unpublish(req) - - """ - try: - self.routing_client.Unpublish(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during unpublish: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during unpublish: %s", e) - msg = f"Failed to unpublish object: {e}" - raise RuntimeError(msg) from e - - def push( - self, - records: builtins.list[core_v1.Record], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[core_v1.RecordRef]: - """Push records to the Store API. - - Uploads one or more records to the content store, making them available - for retrieval and reference. Each record is assigned a unique content - identifier (CID) based on its content hash. - - Args: - records: List of Record objects to push to the store - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[core_v1.RecordRef]: List of objects containing the CIDs of the pushed records - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the push operation fails - - Example: - >>> records = [create_record("example")] - >>> refs = client.push(records) - >>> print(f"Pushed with CID: {refs[0].cid}") - - """ - results: list[core_v1.RecordRef] = [] - - try: - response = self.store_client.Push(iter(records), metadata=metadata) - results.extend(response) - except grpc.RpcError as e: - logger.exception("gRPC error during push: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during push: %s", e) - msg = f"Failed to push object: {e}" - raise RuntimeError(msg) from e - - return results - - def push_referrer( - self, - req: builtins.list[store_v1.PushReferrerRequest], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[store_v1.PushReferrerResponse]: - """Push records with referrer metadata to the Store API. - - Uploads records along with optional artifacts and referrer information. - This is useful for pushing complex objects that include additional - metadata or associated artifacts. 
- - Args: - req: List of PushReferrerRequest objects containing records and - optional artifacts - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[store_v1.PushReferrerResponse]: List of objects containing the details of pushed artifacts - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the push operation fails - - Example: - >>> requests = [store_v1.PushReferrerRequest(record=record)] - >>> responses = client.push_referrer(requests) - - """ - results: list[store_v1.PushReferrerResponse] = [] - - try: - response = self.store_client.PushReferrer(iter(req), metadata=metadata) - results.extend(response) - except grpc.RpcError as e: - logger.exception("gRPC error during push_referrer: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during push_referrer: %s", e) - msg = f"Failed to push object: {e}" - raise RuntimeError(msg) from e - - return results - - def pull( - self, - refs: builtins.list[core_v1.RecordRef], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[core_v1.Record]: - """Pull records from the Store API by their references. - - Retrieves one or more records from the content store using their - content identifiers (CIDs). - - Args: - refs: List of RecordRef objects containing the CIDs to retrieve - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[core_v1.Record]: List of record objects retrieved from the store - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the pull operation fails - - Example: - >>> refs = [core_v1.RecordRef(cid="QmExample123")] - >>> records = client.pull(refs) - >>> for record in records: - ... print(f"Retrieved record: {record}") - - """ - results: list[core_v1.Record] = [] - - try: - response = self.store_client.Pull(iter(refs), metadata=metadata) - results.extend(response) - except grpc.RpcError as e: - logger.exception("gRPC error during pull: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during pull: %s", e) - msg = f"Failed to pull object: {e}" - raise RuntimeError(msg) from e - - return results - - def pull_referrer( - self, - req: builtins.list[store_v1.PullReferrerRequest], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[store_v1.PullReferrerResponse]: - """Pull records with referrer metadata from the Store API. - - Retrieves records along with their associated artifacts and referrer - information. This provides access to complex objects that include - additional metadata or associated artifacts. - - Args: - req: List of PullReferrerRequest objects containing records and - optional artifacts for pull operations - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[store_v1.PullReferrerResponse]: List of objects containing the retrieved records - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the pull operation fails - - Example: - >>> requests = [store_v1.PullReferrerRequest(ref=ref)] - >>> responses = client.pull_referrer(requests) - >>> for response in responses: - ... 
print(f"Retrieved: {response}") - - """ - results: list[store_v1.PullReferrerResponse] = [] - - try: - response = self.store_client.PullReferrer(iter(req), metadata=metadata) - results.extend(response) - except grpc.RpcError as e: - logger.exception("gRPC error during pull_referrer: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during pull_referrer: %s", e) - msg = f"Failed to pull referrer object: {e}" - raise RuntimeError(msg) from e - - return results - - def lookup( - self, - refs: builtins.list[core_v1.RecordRef], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[core_v1.RecordMeta]: - """Look up metadata for records in the Store API. - - Retrieves metadata information for one or more records without - downloading the full record content. This is useful for checking - if records exist and getting basic information about them. - - Args: - refs: List of RecordRef objects containing the CIDs to look up - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List[core_v1.RecordMeta]: List of objects containing metadata for the records - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the lookup operation fails - - Example: - >>> refs = [core_v1.RecordRef(cid="QmExample123")] - >>> metadatas = client.lookup(refs) - >>> for meta in metadatas: - ... print(f"Record size: {meta.size}") - - """ - results: list[core_v1.RecordMeta] = [] - - try: - response = self.store_client.Lookup(iter(refs), metadata=metadata) - results.extend(response) - except grpc.RpcError as e: - logger.exception("gRPC error during lookup: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during lookup: %s", e) - msg = f"Failed to lookup object: {e}" - raise RuntimeError(msg) from e - - return results - - def delete( - self, - refs: builtins.list[core_v1.RecordRef], - metadata: Sequence[tuple[str, str]] | None = None, - ) -> None: - """Delete records from the Store API. - - Permanently removes one or more records from the content store using - their content identifiers (CIDs). This operation cannot be undone. - - Args: - refs: List of RecordRef objects containing the CIDs to delete - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the delete operation fails - - Example: - >>> refs = [core_v1.RecordRef(cid="QmExample123")] - >>> client.delete(refs) - - """ - try: - self.store_client.Delete(iter(refs), metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during delete: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during delete: %s", e) - msg = f"Failed to delete object: {e}" - raise RuntimeError(msg) from e - - def create_sync( - self, - req: store_v1.CreateSyncRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> store_v1.CreateSyncResponse: - """Create a new synchronization configuration. - - Creates a new sync configuration that defines how data should be - synchronized between different Directory servers. This allows for - automated data replication and consistency across multiple locations. 
- - Args: - req: CreateSyncRequest containing the sync configuration details - including source, target, and synchronization parameters - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - store_v1.CreateSyncResponse: Response containing the created sync details - including the sync ID and configuration - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the sync creation fails - - Example: - >>> req = store_v1.CreateSyncRequest() - >>> response = client.create_sync(req) - >>> print(f"Created sync with ID: {response.sync_id}") - - """ - try: - response = self.sync_client.CreateSync(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during create_sync: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during create_sync: %s", e) - msg = f"Failed to create sync: {e}" - raise RuntimeError(msg) from e - - return response - - def list_syncs( - self, - req: store_v1.ListSyncsRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[store_v1.ListSyncsItem]: - """List existing synchronization configurations. - - Retrieves a list of all sync configurations that have been created, - with optional filtering and pagination support. This allows you to - monitor and manage multiple synchronization processes. - - Args: - req: ListSyncsRequest containing filtering criteria, pagination options, - and other query parameters - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - list[store_v1.ListSyncsItem]: List of sync configuration items with - their details including ID, name, status, - and configuration parameters - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the list operation fails - - Example: - >>> req = store_v1.ListSyncsRequest(limit=10) - >>> syncs = client.list_syncs(req) - >>> for sync in syncs: - ... print(f"Sync: {sync}") - - """ - results: list[store_v1.ListSyncsItem] = [] - - try: - stream = self.sync_client.ListSyncs(req, metadata=metadata) - results.extend(stream) - except grpc.RpcError as e: - logger.exception("gRPC error during list_syncs: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during list_syncs: %s", e) - msg = f"Failed to list syncs: {e}" - raise RuntimeError(msg) from e - - return results - - def get_sync( - self, - req: store_v1.GetSyncRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> store_v1.GetSyncResponse: - """Retrieve detailed information about a specific synchronization configuration. - - Gets comprehensive details about a specific sync configuration including - its current status, configuration parameters, performance metrics, - and any recent errors or warnings. - - Args: - req: GetSyncRequest containing the sync ID or identifier to retrieve - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - store_v1.GetSyncResponse: Detailed information about the sync configuration - including status, metrics, configuration, and logs - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) 
- RuntimeError: If the get operation fails - - Example: - >>> req = store_v1.GetSyncRequest(sync_id="sync-123") - >>> response = client.get_sync(req) - >>> print(f"Sync status: {response.status}") - >>> print(f"Last update: {response.last_update_time}") - - """ - try: - response = self.sync_client.GetSync(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during get_sync: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during get_sync: %s", e) - msg = f"Failed to get sync: {e}" - raise RuntimeError(msg) from e - - return response - - def delete_sync( - self, - req: store_v1.DeleteSyncRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> None: - """Delete a synchronization configuration. - - Permanently removes a sync configuration and stops any ongoing - synchronization processes. This operation cannot be undone and - will halt all data synchronization for the specified configuration. - - Args: - req: DeleteSyncRequest containing the sync ID or identifier to delete - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the delete operation fails - - Example: - >>> req = store_v1.DeleteSyncRequest(sync_id="sync-123") - >>> client.delete_sync(req) - >>> print(f"Sync deleted") - - """ - try: - self.sync_client.DeleteSync(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during delete_sync: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during delete_sync: %s", e) - msg = f"Failed to delete sync: {e}" - raise RuntimeError(msg) from e - - def listen( - self, - req: events_v1.ListenRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> grpc.UnaryStreamMultiCallable: - """ - Listen establishes a streaming connection to receive events. - Events are only delivered while the stream is active. - On disconnect, missed events are not recoverable. - - Args: - req: ListenRequest specifies filters for event subscription. - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - A grpc stream which can read and closed. - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the listen operation fails - """ - - try: - stream = self.event_client.Listen(req, metadata=metadata) - except grpc.RpcError as e: - if e.code() == grpc.StatusCode.CANCELLED: - logger.exception("gRPC listen stream was canceled: %s", e) - raise - else: - logger.exception("gRPC error during listen: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during listen: %s", e) - msg = f"Failed to listen: {e}" - raise RuntimeError(msg) from e - - return stream - - def create_publication( - self, - req: routing_v1.PublishRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> routing_v1.CreatePublicationResponse: - """ - Create publication creates a new publication request that will be processed by the PublicationWorker. - The publication request can specify either a query, a list of specific CIDs, - or all records to be announced to the DHT. - - Args: - req: PublishRequest specifies the record references and queries for publication. - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - CreatePublicationResponse returns the result of creating a publication request. 
- This includes the publication ID and any relevant metadata. - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the publication operation create fails - """ - try: - response = self.publication_client.CreatePublication(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during create_publication: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during create_publication: %s", e) - msg = f"Failed to create publication: {e}" - raise RuntimeError(msg) from e - - return response - - def get_publication( - self, - req: routing_v1.GetPublicationRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> routing_v1.GetPublicationResponse: - """ - GetPublication retrieves details of a specific publication request by its identifier. - This includes the current status and any associated metadata. - - Args: - req: GetPublicationRequest specifies which publication to retrieve by its identifier. - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - GetPublicationResponse contains the full details of a specific publication request. - Includes status, progress information, and any error details if applicable. - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the publication get operation fails - """ - try: - response = self.publication_client.GetPublication(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during get_publication: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during get_publication: %s", e) - msg = f"Failed to get publication: {e}" - raise RuntimeError(msg) from e - - return response - - def list_publication( - self, - req: routing_v1.ListPublicationsRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> builtins.list[routing_v1.ListPublicationsItem]: - """ - ListPublications returns a stream of all publication requests in the system. - This allows monitoring of pending, processing, and completed publication requests. - - Args: - req: ListPublicationsRequest contains optional filters for listing publication requests. - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - List of ListPublicationsItem represents a single publication request in the list response. - Contains publication details including ID, status, and creation timestamp. - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the publication list operation fails - """ - - results: list[routing_v1.ListPublicationsItem] = [] - - try: - stream = self.publication_client.ListPublications(req, metadata=metadata) - results.extend(stream) - except grpc.RpcError as e: - logger.exception("gRPC error during list_publication: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during list_publication: %s", e) - msg = f"Failed to list publication: {e}" - raise RuntimeError(msg) from e - - return results - - - def verify( - self, - req: sign_v1.VerifyRequest, - metadata: Sequence[tuple[str, str]] | None = None, - ) -> sign_v1.VerifyResponse: - """Verify a cryptographic signature on a record. - - Validates the cryptographic signature of a previously signed record - to ensure its authenticity and integrity. This operation verifies - that the record has not been tampered with since signing. 
- - Args: - req: VerifyRequest containing the record reference and verification - parameters - metadata: Optional gRPC metadata headers as sequence of key-value pairs - - Returns: - VerifyResponse containing the verification result and details - - Raises: - grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) - RuntimeError: If the verification operation fails - - Example: - >>> req = sign_v1.VerifyRequest( - ... record_ref=core_v1.RecordRef(cid="QmExample123") - ... ) - >>> response = client.verify(req) - >>> print(f"Signature valid: {response.valid}") - - """ - try: - response = self.sign_client.Verify(req, metadata=metadata) - except grpc.RpcError as e: - logger.exception("gRPC error during verify: %s", e) - raise - except Exception as e: - logger.exception("Unexpected error during verify: %s", e) - msg = f"Failed to verify the object: {e}" - raise RuntimeError(msg) from e - - return response - - def sign( - self, - req: sign_v1.SignRequest, - oidc_client_id: str | None = "sigstore", - ) -> None: - """Sign a record with a cryptographic signature. - - Creates a cryptographic signature for a record using either a private - key or OIDC-based signing. The signing process uses the external dirctl - command-line tool to perform the actual cryptographic operations. - - Args: - req: SignRequest containing the record reference and signing provider - configuration. The provider can specify either key-based signing - (with a private key) or OIDC-based signing - oidc_client_id: OIDC client identifier for OIDC-based signing. - Defaults to "sigstore" - - Raises: - RuntimeError: If the signing operation fails - - Example: - >>> req = sign_v1.SignRequest( - ... record_ref=core_v1.RecordRef(cid="QmExample123"), - ... provider=sign_v1.SignProvider(key=key_config) - ... ) - >>> client.sign(req) - >>> print(f"Signing completed!") - - """ - try: - if len(req.provider.key.private_key) > 0: - self._sign_with_key(req.record_ref, req.provider.key) - else: - self._sign_with_oidc(req.record_ref, req.provider.oidc, oidc_client_id) - except RuntimeError as e: - msg = f"Failed to sign the object: {e}" - raise RuntimeError(msg) from e - except Exception as e: - logger.exception("Signing operation failed: %s", e) - msg = f"Failed to sign the object: {e}" - raise RuntimeError(msg) from e - - def _sign_with_key( - self, - record_ref: core_v1.RecordRef, - key_signer: sign_v1.SignWithKey, - ) -> None: - """Sign a record using a private key. - - This private method handles key-based signing by writing the private key - to a temporary file and executing the dirctl command with the key file. 
- - Args: - req: SignRequest containing the record reference and key provider - - Raises: - RuntimeError: If any other error occurs during signing - - """ - try: - # Create temporary file for the private key - with tempfile.NamedTemporaryFile(delete=False) as tmp_key_file: - tmp_key_file.write(key_signer.private_key) - tmp_key_file.flush() - - # Set up environment with password - shell_env = os.environ.copy() - shell_env["COSIGN_PASSWORD"] = key_signer.password.decode("utf-8") - - # Build and execute the signing command - command = [ - self.config.dirctl_path, - "sign", - record_ref.cid, - "--key", - tmp_key_file.name, - ] - - if self.config.spiffe_socket_path != "": - command.extend(["--spiffe-socket-path", self.config.spiffe_socket_path]) - - subprocess.run( - command, - check=True, - capture_output=True, - env=shell_env, - timeout=60, # 1 minute timeout - ) - - except OSError as e: - msg = f"Failed to write key file to disk: {e}" - raise RuntimeError(msg) from e - except subprocess.CalledProcessError as e: - msg = f"dirctl signing failed with return code {e.returncode}: {e.stderr.decode('utf-8', errors='ignore')}" - raise RuntimeError(msg) from e - except subprocess.TimeoutExpired as e: - msg = "dirctl signing timed out" - raise RuntimeError(msg) from e - except Exception as e: - msg = f"Unexpected error during key-based signing: {e}" - raise RuntimeError(msg) from e - - def _sign_with_oidc( - self, - record_ref: core_v1.RecordRef, - oidc_signer: sign_v1.SignWithOIDC, - oidc_client_id: str = "sigstore", - ) -> None: - """Sign a record using OIDC-based authentication. - - This private method handles OIDC-based signing by building the appropriate - dirctl command with OIDC parameters and executing it. - - Args: - req: SignRequest containing the record reference and OIDC provider - oidc_client_id: OIDC client identifier for authentication - - Raises: - RuntimeError: If any other error occurs during signing - - """ - try: - shell_env = os.environ.copy() - - # Build base command - command = [self.config.dirctl_path, "sign", record_ref.cid] - - # Add OIDC-specific parameters - if oidc_signer.id_token: - command.extend(["--oidc-token", oidc_signer.id_token]) - if oidc_signer.options.oidc_provider_url: - command.extend( - [ - "--oidc-provider-url", - oidc_signer.options.oidc_provider_url, - ] - ) - if oidc_signer.options.fulcio_url: - command.extend(["--fulcio-url", oidc_signer.options.fulcio_url]) - if oidc_signer.options.rekor_url: - command.extend(["--rekor-url", oidc_signer.options.rekor_url]) - if oidc_signer.options.timestamp_url: - command.extend(["--timestamp-url", oidc_signer.options.timestamp_url]) - - # Add client ID - command.extend(["--oidc-client-id", oidc_client_id]) - - if self.config.spiffe_socket_path != "": - command.extend(["--spiffe-socket-path", self.config.spiffe_socket_path]) - - # Execute the signing command - subprocess.run( - command, - check=True, - capture_output=True, - env=shell_env, - timeout=60, # 1 minute timeout - ) - - except subprocess.CalledProcessError as e: - msg = f"dirctl signing failed with return code {e.returncode}: {e.stderr.decode('utf-8', errors='ignore')}" - raise RuntimeError(msg) from e - except subprocess.TimeoutExpired as e: - msg = "dirctl signing timed out" - raise RuntimeError(msg) from e - except Exception as e: - msg = f"Unexpected error during OIDC signing: {e}" - raise RuntimeError(msg) from e +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +"""Client module for the AGNTCY Directory 
service.
+
+This module provides a high-level Python client for interacting with the AGNTCY
+Directory services including routing, search, store, and signing operations.
+"""
+
+import builtins
+import logging
+import os
+import subprocess
+import tempfile
+from collections.abc import Sequence
+
+import grpc
+from cryptography.hazmat.primitives import serialization
+from spiffe import WorkloadApiClient, X509Source
+
+from agntcy.dir_sdk.client.config import Config
+from agntcy.dir_sdk.models import (
+    core_v1,
+    events_v1,
+    routing_v1,
+    search_v1,
+    sign_v1,
+    store_v1,
+)
+
+logger = logging.getLogger("client")
+
+
+class JWTAuthInterceptor(grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor,
+                         grpc.StreamUnaryClientInterceptor, grpc.StreamStreamClientInterceptor):
+    """gRPC interceptor that adds JWT-SVID authentication to requests."""
+
+    def __init__(self, socket_path: str, audience: str) -> None:
+        """Initialize the JWT auth interceptor.
+
+        Args:
+            socket_path: Path to the SPIFFE Workload API socket
+            audience: JWT audience claim for token validation
+
+        """
+        self.socket_path = socket_path
+        self.audience = audience
+        self._workload_client = WorkloadApiClient(socket_path=socket_path)
+
+    def _get_jwt_token(self) -> str:
+        """Fetch a JWT-SVID from the SPIRE Workload API.
+
+        Returns:
+            JWT token string
+
+        Raises:
+            RuntimeError: If unable to fetch JWT-SVID
+
+        """
+        try:
+            # Fetch JWT-SVID with the configured audience
+            jwt_svid = self._workload_client.fetch_jwt_svid(audience=[self.audience])
+            if jwt_svid and jwt_svid.token:
+                return jwt_svid.token
+            msg = "Failed to fetch JWT-SVID: empty token"
+            raise RuntimeError(msg)
+        except Exception as e:
+            msg = f"Failed to fetch JWT-SVID: {e}"
+            raise RuntimeError(msg) from e
+
+    def _add_jwt_metadata(self, client_call_details):
+        """Add JWT token to request metadata."""
+        token = self._get_jwt_token()
+        metadata = []
+        if client_call_details.metadata is not None:
+            metadata = list(client_call_details.metadata)
+        metadata.append(("authorization", f"Bearer {token}"))
+
+        # Note: relies on a private grpc helper to rebuild the call details
+        return grpc._interceptor._ClientCallDetails(
+            method=client_call_details.method,
+            timeout=client_call_details.timeout,
+            metadata=metadata,
+            credentials=client_call_details.credentials,
+            wait_for_ready=client_call_details.wait_for_ready,
+            compression=client_call_details.compression,
+        )
+
+    def intercept_unary_unary(self, continuation, client_call_details, request):
+        """Intercept unary-unary RPC calls."""
+        new_details = self._add_jwt_metadata(client_call_details)
+        return continuation(new_details, request)
+
+    def intercept_unary_stream(self, continuation, client_call_details, request):
+        """Intercept unary-stream RPC calls."""
+        new_details = self._add_jwt_metadata(client_call_details)
+        return continuation(new_details, request)
+
+    def intercept_stream_unary(self, continuation, client_call_details, request_iterator):
+        """Intercept stream-unary RPC calls."""
+        new_details = self._add_jwt_metadata(client_call_details)
+        return continuation(new_details, request_iterator)
+
+    def intercept_stream_stream(self, continuation, client_call_details, request_iterator):
+        """Intercept stream-stream RPC calls."""
+        new_details = self._add_jwt_metadata(client_call_details)
+        return continuation(new_details, request_iterator)
+
+
+class Client:
+    """High-level client for interacting with AGNTCY Directory services.
+
+    This client provides a unified interface for operations across the Dir API.
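+    Depending on Config.auth_mode, the underlying channel is either insecure
+    ("") or secured with SPIFFE JWT ("jwt"), SPIFFE X.509 ("x509"), or
+    mutual TLS from certificate files ("tls").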
+ It handles gRPC communication and provides convenient methods for common operations. + + Example: + >>> config = Config.load_from_env() + >>> client = Client(config) + >>> # Use client for operations... + + """ + + def __init__(self, config: Config | None = None) -> None: + """Initialize the client with the given configuration. + + Args: + config: Optional client configuration. If None, loads from environment + variables using Config.load_from_env(). + + Raises: + grpc.RpcError: If unable to establish connection to the server + ValueError: If configuration is invalid + + """ + # Load config if unset + if config is None: + config = Config.load_from_env() + self.config = config + + # Create gRPC channel + channel = self.__create_grpc_channel() + + # Initialize service clients + self.store_client = store_v1.StoreServiceStub(channel) + self.routing_client = routing_v1.RoutingServiceStub(channel) + self.publication_client = routing_v1.PublicationServiceStub(channel) + self.search_client = search_v1.SearchServiceStub(channel) + self.sign_client = sign_v1.SignServiceStub(channel) + self.sync_client = store_v1.SyncServiceStub(channel) + self.event_client = events_v1.EventServiceStub(channel) + + def __create_grpc_channel(self) -> grpc.Channel: + # Handle different authentication modes + if self.config.auth_mode == "": + return grpc.insecure_channel(self.config.server_address) + elif self.config.auth_mode == "jwt": + return self.__create_jwt_channel() + elif self.config.auth_mode == "x509": + return self.__create_x509_channel() + elif self.config.auth_mode == "tls": + return self.__create_tls_channel() + else: + msg = f"Unsupported auth mode: {self.config.auth_mode}" + raise ValueError(msg) + + def __create_x509_channel(self) -> grpc.Channel: + """Create a secure gRPC channel using SPIFFE X.509.""" + if self.config.spiffe_socket_path == "": + msg = "SPIFFE socket path is required for X.509 authentication" + raise ValueError(msg) + + # Create secure gRPC channel using SPIFFE X.509 + workload_client = WorkloadApiClient(socket_path=self.config.spiffe_socket_path) + x509_src = X509Source( + workload_api_client=workload_client, + socket_path=self.config.spiffe_socket_path, + timeout_in_seconds=60, + ) + + root_ca = b"" + for b in x509_src.bundles: + for a in b.x509_authorities: + root_ca += a.public_bytes(encoding=serialization.Encoding.PEM) + + private_key = x509_src.svid.private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + + public_leaf = x509_src.svid.leaf.public_bytes( + encoding=serialization.Encoding.PEM + ) + + credentials = grpc.ssl_channel_credentials( + root_certificates=root_ca, + private_key=private_key, + certificate_chain=public_leaf, + ) + + channel = grpc.secure_channel( + target=self.config.server_address, + credentials=credentials, + ) + + return channel + + def __create_jwt_channel(self) -> grpc.Channel: + """Create a gRPC channel with JWT authentication.""" + if self.config.spiffe_socket_path == "": + msg = "SPIFFE socket path is required for JWT authentication" + raise ValueError(msg) + + if self.config.jwt_audience == "": + msg = "JWT audience is required for JWT authentication" + raise ValueError(msg) + + # Create X509Source to get the SPIFFE bundle for TLS verification + # In JWT mode, the server presents its X.509-SVID via TLS for transport security + # The X509Source will handle fetching the bundle from the Workload API + workload_client = 
WorkloadApiClient(socket_path=self.config.spiffe_socket_path) + x509_source = X509Source( + workload_api_client=workload_client, + socket_path=self.config.spiffe_socket_path, + timeout_in_seconds=60, + ) + + # Extract the CA certificates from all bundles + root_ca = b"" + for bundle in x509_source.bundles: + for authority in bundle.x509_authorities: + root_ca += authority.public_bytes(encoding=serialization.Encoding.PEM) + + if not root_ca: + msg = "Failed to fetch X.509 bundle from SPIRE: no bundles returned" + raise RuntimeError(msg) + + # Create JWT interceptor + jwt_interceptor = JWTAuthInterceptor( + socket_path=self.config.spiffe_socket_path, + audience=self.config.jwt_audience + ) + + # Create secure channel with JWT interceptor and TLS using SPIFFE bundle + # For JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID + credentials = grpc.ssl_channel_credentials(root_certificates=root_ca) + channel = grpc.secure_channel( + target=self.config.server_address, + credentials=credentials, + ) + channel = grpc.intercept_channel(channel, jwt_interceptor) + + # Close the X509Source since we only needed it to get the bundle + x509_source.close() + + return channel + + def __create_tls_channel(self) -> grpc.Channel: + if not self.config.tls_ca_file: + msg = "TLS CA file is required for TLS authentication" + raise ValueError(msg) + if not self.config.tls_cert_file: + msg = "TLS certificate file is required for TLS authentication" + raise ValueError(msg) + if not self.config.tls_key_file: + msg = "TLS key file is required for TLS authentication" + raise ValueError(msg) + + try: + with open(self.config.tls_ca_file, "rb") as f: + root_ca = f.read() + with open(self.config.tls_cert_file, "rb") as f: + cert_chain = f.read() + with open(self.config.tls_key_file, "rb") as f: + private_key = f.read() + except OSError as e: + msg = f"Failed to read TLS files: {e}" + raise RuntimeError(msg) from e + + credentials = grpc.ssl_channel_credentials( + root_certificates=root_ca, + private_key=private_key, + certificate_chain=cert_chain, + ) + + channel = grpc.secure_channel( + target=self.config.server_address, + credentials=credentials, + ) + + return channel + + def publish( + self, + req: routing_v1.PublishRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> None: + """Publish objects to the Routing API matching the specified criteria. + + Makes the specified objects available for discovery and retrieval by other + clients in the network. The objects must already exist in the store before + they can be published. + + Args: + req: Publish request containing the query for the objects to publish + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) 
+ RuntimeError: If the object is not found or cannot be published + + Example: + >>> ref = routing_v1.RecordRef(cid="QmExample123") + >>> req = routing_v1.PublishRequest(record_refs=[ref]) + >>> client.publish(req) + + """ + try: + self.routing_client.Publish(req, metadata=metadata) + except grpc.RpcError as e: + logger.exception("gRPC error during publish: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during publish: %s", e) + msg = f"Failed to publish object: {e}" + raise RuntimeError(msg) from e + + def list( + self, + req: routing_v1.ListRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> list[routing_v1.ListResponse]: + """List objects from the Routing API matching the specified criteria. + + Returns a list of objects that match the filtering and + query criteria specified in the request. + + Args: + req: List request specifying filtering criteria, pagination, etc. + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[routing_v1.ListResponse]: List of items matching the criteria + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the list operation fails + + Example: + >>> req = routing_v1.ListRequest(limit=10) + >>> responses = client.list(req) + >>> for response in responses: + ... print(f"Found object: {response.cid}") + + """ + results: list[routing_v1.ListResponse] = [] + + try: + stream = self.routing_client.List(req, metadata=metadata) + results.extend(stream) + except grpc.RpcError as e: + logger.exception("gRPC error during list: %s", e) + raise + except Exception as e: + logger.exception("Error receiving objects: %s", e) + msg = f"Failed to list objects: {e}" + raise RuntimeError(msg) from e + + return results + + def search_cids( + self, + req: search_v1.SearchCIDsRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[search_v1.SearchCIDsResponse]: + """Search for record CIDs matching the specified queries. + + Performs a search across the storage using the provided search queries + and returns a list of matching CIDs. This is efficient for lookups + where only the CIDs are needed. + + Args: + req: Search request containing queries, filters, and search options + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[search_v1.SearchCIDsResponse]: List of CIDs matching the queries + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the search operation fails + + Example: + >>> req = search_v1.SearchCIDsRequest(queries=[query], limit=10) + >>> responses = client.search_cids(req) + >>> for response in responses: + ... print(f"Found CID: {response.record_cid}") + + """ + results: list[search_v1.SearchCIDsResponse] = [] + + try: + stream = self.search_client.SearchCIDs(req, metadata=metadata) + results.extend(stream) + except grpc.RpcError as e: + logger.exception("gRPC error during search: %s", e) + raise + except Exception as e: + logger.exception("Error receiving search results: %s", e) + msg = f"Failed to search CIDs: {e}" + raise RuntimeError(msg) from e + + return results + + def search_records( + self, + req: search_v1.SearchRecordsRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[search_v1.SearchRecordsResponse]: + """Search for full records matching the specified queries. 
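+
+        Unlike search_cids, each match carries the full record payload rather
+        than only its CID.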
+ + Performs a search across the storage using the provided search queries + and returns a list of full records with all metadata. + + Args: + req: Search request containing queries, filters, and search options + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[search_v1.SearchRecordsResponse]: List of records matching the queries + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the search operation fails + + Example: + >>> req = search_v1.SearchRecordsRequest(queries=[query], limit=10) + >>> responses = client.search_records(req) + >>> for response in responses: + ... print(f"Found: {response.record.name}") + + """ + results: list[search_v1.SearchRecordsResponse] = [] + + try: + stream = self.search_client.SearchRecords(req, metadata=metadata) + results.extend(stream) + except grpc.RpcError as e: + logger.exception("gRPC error during search: %s", e) + raise + except Exception as e: + logger.exception("Error receiving search results: %s", e) + msg = f"Failed to search records: {e}" + raise RuntimeError(msg) from e + + return results + + def unpublish( + self, + req: routing_v1.UnpublishRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> None: + """Unpublish objects from the Routing API matching the specified criteria. + + Removes the specified objects from the public network, making them no + longer discoverable by other clients. The objects remain in the local + store but are not available for network discovery. + + Args: + req: Unpublish request containing the query for the objects to unpublish + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the objects cannot be unpublished + + Example: + >>> ref = routing_v1.RecordRef(cid="QmExample123") + >>> req = routing_v1.UnpublishRequest(record_refs=[ref]) + >>> client.unpublish(req) + + """ + try: + self.routing_client.Unpublish(req, metadata=metadata) + except grpc.RpcError as e: + logger.exception("gRPC error during unpublish: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during unpublish: %s", e) + msg = f"Failed to unpublish object: {e}" + raise RuntimeError(msg) from e + + def push( + self, + records: builtins.list[core_v1.Record], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[core_v1.RecordRef]: + """Push records to the Store API. + + Uploads one or more records to the content store, making them available + for retrieval and reference. Each record is assigned a unique content + identifier (CID) based on its content hash. + + Args: + records: List of Record objects to push to the store + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[core_v1.RecordRef]: List of objects containing the CIDs of the pushed records + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) 
+ RuntimeError: If the push operation fails + + Example: + >>> records = [create_record("example")] + >>> refs = client.push(records) + >>> print(f"Pushed with CID: {refs[0].cid}") + + """ + results: list[core_v1.RecordRef] = [] + + try: + response = self.store_client.Push(iter(records), metadata=metadata) + results.extend(response) + except grpc.RpcError as e: + logger.exception("gRPC error during push: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during push: %s", e) + msg = f"Failed to push object: {e}" + raise RuntimeError(msg) from e + + return results + + def push_referrer( + self, + req: builtins.list[store_v1.PushReferrerRequest], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[store_v1.PushReferrerResponse]: + """Push records with referrer metadata to the Store API. + + Uploads records along with optional artifacts and referrer information. + This is useful for pushing complex objects that include additional + metadata or associated artifacts. + + Args: + req: List of PushReferrerRequest objects containing records and + optional artifacts + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[store_v1.PushReferrerResponse]: List of objects containing the details of pushed artifacts + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the push operation fails + + Example: + >>> requests = [store_v1.PushReferrerRequest(record=record)] + >>> responses = client.push_referrer(requests) + + """ + results: list[store_v1.PushReferrerResponse] = [] + + try: + response = self.store_client.PushReferrer(iter(req), metadata=metadata) + results.extend(response) + except grpc.RpcError as e: + logger.exception("gRPC error during push_referrer: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during push_referrer: %s", e) + msg = f"Failed to push object: {e}" + raise RuntimeError(msg) from e + + return results + + def pull( + self, + refs: builtins.list[core_v1.RecordRef], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[core_v1.Record]: + """Pull records from the Store API by their references. + + Retrieves one or more records from the content store using their + content identifiers (CIDs). + + Args: + refs: List of RecordRef objects containing the CIDs to retrieve + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[core_v1.Record]: List of record objects retrieved from the store + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the pull operation fails + + Example: + >>> refs = [core_v1.RecordRef(cid="QmExample123")] + >>> records = client.pull(refs) + >>> for record in records: + ... print(f"Retrieved record: {record}") + + """ + results: list[core_v1.Record] = [] + + try: + response = self.store_client.Pull(iter(refs), metadata=metadata) + results.extend(response) + except grpc.RpcError as e: + logger.exception("gRPC error during pull: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during pull: %s", e) + msg = f"Failed to pull object: {e}" + raise RuntimeError(msg) from e + + return results + + def pull_referrer( + self, + req: builtins.list[store_v1.PullReferrerRequest], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[store_v1.PullReferrerResponse]: + """Pull records with referrer metadata from the Store API. 
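+
+        This is the read-side counterpart of push_referrer.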
+ + Retrieves records along with their associated artifacts and referrer + information. This provides access to complex objects that include + additional metadata or associated artifacts. + + Args: + req: List of PullReferrerRequest objects containing records and + optional artifacts for pull operations + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[store_v1.PullReferrerResponse]: List of objects containing the retrieved records + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the pull operation fails + + Example: + >>> requests = [store_v1.PullReferrerRequest(ref=ref)] + >>> responses = client.pull_referrer(requests) + >>> for response in responses: + ... print(f"Retrieved: {response}") + + """ + results: list[store_v1.PullReferrerResponse] = [] + + try: + response = self.store_client.PullReferrer(iter(req), metadata=metadata) + results.extend(response) + except grpc.RpcError as e: + logger.exception("gRPC error during pull_referrer: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during pull_referrer: %s", e) + msg = f"Failed to pull referrer object: {e}" + raise RuntimeError(msg) from e + + return results + + def lookup( + self, + refs: builtins.list[core_v1.RecordRef], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[core_v1.RecordMeta]: + """Look up metadata for records in the Store API. + + Retrieves metadata information for one or more records without + downloading the full record content. This is useful for checking + if records exist and getting basic information about them. + + Args: + refs: List of RecordRef objects containing the CIDs to look up + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List[core_v1.RecordMeta]: List of objects containing metadata for the records + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the lookup operation fails + + Example: + >>> refs = [core_v1.RecordRef(cid="QmExample123")] + >>> metadatas = client.lookup(refs) + >>> for meta in metadatas: + ... print(f"Record size: {meta.size}") + + """ + results: list[core_v1.RecordMeta] = [] + + try: + response = self.store_client.Lookup(iter(refs), metadata=metadata) + results.extend(response) + except grpc.RpcError as e: + logger.exception("gRPC error during lookup: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during lookup: %s", e) + msg = f"Failed to lookup object: {e}" + raise RuntimeError(msg) from e + + return results + + def delete( + self, + refs: builtins.list[core_v1.RecordRef], + metadata: Sequence[tuple[str, str]] | None = None, + ) -> None: + """Delete records from the Store API. + + Permanently removes one or more records from the content store using + their content identifiers (CIDs). This operation cannot be undone. + + Args: + refs: List of RecordRef objects containing the CIDs to delete + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) 
+ RuntimeError: If the delete operation fails + + Example: + >>> refs = [core_v1.RecordRef(cid="QmExample123")] + >>> client.delete(refs) + + """ + try: + self.store_client.Delete(iter(refs), metadata=metadata) + except grpc.RpcError as e: + logger.exception("gRPC error during delete: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during delete: %s", e) + msg = f"Failed to delete object: {e}" + raise RuntimeError(msg) from e + + def create_sync( + self, + req: store_v1.CreateSyncRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> store_v1.CreateSyncResponse: + """Create a new synchronization configuration. + + Creates a new sync configuration that defines how data should be + synchronized between different Directory servers. This allows for + automated data replication and consistency across multiple locations. + + Args: + req: CreateSyncRequest containing the sync configuration details + including source, target, and synchronization parameters + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + store_v1.CreateSyncResponse: Response containing the created sync details + including the sync ID and configuration + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the sync creation fails + + Example: + >>> req = store_v1.CreateSyncRequest() + >>> response = client.create_sync(req) + >>> print(f"Created sync with ID: {response.sync_id}") + + """ + try: + response = self.sync_client.CreateSync(req, metadata=metadata) + except grpc.RpcError as e: + logger.exception("gRPC error during create_sync: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during create_sync: %s", e) + msg = f"Failed to create sync: {e}" + raise RuntimeError(msg) from e + + return response + + def list_syncs( + self, + req: store_v1.ListSyncsRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> builtins.list[store_v1.ListSyncsItem]: + """List existing synchronization configurations. + + Retrieves a list of all sync configurations that have been created, + with optional filtering and pagination support. This allows you to + monitor and manage multiple synchronization processes. + + Args: + req: ListSyncsRequest containing filtering criteria, pagination options, + and other query parameters + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + list[store_v1.ListSyncsItem]: List of sync configuration items with + their details including ID, name, status, + and configuration parameters + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the list operation fails + + Example: + >>> req = store_v1.ListSyncsRequest(limit=10) + >>> syncs = client.list_syncs(req) + >>> for sync in syncs: + ... print(f"Sync: {sync}") + + """ + results: list[store_v1.ListSyncsItem] = [] + + try: + stream = self.sync_client.ListSyncs(req, metadata=metadata) + results.extend(stream) + except grpc.RpcError as e: + logger.exception("gRPC error during list_syncs: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during list_syncs: %s", e) + msg = f"Failed to list syncs: {e}" + raise RuntimeError(msg) from e + + return results + + def get_sync( + self, + req: store_v1.GetSyncRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> store_v1.GetSyncResponse: + """Retrieve detailed information about a specific synchronization configuration. 
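+
+        Use list_syncs first if you need to discover the IDs of existing syncs.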
+
+        Gets comprehensive details about a specific sync configuration including
+        its current status, configuration parameters, performance metrics,
+        and any recent errors or warnings.
+
+        Args:
+            req: GetSyncRequest containing the sync ID or identifier to retrieve
+            metadata: Optional gRPC metadata headers as sequence of key-value pairs
+
+        Returns:
+            store_v1.GetSyncResponse: Detailed information about the sync configuration
+                                      including status, metrics, configuration, and logs
+
+        Raises:
+            grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.)
+            RuntimeError: If the get operation fails
+
+        Example:
+            >>> req = store_v1.GetSyncRequest(sync_id="sync-123")
+            >>> response = client.get_sync(req)
+            >>> print(f"Sync status: {response.status}")
+            >>> print(f"Last update: {response.last_update_time}")
+
+        """
+        try:
+            response = self.sync_client.GetSync(req, metadata=metadata)
+        except grpc.RpcError as e:
+            logger.exception("gRPC error during get_sync: %s", e)
+            raise
+        except Exception as e:
+            logger.exception("Unexpected error during get_sync: %s", e)
+            msg = f"Failed to get sync: {e}"
+            raise RuntimeError(msg) from e
+
+        return response
+
+    def delete_sync(
+        self,
+        req: store_v1.DeleteSyncRequest,
+        metadata: Sequence[tuple[str, str]] | None = None,
+    ) -> None:
+        """Delete a synchronization configuration.
+
+        Permanently removes a sync configuration and stops any ongoing
+        synchronization processes. This operation cannot be undone and
+        will halt all data synchronization for the specified configuration.
+
+        Args:
+            req: DeleteSyncRequest containing the sync ID or identifier to delete
+            metadata: Optional gRPC metadata headers as sequence of key-value pairs
+
+        Raises:
+            grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.)
+            RuntimeError: If the delete operation fails
+
+        Example:
+            >>> req = store_v1.DeleteSyncRequest(sync_id="sync-123")
+            >>> client.delete_sync(req)
+            >>> print("Sync deleted")
+
+        """
+        try:
+            self.sync_client.DeleteSync(req, metadata=metadata)
+        except grpc.RpcError as e:
+            logger.exception("gRPC error during delete_sync: %s", e)
+            raise
+        except Exception as e:
+            logger.exception("Unexpected error during delete_sync: %s", e)
+            msg = f"Failed to delete sync: {e}"
+            raise RuntimeError(msg) from e
+
+    def listen(
+        self,
+        req: events_v1.ListenRequest,
+        metadata: Sequence[tuple[str, str]] | None = None,
+    ) -> grpc.UnaryStreamMultiCallable:
+        """Establish a streaming connection to receive events.
+
+        Events are only delivered while the stream is active.
+        On disconnect, missed events are not recoverable.
+
+        Args:
+            req: ListenRequest specifying filters for the event subscription
+            metadata: Optional gRPC metadata headers as sequence of key-value pairs
+
+        Returns:
+            A gRPC stream that can be iterated for events and closed when done
+
+        Raises:
+            grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.)
+            RuntimeError: If the listen operation fails
+
+        Example:
+            >>> stream = client.listen(events_v1.ListenRequest())
+            >>> for event in stream:
+            ...     print(event)
+
+        """
+        try:
+            stream = self.event_client.Listen(req, metadata=metadata)
+        except grpc.RpcError as e:
+            if e.code() == grpc.StatusCode.CANCELLED:
+                logger.exception("gRPC listen stream was canceled: %s", e)
+                raise
+            else:
+                logger.exception("gRPC error during listen: %s", e)
+                raise
+        except Exception as e:
+            logger.exception("Unexpected error during listen: %s", e)
+            msg = f"Failed to listen: {e}"
+            raise RuntimeError(msg) from e
+
+        return stream
+
+    def create_publication(
+        self,
+        req: routing_v1.PublishRequest,
+        metadata: Sequence[tuple[str, str]] | None = None,
+    ) -> routing_v1.CreatePublicationResponse:
+        """Create a new publication request to be processed by the PublicationWorker.
+
+        The publication request can specify either a query, a list of specific
+        CIDs, or all records to be announced to the DHT.
+
+        Args:
+            req: PublishRequest specifying the record references and queries for publication
+            metadata: Optional gRPC metadata headers as sequence of key-value pairs
+
+        Returns:
+            CreatePublicationResponse containing the result of creating the publication
+            request, including the publication ID and any relevant metadata
+
+        Raises:
+            grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.)
+            RuntimeError: If creating the publication fails
+        """
+        try:
+            response = self.publication_client.CreatePublication(req, metadata=metadata)
+        except grpc.RpcError as e:
+            logger.exception("gRPC error during create_publication: %s", e)
+            raise
+        except Exception as e:
+            logger.exception("Unexpected error during create_publication: %s", e)
+            msg = f"Failed to create publication: {e}"
+            raise RuntimeError(msg) from e
+
+        return response
+
+    def get_publication(
+        self,
+        req: routing_v1.GetPublicationRequest,
+        metadata: Sequence[tuple[str, str]] | None = None,
+    ) -> routing_v1.GetPublicationResponse:
+        """Retrieve details of a specific publication request by its identifier.
+
+        This includes the current status and any associated metadata.
+
+        Args:
+            req: GetPublicationRequest specifying which publication to retrieve by its identifier
+            metadata: Optional gRPC metadata headers as sequence of key-value pairs
+
+        Returns:
+            GetPublicationResponse containing the full details of the publication
+            request, including status, progress information, and any error details
+
+        Raises:
+            grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.)
+            RuntimeError: If the publication get operation fails
+        """
+        try:
+            response = self.publication_client.GetPublication(req, metadata=metadata)
+        except grpc.RpcError as e:
+            logger.exception("gRPC error during get_publication: %s", e)
+            raise
+        except Exception as e:
+            logger.exception("Unexpected error during get_publication: %s", e)
+            msg = f"Failed to get publication: {e}"
+            raise RuntimeError(msg) from e
+
+        return response
+
+    def list_publication(
+        self,
+        req: routing_v1.ListPublicationsRequest,
+        metadata: Sequence[tuple[str, str]] | None = None,
+    ) -> builtins.list[routing_v1.ListPublicationsItem]:
+        """Stream all publication requests in the system.
+
+        This allows monitoring of pending, processing, and completed publication requests.
+
+        Args:
+            req: ListPublicationsRequest containing optional filters for listing publication requests.
+ metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + List of ListPublicationsItem represents a single publication request in the list response. + Contains publication details including ID, status, and creation timestamp. + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the publication list operation fails + """ + + results: list[routing_v1.ListPublicationsItem] = [] + + try: + stream = self.publication_client.ListPublications(req, metadata=metadata) + results.extend(stream) + except grpc.RpcError as e: + logger.exception("gRPC error during list_publication: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during list_publication: %s", e) + msg = f"Failed to list publication: {e}" + raise RuntimeError(msg) from e + + return results + + + def verify( + self, + req: sign_v1.VerifyRequest, + metadata: Sequence[tuple[str, str]] | None = None, + ) -> sign_v1.VerifyResponse: + """Verify a cryptographic signature on a record. + + Validates the cryptographic signature of a previously signed record + to ensure its authenticity and integrity. This operation verifies + that the record has not been tampered with since signing. + + Args: + req: VerifyRequest containing the record reference and verification + parameters + metadata: Optional gRPC metadata headers as sequence of key-value pairs + + Returns: + VerifyResponse containing the verification result and details + + Raises: + grpc.RpcError: If the gRPC call fails (includes InvalidArgument, NotFound, etc.) + RuntimeError: If the verification operation fails + + Example: + >>> req = sign_v1.VerifyRequest( + ... record_ref=core_v1.RecordRef(cid="QmExample123") + ... ) + >>> response = client.verify(req) + >>> print(f"Signature valid: {response.valid}") + + """ + try: + response = self.sign_client.Verify(req, metadata=metadata) + except grpc.RpcError as e: + logger.exception("gRPC error during verify: %s", e) + raise + except Exception as e: + logger.exception("Unexpected error during verify: %s", e) + msg = f"Failed to verify the object: {e}" + raise RuntimeError(msg) from e + + return response + + def sign( + self, + req: sign_v1.SignRequest, + oidc_client_id: str | None = "sigstore", + ) -> None: + """Sign a record with a cryptographic signature. + + Creates a cryptographic signature for a record using either a private + key or OIDC-based signing. The signing process uses the external dirctl + command-line tool to perform the actual cryptographic operations. + + Args: + req: SignRequest containing the record reference and signing provider + configuration. The provider can specify either key-based signing + (with a private key) or OIDC-based signing + oidc_client_id: OIDC client identifier for OIDC-based signing. + Defaults to "sigstore" + + Raises: + RuntimeError: If the signing operation fails + + Example: + >>> req = sign_v1.SignRequest( + ... record_ref=core_v1.RecordRef(cid="QmExample123"), + ... provider=sign_v1.SignProvider(key=key_config) + ... 
) + >>> client.sign(req) + >>> print(f"Signing completed!") + + """ + try: + if len(req.provider.key.private_key) > 0: + self._sign_with_key(req.record_ref, req.provider.key) + else: + self._sign_with_oidc(req.record_ref, req.provider.oidc, oidc_client_id) + except RuntimeError as e: + msg = f"Failed to sign the object: {e}" + raise RuntimeError(msg) from e + except Exception as e: + logger.exception("Signing operation failed: %s", e) + msg = f"Failed to sign the object: {e}" + raise RuntimeError(msg) from e + + def _sign_with_key( + self, + record_ref: core_v1.RecordRef, + key_signer: sign_v1.SignWithKey, + ) -> None: + """Sign a record using a private key. + + This private method handles key-based signing by writing the private key + to a temporary file and executing the dirctl command with the key file. + + Args: + req: SignRequest containing the record reference and key provider + + Raises: + RuntimeError: If any other error occurs during signing + + """ + try: + # Create temporary file for the private key + with tempfile.NamedTemporaryFile(delete=False) as tmp_key_file: + tmp_key_file.write(key_signer.private_key) + tmp_key_file.flush() + + # Set up environment with password + shell_env = os.environ.copy() + shell_env["COSIGN_PASSWORD"] = key_signer.password.decode("utf-8") + + # Build and execute the signing command + command = [ + self.config.dirctl_path, + "sign", + record_ref.cid, + "--key", + tmp_key_file.name, + ] + + if self.config.spiffe_socket_path != "": + command.extend(["--spiffe-socket-path", self.config.spiffe_socket_path]) + + subprocess.run( + command, + check=True, + capture_output=True, + env=shell_env, + timeout=60, # 1 minute timeout + ) + + except OSError as e: + msg = f"Failed to write key file to disk: {e}" + raise RuntimeError(msg) from e + except subprocess.CalledProcessError as e: + msg = f"dirctl signing failed with return code {e.returncode}: {e.stderr.decode('utf-8', errors='ignore')}" + raise RuntimeError(msg) from e + except subprocess.TimeoutExpired as e: + msg = "dirctl signing timed out" + raise RuntimeError(msg) from e + except Exception as e: + msg = f"Unexpected error during key-based signing: {e}" + raise RuntimeError(msg) from e + + def _sign_with_oidc( + self, + record_ref: core_v1.RecordRef, + oidc_signer: sign_v1.SignWithOIDC, + oidc_client_id: str = "sigstore", + ) -> None: + """Sign a record using OIDC-based authentication. + + This private method handles OIDC-based signing by building the appropriate + dirctl command with OIDC parameters and executing it. 
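+
+        The flag set follows the sigstore keyless flow: Fulcio issues the
+        short-lived signing certificate and Rekor records the signature in a
+        transparency log, with an optional timestamp authority countersignature.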
+ + Args: + req: SignRequest containing the record reference and OIDC provider + oidc_client_id: OIDC client identifier for authentication + + Raises: + RuntimeError: If any other error occurs during signing + + """ + try: + shell_env = os.environ.copy() + + # Build base command + command = [self.config.dirctl_path, "sign", record_ref.cid] + + # Add OIDC-specific parameters + if oidc_signer.id_token: + command.extend(["--oidc-token", oidc_signer.id_token]) + if oidc_signer.options.oidc_provider_url: + command.extend( + [ + "--oidc-provider-url", + oidc_signer.options.oidc_provider_url, + ] + ) + if oidc_signer.options.fulcio_url: + command.extend(["--fulcio-url", oidc_signer.options.fulcio_url]) + if oidc_signer.options.rekor_url: + command.extend(["--rekor-url", oidc_signer.options.rekor_url]) + if oidc_signer.options.timestamp_url: + command.extend(["--timestamp-url", oidc_signer.options.timestamp_url]) + + # Add client ID + command.extend(["--oidc-client-id", oidc_client_id]) + + if self.config.spiffe_socket_path != "": + command.extend(["--spiffe-socket-path", self.config.spiffe_socket_path]) + + # Execute the signing command + subprocess.run( + command, + check=True, + capture_output=True, + env=shell_env, + timeout=60, # 1 minute timeout + ) + + except subprocess.CalledProcessError as e: + msg = f"dirctl signing failed with return code {e.returncode}: {e.stderr.decode('utf-8', errors='ignore')}" + raise RuntimeError(msg) from e + except subprocess.TimeoutExpired as e: + msg = "dirctl signing timed out" + raise RuntimeError(msg) from e + except Exception as e: + msg = f"Unexpected error during OIDC signing: {e}" + raise RuntimeError(msg) from e diff --git a/sdk/dir-py/agntcy/dir_sdk/client/config.py b/sdk/dir-py/agntcy/dir_sdk/client/config.py index f3df1cadc..78ce5efcd 100644 --- a/sdk/dir-py/agntcy/dir_sdk/client/config.py +++ b/sdk/dir-py/agntcy/dir_sdk/client/config.py @@ -1,85 +1,85 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -import os - - -class Config: - DEFAULT_SERVER_ADDRESS = "127.0.0.1:8888" - DEFAULT_DIRCTL_PATH = "dirctl" - DEFAULT_SPIFFE_SOCKET_PATH = "" - DEFAULT_AUTH_MODE = "" - DEFAULT_JWT_AUDIENCE = "" - DEFAULT_TLS_CA_FILE = "" - DEFAULT_TLS_CERT_FILE = "" - DEFAULT_TLS_KEY_FILE = "" - - def __init__( - self, - server_address: str = DEFAULT_SERVER_ADDRESS, - dirctl_path: str = DEFAULT_DIRCTL_PATH, - spiffe_socket_path: str = DEFAULT_SPIFFE_SOCKET_PATH, - auth_mode: str = DEFAULT_AUTH_MODE, - jwt_audience: str = DEFAULT_JWT_AUDIENCE, - tls_ca_file: str = DEFAULT_TLS_CA_FILE, - tls_cert_file: str = DEFAULT_TLS_CERT_FILE, - tls_key_file: str = DEFAULT_TLS_KEY_FILE, - ) -> None: - self.server_address = server_address - self.dirctl_path = dirctl_path - self.spiffe_socket_path = spiffe_socket_path - self.auth_mode = auth_mode # '' for insecure, 'x509', 'jwt' or 'tls' - self.jwt_audience = jwt_audience - self.tls_ca_file = tls_ca_file - self.tls_cert_file = tls_cert_file - self.tls_key_file = tls_key_file - - @staticmethod - def load_from_env(env_prefix: str = "DIRECTORY_CLIENT_") -> "Config": - """Load configuration from environment variables.""" - # Get dirctl path from environment variable without prefix - dirctl_path = os.environ.get( - "DIRCTL_PATH", - Config.DEFAULT_DIRCTL_PATH, - ) - - # Use prefixed environment variables for other settings - server_address = os.environ.get( - f"{env_prefix}SERVER_ADDRESS", - Config.DEFAULT_SERVER_ADDRESS, - ) - spiffe_socket_path = os.environ.get( - 
f"{env_prefix}SPIFFE_SOCKET_PATH", - Config.DEFAULT_SPIFFE_SOCKET_PATH, - ) - auth_mode = os.environ.get( - f"{env_prefix}AUTH_MODE", - Config.DEFAULT_AUTH_MODE, - ) - jwt_audience = os.environ.get( - f"{env_prefix}JWT_AUDIENCE", - Config.DEFAULT_JWT_AUDIENCE, - ) - tls_ca_file = os.environ.get( - f"{env_prefix}TLS_CA_FILE", - Config.DEFAULT_TLS_CA_FILE, - ) - tls_cert_file = os.environ.get( - f"{env_prefix}TLS_CERT_FILE", - Config.DEFAULT_TLS_CERT_FILE, - ) - tls_key_file = os.environ.get( - f"{env_prefix}TLS_KEY_FILE", - Config.DEFAULT_TLS_KEY_FILE, - ) - - return Config( - server_address=server_address, - dirctl_path=dirctl_path, - spiffe_socket_path=spiffe_socket_path, - auth_mode=auth_mode, - jwt_audience=jwt_audience, - tls_ca_file=tls_ca_file, - tls_cert_file=tls_cert_file, - tls_key_file=tls_key_file, - ) +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +import os + + +class Config: + DEFAULT_SERVER_ADDRESS = "127.0.0.1:8888" + DEFAULT_DIRCTL_PATH = "dirctl" + DEFAULT_SPIFFE_SOCKET_PATH = "" + DEFAULT_AUTH_MODE = "" + DEFAULT_JWT_AUDIENCE = "" + DEFAULT_TLS_CA_FILE = "" + DEFAULT_TLS_CERT_FILE = "" + DEFAULT_TLS_KEY_FILE = "" + + def __init__( + self, + server_address: str = DEFAULT_SERVER_ADDRESS, + dirctl_path: str = DEFAULT_DIRCTL_PATH, + spiffe_socket_path: str = DEFAULT_SPIFFE_SOCKET_PATH, + auth_mode: str = DEFAULT_AUTH_MODE, + jwt_audience: str = DEFAULT_JWT_AUDIENCE, + tls_ca_file: str = DEFAULT_TLS_CA_FILE, + tls_cert_file: str = DEFAULT_TLS_CERT_FILE, + tls_key_file: str = DEFAULT_TLS_KEY_FILE, + ) -> None: + self.server_address = server_address + self.dirctl_path = dirctl_path + self.spiffe_socket_path = spiffe_socket_path + self.auth_mode = auth_mode # '' for insecure, 'x509', 'jwt' or 'tls' + self.jwt_audience = jwt_audience + self.tls_ca_file = tls_ca_file + self.tls_cert_file = tls_cert_file + self.tls_key_file = tls_key_file + + @staticmethod + def load_from_env(env_prefix: str = "DIRECTORY_CLIENT_") -> "Config": + """Load configuration from environment variables.""" + # Get dirctl path from environment variable without prefix + dirctl_path = os.environ.get( + "DIRCTL_PATH", + Config.DEFAULT_DIRCTL_PATH, + ) + + # Use prefixed environment variables for other settings + server_address = os.environ.get( + f"{env_prefix}SERVER_ADDRESS", + Config.DEFAULT_SERVER_ADDRESS, + ) + spiffe_socket_path = os.environ.get( + f"{env_prefix}SPIFFE_SOCKET_PATH", + Config.DEFAULT_SPIFFE_SOCKET_PATH, + ) + auth_mode = os.environ.get( + f"{env_prefix}AUTH_MODE", + Config.DEFAULT_AUTH_MODE, + ) + jwt_audience = os.environ.get( + f"{env_prefix}JWT_AUDIENCE", + Config.DEFAULT_JWT_AUDIENCE, + ) + tls_ca_file = os.environ.get( + f"{env_prefix}TLS_CA_FILE", + Config.DEFAULT_TLS_CA_FILE, + ) + tls_cert_file = os.environ.get( + f"{env_prefix}TLS_CERT_FILE", + Config.DEFAULT_TLS_CERT_FILE, + ) + tls_key_file = os.environ.get( + f"{env_prefix}TLS_KEY_FILE", + Config.DEFAULT_TLS_KEY_FILE, + ) + + return Config( + server_address=server_address, + dirctl_path=dirctl_path, + spiffe_socket_path=spiffe_socket_path, + auth_mode=auth_mode, + jwt_audience=jwt_audience, + tls_ca_file=tls_ca_file, + tls_cert_file=tls_cert_file, + tls_key_file=tls_key_file, + ) diff --git a/sdk/dir-py/agntcy/dir_sdk/client/test_client.py b/sdk/dir-py/agntcy/dir_sdk/client/test_client.py index cfe7da94f..61d7dfc05 100644 --- a/sdk/dir-py/agntcy/dir_sdk/client/test_client.py +++ b/sdk/dir-py/agntcy/dir_sdk/client/test_client.py @@ -1,493 +1,493 @@ -# Copyright AGNTCY 
Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 -import os -import pathlib -import subprocess -import time -import threading -import unittest -import uuid - -import grpc - -from agntcy.dir_sdk.client import Client -from agntcy.dir_sdk.models import * - - -class TestClient(unittest.TestCase): - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - # Verify that `DIRCTL_PATH` is set in the environment - assert os.environ.get("DIRCTL_PATH") is not None - - # Initialize the client - self.client = Client() - - def test_push(self) -> None: - records = self.gen_records(2, "push") - record_refs = self.client.push(records=records) - - assert record_refs is not None - assert isinstance(record_refs, list) - assert len(record_refs) == 2 - - for ref in record_refs: - assert isinstance(ref, core_v1.RecordRef) - assert len(ref.cid) == 59 - - def test_pull(self) -> None: - records = self.gen_records(2, "pull") - record_refs = self.client.push(records=records) - pulled_records = self.client.pull(refs=record_refs) - - assert pulled_records is not None - assert isinstance(pulled_records, list) - assert len(pulled_records) == 2 - - for index, record in enumerate(pulled_records): - assert isinstance(record, core_v1.Record) - assert records[index] == record - - def test_lookup(self) -> None: - records = self.gen_records(2, "lookup") - record_refs = self.client.push(records=records) - metadatas = self.client.lookup(record_refs) - - assert metadatas is not None - assert isinstance(metadatas, list) - assert len(metadatas) == 2 - - for metadata in metadatas: - assert isinstance(metadata, core_v1.RecordMeta) - - def test_publish(self) -> None: - records = self.gen_records(1, "publish") - record_refs = self.client.push(records=records) - publish_request = routing_v1.PublishRequest( - record_refs=routing_v1.RecordRefs(refs=record_refs), - ) - - try: - self.client.publish(publish_request) - except Exception as e: - assert e is None - - def test_list(self) -> None: - records = self.gen_records(1, "list") - record_refs = self.client.push(records=records) - self.client.publish(routing_v1.PublishRequest( - record_refs=routing_v1.RecordRefs(refs=record_refs), - )) - - # Sleep to allow the publication to be indexed - time.sleep(5) - - # Query for records in the domain - list_query = routing_v1.RecordQuery( - type=routing_v1.RECORD_QUERY_TYPE_DOMAIN, - value="technology/networking", - ) - - list_request = routing_v1.ListRequest(queries=[list_query]) - objects = list(self.client.list(list_request)) - - assert objects is not None - assert len(objects) != 0 - - for o in objects: - assert isinstance(o, routing_v1.ListResponse) - - def test_search(self) -> None: - records = self.gen_records(1, "search") - _ = self.client.push(records=records) - - search_query = search_v1.RecordQuery( - type=search_v1.RECORD_QUERY_TYPE_SKILL_ID, - value="10201", - ) - - search_request = search_v1.SearchCIDsRequest(queries=[search_query], limit=2) - - objects = list(self.client.search_cids(search_request)) - - assert objects is not None - assert len(objects) > 0 - - for o in objects: - assert isinstance(o, search_v1.SearchCIDsResponse) - - def test_unpublish(self) -> None: - records = self.gen_records(1, "unpublish") - record_refs = self.client.push(records=records) - - publish_record_refs = routing_v1.RecordRefs(refs=record_refs) - _ = routing_v1.PublishRequest(record_refs=publish_record_refs) - unpublish_request = routing_v1.UnpublishRequest(record_refs=publish_record_refs) - - try: - 
self.client.unpublish(unpublish_request) - except Exception as e: - assert e is None - - def test_delete(self) -> None: - records = self.gen_records(1, "delete") - record_refs = self.client.push(records=records) - try: - self.client.delete(record_refs) - except Exception as e: - assert e is None - - def test_push_referrer(self) -> None: - records = self.gen_records(2, "push_referrer") - record_refs = self.client.push(records=records) - - try: - request = [ - store_v1.PushReferrerRequest( - record_ref=record_refs[0], - referrer=core_v1.RecordReferrer( - type=sign_v1.Signature.DESCRIPTOR.full_name, - data={ - "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" - "annotations": { - "payload": "test-payload-data" - } - } - ), - ), - store_v1.PushReferrerRequest( - record_ref=record_refs[1], - referrer=core_v1.RecordReferrer( - type=sign_v1.Signature.DESCRIPTOR.full_name, - data={ - "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" - "annotations": { - "payload": "test-payload-data" - } - } - ), - ), - ] - - response = self.client.push_referrer(req=request) - - assert response is not None - assert len(response) == 2 - - for r in response: - assert isinstance(r, store_v1.PushReferrerResponse) - - except Exception as e: - assert e is None - - def test_pull_referrer(self) -> None: - records = self.gen_records(2, "pull_referrer") - record_refs = self.client.push(records=records) - - # Push referrers to these records - request = [ - store_v1.PushReferrerRequest( - record_ref=record_refs[0], - referrer=core_v1.RecordReferrer( - type=sign_v1.Signature.DESCRIPTOR.full_name, - data={ - "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" - "annotations": { - "payload": "test-payload-data" - } - } - ), - ), - store_v1.PushReferrerRequest( - record_ref=record_refs[1], - referrer=core_v1.RecordReferrer( - type=sign_v1.Signature.DESCRIPTOR.full_name, - data={ - "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" - "annotations": { - "payload": "test-payload-data" - } - } - ), - ), - ] - response = self.client.push_referrer(req=request) - assert response is not None - assert len(response) == 2 - for r in response: - assert isinstance(r, store_v1.PushReferrerResponse) - - try: - request = [ - store_v1.PullReferrerRequest( - record_ref=record_refs[0], - referrer_type=sign_v1.Signature.DESCRIPTOR.full_name, - ), - store_v1.PullReferrerRequest( - record_ref=record_refs[1], - referrer_type=sign_v1.Signature.DESCRIPTOR.full_name, - ), - ] - - response = self.client.pull_referrer(req=request) - - assert response is not None - assert len(response) == 2 - - for r in response: - assert isinstance(r, store_v1.PullReferrerResponse) - except Exception as e: - assert "pull referrer not implemented" in str( - e, - ) # Delete when the service implemented - - # self.assertIsNone(e) # Uncomment when the service implemented - - def test_sign_and_verify(self) -> None: - records = self.gen_records(2, "sign_verify") - record_refs = self.client.push(records=records) - - # Remove existing cosign keys if any - try: - pathlib.Path("cosign.key").unlink() - pathlib.Path("cosign.pub").unlink() - except FileNotFoundError: - pass # Clean state found - - # Prepare cosign key pair - key_password = "testing-key" - - # Set environment variable for cosign password - shell_env = os.environ.copy() - shell_env["COSIGN_PASSWORD"] = key_password - - # Generate a key pair using cosign - cosign_path = os.getenv("COSIGN_PATH", "cosign") - command = (cosign_path, 
"generate-key-pair") - subprocess.run(command, check=True, capture_output=True, env=shell_env) - - with open("cosign.key", "rb") as reader: - key_file = reader.read() - - # Prepare Key signing request - key_provider = sign_v1.SignWithKey( - private_key=key_file, - password=key_password.encode("utf-8"), - ) - - request_key_provider = sign_v1.SignRequestProvider(key=key_provider) - key_request = sign_v1.SignRequest( - record_ref=record_refs[0], - provider=request_key_provider, - ) - - # Prepare OIDC signing request - token = shell_env.get("OIDC_TOKEN", "") - provider_url = shell_env.get("OIDC_PROVIDER_URL", "") - client_id = shell_env.get("OIDC_CLIENT_ID", "sigstore") - - oidc_options = sign_v1.SignWithOIDC.SignOpts(oidc_provider_url=provider_url) - oidc_provider = sign_v1.SignWithOIDC(id_token=token, options=oidc_options) - request_oidc_provider = sign_v1.SignRequestProvider(oidc=oidc_provider) - oidc_request = sign_v1.SignRequest( - record_ref=record_refs[1], - provider=request_oidc_provider, - ) - - try: - # Sign and verify using Key signing - self.client.sign(key_request) - - # Sign and verify using OIDC signing if set - if shell_env.get("OIDC_TOKEN", "") != "" and shell_env.get("OIDC_PROVIDER_URL", "") != "": - self.client.sign(oidc_request, client_id) - else: - record_refs.pop() # NOTE: Drop the unsigned record if no OIDC tested - - for ref in record_refs: - response = self.client.verify(sign_v1.VerifyRequest(record_ref=ref)) - - assert response.success is True - - except Exception as e: - assert e is None - finally: - pathlib.Path("cosign.key").unlink() - pathlib.Path("cosign.pub").unlink() - - # Test invalid sign request - invalid_request = sign_v1.SignRequest( - record_ref=core_v1.RecordRef(cid="invalid-cid"), - provider=request_key_provider, - ) - try: - self.client.sign(invalid_request) - except RuntimeError as e: - assert "Failed to sign the object" in str(e) - - def test_sync(self) -> None: - try: - create_request = store_v1.CreateSyncRequest( - remote_directory_url=os.getenv( - "DIRECTORY_SERVER_PEER1_ADDRESS", - "0.0.0.0:8891", - ), - ) - create_response = self.client.create_sync(create_request) - - try: - assert uuid.UUID(create_response.sync_id) - except ValueError: - msg = f"Not an UUID: {create_response.sync_id}" - raise ValueError(msg) - - list_request = store_v1.ListSyncsRequest() - list_response = self.client.list_syncs(list_request) - - for sync_item in list_response: - try: - assert isinstance(sync_item, store_v1.ListSyncsItem) - assert uuid.UUID(sync_item.sync_id) - except ValueError: - msg = f"Not an UUID: {sync_item.sync_id}" - raise ValueError(msg) - - get_request = store_v1.GetSyncRequest(sync_id=create_response.sync_id) - get_response = self.client.get_sync(get_request) - - assert isinstance(get_response, store_v1.GetSyncResponse) - assert get_response.sync_id == create_response.sync_id - - delete_request = store_v1.DeleteSyncRequest(sync_id=create_response.sync_id) - self.client.delete_sync(delete_request) - - except Exception as e: - assert e is None - - def test_listen(self) -> None: - listen_request = events_v1.ListenRequest() - listen_stream = self.client.listen(listen_request) - events = [] - - def cancel_stream(): - time.sleep(15) - listen_stream.cancel() - - def read_stream(): - try: - for response in listen_stream: - events.append(response) - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.CANCELLED: - raise - except Exception as e: - msg = f"Failed to listen: {e}" - raise RuntimeError(msg) from e - - cancel_thread = 
threading.Thread(target=cancel_stream) - read_thread = threading.Thread(target=read_stream) - - cancel_thread.start() - read_thread.start() - - event_records = self.gen_records(10, "listen") - _ = self.client.push(records=event_records) - - cancel_thread.join() - - assert events is not None - assert len(events) > 0 - - for o in events: - assert isinstance(o, events_v1.ListenResponse) - - def test_publication(self) -> None: - records = self.gen_records(1, "publication") - record_refs = self.client.push(records=records) - - try: - create_request = routing_v1.PublishRequest( - record_refs=routing_v1.RecordRefs(refs=record_refs), - ) - - create_response = self.client.create_publication(create_request) - - try: - assert isinstance(create_response, routing_v1.CreatePublicationResponse) - except ValueError: - msg = f"Not a CreatePublicationResponse object." - raise ValueError(msg) - - list_request = routing_v1.ListPublicationsRequest(limit=3) - list_response = self.client.list_publication(list_request) - - for publication_item in list_response: - try: - assert isinstance(publication_item, routing_v1.ListPublicationsItem) - except ValueError: - msg = f"Not a ListPublicationsItem object." - raise ValueError(msg) - - get_request = routing_v1.GetPublicationRequest(publication_id=create_response.publication_id) - get_response = self.client.get_publication(get_request) - - assert isinstance(get_response, routing_v1.GetPublicationResponse) - assert get_response.publication_id == create_response.publication_id - except Exception as e: - assert e is None - - def gen_records(self, count: int, test_function_name: str) -> list[core_v1.Record]: - """ - Generate test records with unique names. - Schema: https://schema.oasf.outshift.com/0.7.0/objects/record - """ - records: list[core_v1.Record] = [ - core_v1.Record( - data={ - "name": f"agntcy-{test_function_name}-{index}-{str(uuid.uuid4())[:8]}", - "version": "v3.0.0", - "schema_version": "0.7.0", - "description": "Research agent for Cisco's marketing strategy.", - "authors": ["Cisco Systems"], - "created_at": "2025-03-19T17:06:37Z", - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "name": "technology/networking", - "id": 103 - } - ], - "modules": [] - } - ) - for index in range(count) - ] - - return records - - @staticmethod - def cancel_stream_after_delay(responses, delay_sec=5): - # Wait before cancelling to simulate some condition or timeout - time.sleep(delay_sec) - print("Cancelling the stream...") - responses.cancel() - - -if __name__ == "__main__": - unittest.main() +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 +import os +import pathlib +import subprocess +import time +import threading +import unittest +import uuid + +import grpc + +from agntcy.dir_sdk.client import Client +from agntcy.dir_sdk.models import * + + +class TestClient(unittest.TestCase): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + # Verify that `DIRCTL_PATH` is set in the environment + assert os.environ.get("DIRCTL_PATH") is not None + + # Initialize the client + self.client = Client() + + def test_push(self) -> None: + records = self.gen_records(2, "push") + record_refs = 
self.client.push(records=records) + + assert record_refs is not None + assert isinstance(record_refs, list) + assert len(record_refs) == 2 + + for ref in record_refs: + assert isinstance(ref, core_v1.RecordRef) + assert len(ref.cid) == 59 + + def test_pull(self) -> None: + records = self.gen_records(2, "pull") + record_refs = self.client.push(records=records) + pulled_records = self.client.pull(refs=record_refs) + + assert pulled_records is not None + assert isinstance(pulled_records, list) + assert len(pulled_records) == 2 + + for index, record in enumerate(pulled_records): + assert isinstance(record, core_v1.Record) + assert records[index] == record + + def test_lookup(self) -> None: + records = self.gen_records(2, "lookup") + record_refs = self.client.push(records=records) + metadatas = self.client.lookup(record_refs) + + assert metadatas is not None + assert isinstance(metadatas, list) + assert len(metadatas) == 2 + + for metadata in metadatas: + assert isinstance(metadata, core_v1.RecordMeta) + + def test_publish(self) -> None: + records = self.gen_records(1, "publish") + record_refs = self.client.push(records=records) + publish_request = routing_v1.PublishRequest( + record_refs=routing_v1.RecordRefs(refs=record_refs), + ) + + try: + self.client.publish(publish_request) + except Exception as e: + assert e is None + + def test_list(self) -> None: + records = self.gen_records(1, "list") + record_refs = self.client.push(records=records) + self.client.publish(routing_v1.PublishRequest( + record_refs=routing_v1.RecordRefs(refs=record_refs), + )) + + # Sleep to allow the publication to be indexed + time.sleep(5) + + # Query for records in the domain + list_query = routing_v1.RecordQuery( + type=routing_v1.RECORD_QUERY_TYPE_DOMAIN, + value="technology/networking", + ) + + list_request = routing_v1.ListRequest(queries=[list_query]) + objects = list(self.client.list(list_request)) + + assert objects is not None + assert len(objects) != 0 + + for o in objects: + assert isinstance(o, routing_v1.ListResponse) + + def test_search(self) -> None: + records = self.gen_records(1, "search") + _ = self.client.push(records=records) + + search_query = search_v1.RecordQuery( + type=search_v1.RECORD_QUERY_TYPE_SKILL_ID, + value="10201", + ) + + search_request = search_v1.SearchCIDsRequest(queries=[search_query], limit=2) + + objects = list(self.client.search_cids(search_request)) + + assert objects is not None + assert len(objects) > 0 + + for o in objects: + assert isinstance(o, search_v1.SearchCIDsResponse) + + def test_unpublish(self) -> None: + records = self.gen_records(1, "unpublish") + record_refs = self.client.push(records=records) + + publish_record_refs = routing_v1.RecordRefs(refs=record_refs) + _ = routing_v1.PublishRequest(record_refs=publish_record_refs) + unpublish_request = routing_v1.UnpublishRequest(record_refs=publish_record_refs) + + try: + self.client.unpublish(unpublish_request) + except Exception as e: + assert e is None + + def test_delete(self) -> None: + records = self.gen_records(1, "delete") + record_refs = self.client.push(records=records) + try: + self.client.delete(record_refs) + except Exception as e: + assert e is None + + def test_push_referrer(self) -> None: + records = self.gen_records(2, "push_referrer") + record_refs = self.client.push(records=records) + + try: + request = [ + store_v1.PushReferrerRequest( + record_ref=record_refs[0], + referrer=core_v1.RecordReferrer( + type=sign_v1.Signature.DESCRIPTOR.full_name, + data={ + "signature": "dGVzdC1zaWduYXR1cmU=", # 
base64 encoded "test-signature" + "annotations": { + "payload": "test-payload-data" + } + } + ), + ), + store_v1.PushReferrerRequest( + record_ref=record_refs[1], + referrer=core_v1.RecordReferrer( + type=sign_v1.Signature.DESCRIPTOR.full_name, + data={ + "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" + "annotations": { + "payload": "test-payload-data" + } + } + ), + ), + ] + + response = self.client.push_referrer(req=request) + + assert response is not None + assert len(response) == 2 + + for r in response: + assert isinstance(r, store_v1.PushReferrerResponse) + + except Exception as e: + assert e is None + + def test_pull_referrer(self) -> None: + records = self.gen_records(2, "pull_referrer") + record_refs = self.client.push(records=records) + + # Push referrers to these records + request = [ + store_v1.PushReferrerRequest( + record_ref=record_refs[0], + referrer=core_v1.RecordReferrer( + type=sign_v1.Signature.DESCRIPTOR.full_name, + data={ + "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" + "annotations": { + "payload": "test-payload-data" + } + } + ), + ), + store_v1.PushReferrerRequest( + record_ref=record_refs[1], + referrer=core_v1.RecordReferrer( + type=sign_v1.Signature.DESCRIPTOR.full_name, + data={ + "signature": "dGVzdC1zaWduYXR1cmU=", # base64 encoded "test-signature" + "annotations": { + "payload": "test-payload-data" + } + } + ), + ), + ] + response = self.client.push_referrer(req=request) + assert response is not None + assert len(response) == 2 + for r in response: + assert isinstance(r, store_v1.PushReferrerResponse) + + try: + request = [ + store_v1.PullReferrerRequest( + record_ref=record_refs[0], + referrer_type=sign_v1.Signature.DESCRIPTOR.full_name, + ), + store_v1.PullReferrerRequest( + record_ref=record_refs[1], + referrer_type=sign_v1.Signature.DESCRIPTOR.full_name, + ), + ] + + response = self.client.pull_referrer(req=request) + + assert response is not None + assert len(response) == 2 + + for r in response: + assert isinstance(r, store_v1.PullReferrerResponse) + except Exception as e: + assert "pull referrer not implemented" in str( + e, + ) # Delete when the service implemented + + # self.assertIsNone(e) # Uncomment when the service implemented + + def test_sign_and_verify(self) -> None: + records = self.gen_records(2, "sign_verify") + record_refs = self.client.push(records=records) + + # Remove existing cosign keys if any + try: + pathlib.Path("cosign.key").unlink() + pathlib.Path("cosign.pub").unlink() + except FileNotFoundError: + pass # Clean state found + + # Prepare cosign key pair + key_password = "testing-key" + + # Set environment variable for cosign password + shell_env = os.environ.copy() + shell_env["COSIGN_PASSWORD"] = key_password + + # Generate a key pair using cosign + cosign_path = os.getenv("COSIGN_PATH", "cosign") + command = (cosign_path, "generate-key-pair") + subprocess.run(command, check=True, capture_output=True, env=shell_env) + + with open("cosign.key", "rb") as reader: + key_file = reader.read() + + # Prepare Key signing request + key_provider = sign_v1.SignWithKey( + private_key=key_file, + password=key_password.encode("utf-8"), + ) + + request_key_provider = sign_v1.SignRequestProvider(key=key_provider) + key_request = sign_v1.SignRequest( + record_ref=record_refs[0], + provider=request_key_provider, + ) + + # Prepare OIDC signing request + token = shell_env.get("OIDC_TOKEN", "") + provider_url = shell_env.get("OIDC_PROVIDER_URL", "") + client_id = shell_env.get("OIDC_CLIENT_ID", 
"sigstore") + + oidc_options = sign_v1.SignWithOIDC.SignOpts(oidc_provider_url=provider_url) + oidc_provider = sign_v1.SignWithOIDC(id_token=token, options=oidc_options) + request_oidc_provider = sign_v1.SignRequestProvider(oidc=oidc_provider) + oidc_request = sign_v1.SignRequest( + record_ref=record_refs[1], + provider=request_oidc_provider, + ) + + try: + # Sign and verify using Key signing + self.client.sign(key_request) + + # Sign and verify using OIDC signing if set + if shell_env.get("OIDC_TOKEN", "") != "" and shell_env.get("OIDC_PROVIDER_URL", "") != "": + self.client.sign(oidc_request, client_id) + else: + record_refs.pop() # NOTE: Drop the unsigned record if no OIDC tested + + for ref in record_refs: + response = self.client.verify(sign_v1.VerifyRequest(record_ref=ref)) + + assert response.success is True + + except Exception as e: + assert e is None + finally: + pathlib.Path("cosign.key").unlink() + pathlib.Path("cosign.pub").unlink() + + # Test invalid sign request + invalid_request = sign_v1.SignRequest( + record_ref=core_v1.RecordRef(cid="invalid-cid"), + provider=request_key_provider, + ) + try: + self.client.sign(invalid_request) + except RuntimeError as e: + assert "Failed to sign the object" in str(e) + + def test_sync(self) -> None: + try: + create_request = store_v1.CreateSyncRequest( + remote_directory_url=os.getenv( + "DIRECTORY_SERVER_PEER1_ADDRESS", + "0.0.0.0:8891", + ), + ) + create_response = self.client.create_sync(create_request) + + try: + assert uuid.UUID(create_response.sync_id) + except ValueError: + msg = f"Not an UUID: {create_response.sync_id}" + raise ValueError(msg) + + list_request = store_v1.ListSyncsRequest() + list_response = self.client.list_syncs(list_request) + + for sync_item in list_response: + try: + assert isinstance(sync_item, store_v1.ListSyncsItem) + assert uuid.UUID(sync_item.sync_id) + except ValueError: + msg = f"Not an UUID: {sync_item.sync_id}" + raise ValueError(msg) + + get_request = store_v1.GetSyncRequest(sync_id=create_response.sync_id) + get_response = self.client.get_sync(get_request) + + assert isinstance(get_response, store_v1.GetSyncResponse) + assert get_response.sync_id == create_response.sync_id + + delete_request = store_v1.DeleteSyncRequest(sync_id=create_response.sync_id) + self.client.delete_sync(delete_request) + + except Exception as e: + assert e is None + + def test_listen(self) -> None: + listen_request = events_v1.ListenRequest() + listen_stream = self.client.listen(listen_request) + events = [] + + def cancel_stream(): + time.sleep(15) + listen_stream.cancel() + + def read_stream(): + try: + for response in listen_stream: + events.append(response) + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.CANCELLED: + raise + except Exception as e: + msg = f"Failed to listen: {e}" + raise RuntimeError(msg) from e + + cancel_thread = threading.Thread(target=cancel_stream) + read_thread = threading.Thread(target=read_stream) + + cancel_thread.start() + read_thread.start() + + event_records = self.gen_records(10, "listen") + _ = self.client.push(records=event_records) + + cancel_thread.join() + + assert events is not None + assert len(events) > 0 + + for o in events: + assert isinstance(o, events_v1.ListenResponse) + + def test_publication(self) -> None: + records = self.gen_records(1, "publication") + record_refs = self.client.push(records=records) + + try: + create_request = routing_v1.PublishRequest( + record_refs=routing_v1.RecordRefs(refs=record_refs), + ) + + create_response = 
self.client.create_publication(create_request)
+
+            try:
+                assert isinstance(create_response, routing_v1.CreatePublicationResponse)
+            except ValueError:
+                msg = "Not a CreatePublicationResponse object."
+                raise ValueError(msg)
+
+            list_request = routing_v1.ListPublicationsRequest(limit=3)
+            list_response = self.client.list_publication(list_request)
+
+            for publication_item in list_response:
+                try:
+                    assert isinstance(publication_item, routing_v1.ListPublicationsItem)
+                except ValueError:
+                    msg = "Not a ListPublicationsItem object."
+                    raise ValueError(msg)
+
+            get_request = routing_v1.GetPublicationRequest(publication_id=create_response.publication_id)
+            get_response = self.client.get_publication(get_request)
+
+            assert isinstance(get_response, routing_v1.GetPublicationResponse)
+            assert get_response.publication_id == create_response.publication_id
+        except Exception as e:
+            assert e is None
+
+    def gen_records(self, count: int, test_function_name: str) -> list[core_v1.Record]:
+        """
+        Generate test records with unique names.
+        Schema: https://schema.oasf.outshift.com/0.7.0/objects/record
+        """
+        records: list[core_v1.Record] = [
+            core_v1.Record(
+                data={
+                    "name": f"agntcy-{test_function_name}-{index}-{str(uuid.uuid4())[:8]}",
+                    "version": "v3.0.0",
+                    "schema_version": "0.7.0",
+                    "description": "Research agent for Cisco's marketing strategy.",
+                    "authors": ["Cisco Systems"],
+                    "created_at": "2025-03-19T17:06:37Z",
+                    "skills": [
+                        {
+                            "name": "natural_language_processing/natural_language_generation/text_completion",
+                            "id": 10201
+                        },
+                        {
+                            "name": "natural_language_processing/analytical_reasoning/problem_solving",
+                            "id": 10702
+                        }
+                    ],
+                    "locators": [
+                        {
+                            "type": "docker_image",
+                            "url": "https://ghcr.io/agntcy/marketing-strategy"
+                        }
+                    ],
+                    "domains": [
+                        {
+                            "name": "technology/networking",
+                            "id": 103
+                        }
+                    ],
+                    "modules": []
+                }
+            )
+            for index in range(count)
+        ]
+
+        return records
+
+    @staticmethod
+    def cancel_stream_after_delay(responses, delay_sec=5):
+        # Wait before cancelling to simulate some condition or timeout
+        time.sleep(delay_sec)
+        print("Cancelling the stream...")
+        responses.cancel()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/sdk/dir-py/agntcy/dir_sdk/models/README.md b/sdk/dir-py/agntcy/dir_sdk/models/README.md
index 9fd81d64e..b42e67231 100644
--- a/sdk/dir-py/agntcy/dir_sdk/models/README.md
+++ b/sdk/dir-py/agntcy/dir_sdk/models/README.md
@@ -1,12 +1,12 @@
-# Directory SDK Models
-
-Directory models are distributed via `buf.build` and generated from Protocol Buffers definitions,
-which can become cumbersome to import and use.
-This module simplifies the imports and usage of data models needed by Directory APIs.
-It re-exports all the models from the generated code into dedicated namespaces so that they can be imported directly from this module.
-
-For example, instead of importing `RecordMeta` from the generated code, use:
-
-```python
-from agntcy.dir_sdk.models.core_v1 import RecordMeta
-```
+# Directory SDK Models
+
+Directory models are distributed via `buf.build` and generated from Protocol Buffers definitions,
+which can become cumbersome to import and use.
+This module simplifies the imports and usage of data models needed by Directory APIs.
+It re-exports all the models from the generated code into dedicated namespaces so that they can be imported directly from this module.
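+
+The namespaces can also be imported as modules and used via qualified names,
+as the SDK's own test suite does. A minimal sketch (the CID below is a
+placeholder, not a real content identifier):
+
+```python
+from agntcy.dir_sdk.models import core_v1, routing_v1
+
+# Wrap a record reference in a publish request using the namespaced models
+ref = core_v1.RecordRef(cid="example-cid")  # placeholder CID
+request = routing_v1.PublishRequest(record_refs=routing_v1.RecordRefs(refs=[ref]))
+```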
+
+For example, instead of importing `RecordMeta` from the generated code, use:
+
+```python
+from agntcy.dir_sdk.models.core_v1 import RecordMeta
+```
diff --git a/sdk/dir-py/agntcy/dir_sdk/models/__init__.py b/sdk/dir-py/agntcy/dir_sdk/models/__init__.py
index f98657707..c8fe323de 100644
--- a/sdk/dir-py/agntcy/dir_sdk/models/__init__.py
+++ b/sdk/dir-py/agntcy/dir_sdk/models/__init__.py
@@ -1,12 +1,12 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-# Export all protobuf packages for easier module imports.
-# The actual subpackages in agntcy_dir.models expose gRPC stubs.
-
-import agntcy.dir_sdk.models.core_v1 as core_v1
-import agntcy.dir_sdk.models.routing_v1 as routing_v1
-import agntcy.dir_sdk.models.search_v1 as search_v1
-import agntcy.dir_sdk.models.sign_v1 as sign_v1
-import agntcy.dir_sdk.models.store_v1 as store_v1
-import agntcy.dir_sdk.models.events_v1 as events_v1
+# Copyright AGNTCY Contributors (https://github.com/agntcy)
+# SPDX-License-Identifier: Apache-2.0
+
+# Export all protobuf packages for easier module imports.
+# The actual subpackages in agntcy_dir.models expose gRPC stubs.
+
+import agntcy.dir_sdk.models.core_v1 as core_v1
+import agntcy.dir_sdk.models.routing_v1 as routing_v1
+import agntcy.dir_sdk.models.search_v1 as search_v1
+import agntcy.dir_sdk.models.sign_v1 as sign_v1
+import agntcy.dir_sdk.models.store_v1 as store_v1
+import agntcy.dir_sdk.models.events_v1 as events_v1
diff --git a/sdk/dir-py/agntcy/dir_sdk/models/core_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/core_v1.py
index 83f34b6f1..a8c757013 100644
--- a/sdk/dir-py/agntcy/dir_sdk/models/core_v1.py
+++ b/sdk/dir-py/agntcy/dir_sdk/models/core_v1.py
@@ -1,5 +1,5 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-from agntcy.dir.core.v1.record_pb2 import *
-from agntcy.dir.core.v1.record_pb2_grpc import *
+# Copyright AGNTCY Contributors (https://github.com/agntcy)
+# SPDX-License-Identifier: Apache-2.0
+
+from agntcy.dir.core.v1.record_pb2 import *
+from agntcy.dir.core.v1.record_pb2_grpc import *
diff --git a/sdk/dir-py/agntcy/dir_sdk/models/events_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/events_v1.py
index 2b8a8a386..4a2806857 100644
--- a/sdk/dir-py/agntcy/dir_sdk/models/events_v1.py
+++ b/sdk/dir-py/agntcy/dir_sdk/models/events_v1.py
@@ -1,5 +1,5 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-from agntcy.dir.events.v1.event_service_pb2 import *
-from agntcy.dir.events.v1.event_service_pb2_grpc import *
+# Copyright AGNTCY Contributors (https://github.com/agntcy)
+# SPDX-License-Identifier: Apache-2.0
+
+from agntcy.dir.events.v1.event_service_pb2 import *
+from agntcy.dir.events.v1.event_service_pb2_grpc import *
diff --git a/sdk/dir-py/agntcy/dir_sdk/models/routing_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/routing_v1.py
index 16d1571be..8fa32e00a 100644
--- a/sdk/dir-py/agntcy/dir_sdk/models/routing_v1.py
+++ b/sdk/dir-py/agntcy/dir_sdk/models/routing_v1.py
@@ -1,11 +1,11 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-from agntcy.dir.routing.v1.peer_pb2 import *
-from agntcy.dir.routing.v1.peer_pb2_grpc import *
-from agntcy.dir.routing.v1.publication_service_pb2 import *
-from agntcy.dir.routing.v1.publication_service_pb2_grpc import *
-from agntcy.dir.routing.v1.record_query_pb2 import *
-from agntcy.dir.routing.v1.record_query_pb2_grpc import *
-from
agntcy.dir.routing.v1.routing_service_pb2 import * -from agntcy.dir.routing.v1.routing_service_pb2_grpc import * +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +from agntcy.dir.routing.v1.peer_pb2 import * +from agntcy.dir.routing.v1.peer_pb2_grpc import * +from agntcy.dir.routing.v1.publication_service_pb2 import * +from agntcy.dir.routing.v1.publication_service_pb2_grpc import * +from agntcy.dir.routing.v1.record_query_pb2 import * +from agntcy.dir.routing.v1.record_query_pb2_grpc import * +from agntcy.dir.routing.v1.routing_service_pb2 import * +from agntcy.dir.routing.v1.routing_service_pb2_grpc import * diff --git a/sdk/dir-py/agntcy/dir_sdk/models/search_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/search_v1.py index 03eaaab91..42304c04a 100644 --- a/sdk/dir-py/agntcy/dir_sdk/models/search_v1.py +++ b/sdk/dir-py/agntcy/dir_sdk/models/search_v1.py @@ -1,7 +1,7 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -from agntcy.dir.search.v1.record_query_pb2 import * -from agntcy.dir.search.v1.record_query_pb2_grpc import * -from agntcy.dir.search.v1.search_service_pb2 import * -from agntcy.dir.search.v1.search_service_pb2_grpc import * +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +from agntcy.dir.search.v1.record_query_pb2 import * +from agntcy.dir.search.v1.record_query_pb2_grpc import * +from agntcy.dir.search.v1.search_service_pb2 import * +from agntcy.dir.search.v1.search_service_pb2_grpc import * diff --git a/sdk/dir-py/agntcy/dir_sdk/models/sign_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/sign_v1.py index 891c013cc..472377dd5 100644 --- a/sdk/dir-py/agntcy/dir_sdk/models/sign_v1.py +++ b/sdk/dir-py/agntcy/dir_sdk/models/sign_v1.py @@ -1,7 +1,7 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -from agntcy.dir.sign.v1.signature_pb2 import * -from agntcy.dir.sign.v1.public_key_pb2 import * -from agntcy.dir.sign.v1.sign_service_pb2 import * -from agntcy.dir.sign.v1.sign_service_pb2_grpc import * +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +from agntcy.dir.sign.v1.signature_pb2 import * +from agntcy.dir.sign.v1.public_key_pb2 import * +from agntcy.dir.sign.v1.sign_service_pb2 import * +from agntcy.dir.sign.v1.sign_service_pb2_grpc import * diff --git a/sdk/dir-py/agntcy/dir_sdk/models/store_v1.py b/sdk/dir-py/agntcy/dir_sdk/models/store_v1.py index cd89b7e07..7efbac870 100644 --- a/sdk/dir-py/agntcy/dir_sdk/models/store_v1.py +++ b/sdk/dir-py/agntcy/dir_sdk/models/store_v1.py @@ -1,7 +1,7 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -from agntcy.dir.store.v1.store_service_pb2 import * -from agntcy.dir.store.v1.store_service_pb2_grpc import * -from agntcy.dir.store.v1.sync_service_pb2 import * -from agntcy.dir.store.v1.sync_service_pb2_grpc import * +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +from agntcy.dir.store.v1.store_service_pb2 import * +from agntcy.dir.store.v1.store_service_pb2_grpc import * +from agntcy.dir.store.v1.sync_service_pb2 import * +from agntcy.dir.store.v1.sync_service_pb2_grpc import * diff --git a/sdk/dir-py/pyproject.toml b/sdk/dir-py/pyproject.toml index 38128e9d0..f744d7825 100644 --- a/sdk/dir-py/pyproject.toml +++ b/sdk/dir-py/pyproject.toml @@ -1,38 +1,38 @@ -[project] -name 
= "agntcy-dir" -version = "0.6.0" -description = "Directory SDK" -readme = "README.md" -requires-python = ">=3.10" -dependencies = ["grpcio>=1.74.0", "spiffe>=0.2.2", "spiffe-tls>=0.2.1"] - -[dependency-groups] -dev = ["pytest>=8.4.1", "uuid>=1.30"] - -[[tool.uv.index]] -url = "https://buf.build/gen/python" - -[build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" - -[tool.hatch.build.targets.wheel] -packages = ["."] - -[tool.pytest.ini_options] -pythonpath = ["."] - -[tool.ruff] -line-length = 88 - -[tool.ruff.lint] -select = ["CPY001"] -ignore = [] -fixable = ["ALL"] -unfixable = [] - -[tool.ruff.lint.flake8-copyright] -notice-rgx = ''' -(?i)# Copyright AGNTCY Contributors \(https:\/\/github\.com\/agntcy\) -(?i)# SPDX-License-Identifier: Apache-2\.0 -''' +[project] +name = "agntcy-dir" +version = "0.6.0" +description = "Directory SDK" +readme = "README.md" +requires-python = ">=3.10" +dependencies = ["grpcio>=1.74.0", "spiffe>=0.2.2", "spiffe-tls>=0.2.1"] + +[dependency-groups] +dev = ["pytest>=8.4.1", "uuid>=1.30"] + +[[tool.uv.index]] +url = "https://buf.build/gen/python" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.pytest.ini_options] +pythonpath = ["."] + +[tool.ruff] +line-length = 88 + +[tool.ruff.lint] +select = ["CPY001"] +ignore = [] +fixable = ["ALL"] +unfixable = [] + +[tool.ruff.lint.flake8-copyright] +notice-rgx = ''' +(?i)# Copyright AGNTCY Contributors \(https:\/\/github\.com\/agntcy\) +(?i)# SPDX-License-Identifier: Apache-2\.0 +''' diff --git a/sdk/dir-py/uv.lock b/sdk/dir-py/uv.lock index bd37a4b26..18745fd3e 100644 --- a/sdk/dir-py/uv.lock +++ b/sdk/dir-py/uv.lock @@ -1,449 +1,449 @@ -version = 1 -revision = 3 -requires-python = ">=3.10" - -[[package]] -name = "agntcy-dir" -version = "0.6.0" -source = { editable = "." 
} -dependencies = [ - { name = "grpcio" }, - { name = "spiffe" }, - { name = "spiffe-tls" }, -] - -[package.dev-dependencies] -dev = [ - { name = "pytest" }, - { name = "uuid" }, -] - -[package.metadata] -requires-dist = [ - { name = "grpcio", specifier = ">=1.74.0" }, - { name = "spiffe", specifier = ">=0.2.2" }, - { name = "spiffe-tls", specifier = ">=0.2.1" }, -] - -[package.metadata.requires-dev] -dev = [ - { name = "pytest", specifier = ">=8.4.1" }, - { name = "uuid", specifier = ">=1.30" }, -] - -[[package]] -name = "cffi" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, - { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, - { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, - { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, - { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, - { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, - { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, - { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 
218829, upload-time = "2025-09-08T23:22:19.069Z" }, - { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, - { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, - { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, - { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, - { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, - { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, - { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, - { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, - { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, - { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, - { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = 
"2025-09-08T23:22:35.443Z" }, - { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, - { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, - { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, - { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, - { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, - { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, - { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, - { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, - { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, - { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, - { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, - { url = 
"https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, - { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, - { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, - { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, - { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, - { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, - { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, - { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, - { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, - { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, - { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, - { url = 
"https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, - { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, - { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, - { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, - { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, - { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, - { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, - { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, - { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, - { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, - { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, - { url = 
"https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, - { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, - { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, - { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, - { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, - { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, - { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, - { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, - { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, - { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, - { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, - { url = 
"https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, - { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, - { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, - { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, - { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, - { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, - { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, - { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, -] - -[[package]] -name = "colorama" -version = "0.4.6" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, -] - -[[package]] -name = "cryptography" -version = "45.0.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980, upload-time = "2025-09-01T11:15:03.146Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105, upload-time = "2025-09-01T11:13:59.684Z" }, - { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799, upload-time = "2025-09-01T11:14:02.517Z" }, - { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504, upload-time = "2025-09-01T11:14:04.522Z" }, - { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542, upload-time = "2025-09-01T11:14:06.309Z" }, - { url = "https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244, upload-time = "2025-09-01T11:14:08.152Z" }, - { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975, upload-time = "2025-09-01T11:14:09.755Z" }, - { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082, upload-time = "2025-09-01T11:14:11.229Z" }, - { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397, upload-time = "2025-09-01T11:14:12.924Z" }, - { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244, upload-time = "2025-09-01T11:14:14.431Z" }, - { url = "https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862, upload-time = "2025-09-01T11:14:16.185Z" }, - { url 
= "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578, upload-time = "2025-09-01T11:14:17.638Z" }, - { url = "https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400, upload-time = "2025-09-01T11:14:18.958Z" }, - { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824, upload-time = "2025-09-01T11:14:20.954Z" }, - { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233, upload-time = "2025-09-01T11:14:22.454Z" }, - { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075, upload-time = "2025-09-01T11:14:24.287Z" }, - { url = "https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517, upload-time = "2025-09-01T11:14:25.679Z" }, - { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893, upload-time = "2025-09-01T11:14:27.1Z" }, - { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132, upload-time = "2025-09-01T11:14:28.58Z" }, - { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086, upload-time = "2025-09-01T11:14:30.572Z" }, - { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383, upload-time = "2025-09-01T11:14:32.046Z" }, - { url = "https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186, upload-time = "2025-09-01T11:14:33.95Z" }, - { url = 
"https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639, upload-time = "2025-09-01T11:14:35.343Z" }, - { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552, upload-time = "2025-09-01T11:14:36.929Z" }, - { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742, upload-time = "2025-09-01T11:14:38.368Z" }, - { url = "https://files.pythonhosted.org/packages/13/3e/e42f1528ca1ea82256b835191eab1be014e0f9f934b60d98b0be8a38ed70/cryptography-45.0.7-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:de58755d723e86175756f463f2f0bddd45cc36fbd62601228a3f8761c9f58252", size = 3572442, upload-time = "2025-09-01T11:14:39.836Z" }, - { url = "https://files.pythonhosted.org/packages/59/aa/e947693ab08674a2663ed2534cd8d345cf17bf6a1facf99273e8ec8986dc/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a20e442e917889d1a6b3c570c9e3fa2fdc398c20868abcea268ea33c024c4083", size = 4142233, upload-time = "2025-09-01T11:14:41.305Z" }, - { url = "https://files.pythonhosted.org/packages/24/06/09b6f6a2fc43474a32b8fe259038eef1500ee3d3c141599b57ac6c57612c/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:258e0dff86d1d891169b5af222d362468a9570e2532923088658aa866eb11130", size = 4376202, upload-time = "2025-09-01T11:14:43.047Z" }, - { url = "https://files.pythonhosted.org/packages/00/f2/c166af87e95ce6ae6d38471a7e039d3a0549c2d55d74e059680162052824/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d97cf502abe2ab9eff8bd5e4aca274da8d06dd3ef08b759a8d6143f4ad65d4b4", size = 4141900, upload-time = "2025-09-01T11:14:45.089Z" }, - { url = "https://files.pythonhosted.org/packages/16/b9/e96e0b6cb86eae27ea51fa8a3151535a18e66fe7c451fa90f7f89c85f541/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:c987dad82e8c65ebc985f5dae5e74a3beda9d0a2a4daf8a1115f3772b59e5141", size = 4375562, upload-time = "2025-09-01T11:14:47.166Z" }, - { url = "https://files.pythonhosted.org/packages/36/d0/36e8ee39274e9d77baf7d0dafda680cba6e52f3936b846f0d56d64fec915/cryptography-45.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c13b1e3afd29a5b3b2656257f14669ca8fa8d7956d509926f0b130b600b50ab7", size = 3322781, upload-time = "2025-09-01T11:14:48.747Z" }, - { url = "https://files.pythonhosted.org/packages/99/4e/49199a4c82946938a3e05d2e8ad9482484ba48bbc1e809e3d506c686d051/cryptography-45.0.7-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a862753b36620af6fc54209264f92c716367f2f0ff4624952276a6bbd18cbde", size = 3584634, upload-time = "2025-09-01T11:14:50.593Z" }, - { url = "https://files.pythonhosted.org/packages/16/ce/5f6ff59ea9c7779dba51b84871c19962529bdcc12e1a6ea172664916c550/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:06ce84dc14df0bf6ea84666f958e6080cdb6fe1231be2a51f3fc1267d9f3fb34", size = 4149533, upload-time = "2025-09-01T11:14:52.091Z" }, - { url = 
"https://files.pythonhosted.org/packages/ce/13/b3cfbd257ac96da4b88b46372e662009b7a16833bfc5da33bb97dd5631ae/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d0c5c6bac22b177bf8da7435d9d27a6834ee130309749d162b26c3105c0795a9", size = 4385557, upload-time = "2025-09-01T11:14:53.551Z" }, - { url = "https://files.pythonhosted.org/packages/1c/c5/8c59d6b7c7b439ba4fc8d0cab868027fd095f215031bc123c3a070962912/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:2f641b64acc00811da98df63df7d59fd4706c0df449da71cb7ac39a0732b40ae", size = 4149023, upload-time = "2025-09-01T11:14:55.022Z" }, - { url = "https://files.pythonhosted.org/packages/55/32/05385c86d6ca9ab0b4d5bb442d2e3d85e727939a11f3e163fc776ce5eb40/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:f5414a788ecc6ee6bc58560e85ca624258a55ca434884445440a810796ea0e0b", size = 4385722, upload-time = "2025-09-01T11:14:57.319Z" }, - { url = "https://files.pythonhosted.org/packages/23/87/7ce86f3fa14bc11a5a48c30d8103c26e09b6465f8d8e9d74cf7a0714f043/cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63", size = 3332908, upload-time = "2025-09-01T11:14:58.78Z" }, -] - -[[package]] -name = "exceptiongroup" -version = "1.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, -] - -[[package]] -name = "grpcio" -version = "1.74.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048, upload-time = "2025-07-24T18:54:23.039Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/66/54/68e51a90797ad7afc5b0a7881426c337f6a9168ebab73c3210b76aa7c90d/grpcio-1.74.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:85bd5cdf4ed7b2d6438871adf6afff9af7096486fcf51818a81b77ef4dd30907", size = 5481935, upload-time = "2025-07-24T18:52:43.756Z" }, - { url = "https://files.pythonhosted.org/packages/32/2a/af817c7e9843929e93e54d09c9aee2555c2e8d81b93102a9426b36e91833/grpcio-1.74.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:68c8ebcca945efff9d86d8d6d7bfb0841cf0071024417e2d7f45c5e46b5b08eb", size = 10986796, upload-time = "2025-07-24T18:52:47.219Z" }, - { url = "https://files.pythonhosted.org/packages/d5/94/d67756638d7bb07750b07d0826c68e414124574b53840ba1ff777abcd388/grpcio-1.74.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:e154d230dc1bbbd78ad2fdc3039fa50ad7ffcf438e4eb2fa30bce223a70c7486", size = 5983663, upload-time = "2025-07-24T18:52:49.463Z" }, - { url = 
"https://files.pythonhosted.org/packages/35/f5/c5e4853bf42148fea8532d49e919426585b73eafcf379a712934652a8de9/grpcio-1.74.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8978003816c7b9eabe217f88c78bc26adc8f9304bf6a594b02e5a49b2ef9c11", size = 6653765, upload-time = "2025-07-24T18:52:51.094Z" }, - { url = "https://files.pythonhosted.org/packages/fd/75/a1991dd64b331d199935e096cc9daa3415ee5ccbe9f909aa48eded7bba34/grpcio-1.74.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3d7bd6e3929fd2ea7fbc3f562e4987229ead70c9ae5f01501a46701e08f1ad9", size = 6215172, upload-time = "2025-07-24T18:52:53.282Z" }, - { url = "https://files.pythonhosted.org/packages/01/a4/7cef3dbb3b073d0ce34fd507efc44ac4c9442a0ef9fba4fb3f5c551efef5/grpcio-1.74.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:136b53c91ac1d02c8c24201bfdeb56f8b3ac3278668cbb8e0ba49c88069e1bdc", size = 6329142, upload-time = "2025-07-24T18:52:54.927Z" }, - { url = "https://files.pythonhosted.org/packages/bf/d3/587920f882b46e835ad96014087054655312400e2f1f1446419e5179a383/grpcio-1.74.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fe0f540750a13fd8e5da4b3eaba91a785eea8dca5ccd2bc2ffe978caa403090e", size = 7018632, upload-time = "2025-07-24T18:52:56.523Z" }, - { url = "https://files.pythonhosted.org/packages/1f/95/c70a3b15a0bc83334b507e3d2ae20ee8fa38d419b8758a4d838f5c2a7d32/grpcio-1.74.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4e4181bfc24413d1e3a37a0b7889bea68d973d4b45dd2bc68bb766c140718f82", size = 6509641, upload-time = "2025-07-24T18:52:58.495Z" }, - { url = "https://files.pythonhosted.org/packages/4b/06/2e7042d06247d668ae69ea6998eca33f475fd4e2855f94dcb2aa5daef334/grpcio-1.74.0-cp310-cp310-win32.whl", hash = "sha256:1733969040989f7acc3d94c22f55b4a9501a30f6aaacdbccfaba0a3ffb255ab7", size = 3817478, upload-time = "2025-07-24T18:53:00.128Z" }, - { url = "https://files.pythonhosted.org/packages/93/20/e02b9dcca3ee91124060b65bbf5b8e1af80b3b76a30f694b44b964ab4d71/grpcio-1.74.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e912d3c993a29df6c627459af58975b2e5c897d93287939b9d5065f000249b5", size = 4493971, upload-time = "2025-07-24T18:53:02.068Z" }, - { url = "https://files.pythonhosted.org/packages/e7/77/b2f06db9f240a5abeddd23a0e49eae2b6ac54d85f0e5267784ce02269c3b/grpcio-1.74.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:69e1a8180868a2576f02356565f16635b99088da7df3d45aaa7e24e73a054e31", size = 5487368, upload-time = "2025-07-24T18:53:03.548Z" }, - { url = "https://files.pythonhosted.org/packages/48/99/0ac8678a819c28d9a370a663007581744a9f2a844e32f0fa95e1ddda5b9e/grpcio-1.74.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8efe72fde5500f47aca1ef59495cb59c885afe04ac89dd11d810f2de87d935d4", size = 10999804, upload-time = "2025-07-24T18:53:05.095Z" }, - { url = "https://files.pythonhosted.org/packages/45/c6/a2d586300d9e14ad72e8dc211c7aecb45fe9846a51e558c5bca0c9102c7f/grpcio-1.74.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a8f0302f9ac4e9923f98d8e243939a6fb627cd048f5cd38595c97e38020dffce", size = 5987667, upload-time = "2025-07-24T18:53:07.157Z" }, - { url = "https://files.pythonhosted.org/packages/c9/57/5f338bf56a7f22584e68d669632e521f0de460bb3749d54533fc3d0fca4f/grpcio-1.74.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f609a39f62a6f6f05c7512746798282546358a37ea93c1fcbadf8b2fed162e3", size = 6655612, upload-time = "2025-07-24T18:53:09.244Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/ea/a4820c4c44c8b35b1903a6c72a5bdccec92d0840cf5c858c498c66786ba5/grpcio-1.74.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98e0b7434a7fa4e3e63f250456eaef52499fba5ae661c58cc5b5477d11e7182", size = 6219544, upload-time = "2025-07-24T18:53:11.221Z" }, - { url = "https://files.pythonhosted.org/packages/a4/17/0537630a921365928f5abb6d14c79ba4dcb3e662e0dbeede8af4138d9dcf/grpcio-1.74.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:662456c4513e298db6d7bd9c3b8df6f75f8752f0ba01fb653e252ed4a59b5a5d", size = 6334863, upload-time = "2025-07-24T18:53:12.925Z" }, - { url = "https://files.pythonhosted.org/packages/e2/a6/85ca6cb9af3f13e1320d0a806658dca432ff88149d5972df1f7b51e87127/grpcio-1.74.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3d14e3c4d65e19d8430a4e28ceb71ace4728776fd6c3ce34016947474479683f", size = 7019320, upload-time = "2025-07-24T18:53:15.002Z" }, - { url = "https://files.pythonhosted.org/packages/4f/a7/fe2beab970a1e25d2eff108b3cf4f7d9a53c185106377a3d1989216eba45/grpcio-1.74.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bf949792cee20d2078323a9b02bacbbae002b9e3b9e2433f2741c15bdeba1c4", size = 6514228, upload-time = "2025-07-24T18:53:16.999Z" }, - { url = "https://files.pythonhosted.org/packages/6a/c2/2f9c945c8a248cebc3ccda1b7a1bf1775b9d7d59e444dbb18c0014e23da6/grpcio-1.74.0-cp311-cp311-win32.whl", hash = "sha256:55b453812fa7c7ce2f5c88be3018fb4a490519b6ce80788d5913f3f9d7da8c7b", size = 3817216, upload-time = "2025-07-24T18:53:20.564Z" }, - { url = "https://files.pythonhosted.org/packages/ff/d1/a9cf9c94b55becda2199299a12b9feef0c79946b0d9d34c989de6d12d05d/grpcio-1.74.0-cp311-cp311-win_amd64.whl", hash = "sha256:86ad489db097141a907c559988c29718719aa3e13370d40e20506f11b4de0d11", size = 4495380, upload-time = "2025-07-24T18:53:22.058Z" }, - { url = "https://files.pythonhosted.org/packages/4c/5d/e504d5d5c4469823504f65687d6c8fb97b7f7bf0b34873b7598f1df24630/grpcio-1.74.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8533e6e9c5bd630ca98062e3a1326249e6ada07d05acf191a77bc33f8948f3d8", size = 5445551, upload-time = "2025-07-24T18:53:23.641Z" }, - { url = "https://files.pythonhosted.org/packages/43/01/730e37056f96f2f6ce9f17999af1556df62ee8dab7fa48bceeaab5fd3008/grpcio-1.74.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:2918948864fec2a11721d91568effffbe0a02b23ecd57f281391d986847982f6", size = 10979810, upload-time = "2025-07-24T18:53:25.349Z" }, - { url = "https://files.pythonhosted.org/packages/79/3d/09fd100473ea5c47083889ca47ffd356576173ec134312f6aa0e13111dee/grpcio-1.74.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:60d2d48b0580e70d2e1954d0d19fa3c2e60dd7cbed826aca104fff518310d1c5", size = 5941946, upload-time = "2025-07-24T18:53:27.387Z" }, - { url = "https://files.pythonhosted.org/packages/8a/99/12d2cca0a63c874c6d3d195629dcd85cdf5d6f98a30d8db44271f8a97b93/grpcio-1.74.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3601274bc0523f6dc07666c0e01682c94472402ac2fd1226fd96e079863bfa49", size = 6621763, upload-time = "2025-07-24T18:53:29.193Z" }, - { url = "https://files.pythonhosted.org/packages/9d/2c/930b0e7a2f1029bbc193443c7bc4dc2a46fedb0203c8793dcd97081f1520/grpcio-1.74.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:176d60a5168d7948539def20b2a3adcce67d72454d9ae05969a2e73f3a0feee7", size = 6180664, upload-time = "2025-07-24T18:53:30.823Z" }, - { url = 
"https://files.pythonhosted.org/packages/db/d5/ff8a2442180ad0867717e670f5ec42bfd8d38b92158ad6bcd864e6d4b1ed/grpcio-1.74.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e759f9e8bc908aaae0412642afe5416c9f983a80499448fcc7fab8692ae044c3", size = 6301083, upload-time = "2025-07-24T18:53:32.454Z" }, - { url = "https://files.pythonhosted.org/packages/b0/ba/b361d390451a37ca118e4ec7dccec690422e05bc85fba2ec72b06cefec9f/grpcio-1.74.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e7c4389771855a92934b2846bd807fc25a3dfa820fd912fe6bd8136026b2707", size = 6994132, upload-time = "2025-07-24T18:53:34.506Z" }, - { url = "https://files.pythonhosted.org/packages/3b/0c/3a5fa47d2437a44ced74141795ac0251bbddeae74bf81df3447edd767d27/grpcio-1.74.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cce634b10aeab37010449124814b05a62fb5f18928ca878f1bf4750d1f0c815b", size = 6489616, upload-time = "2025-07-24T18:53:36.217Z" }, - { url = "https://files.pythonhosted.org/packages/ae/95/ab64703b436d99dc5217228babc76047d60e9ad14df129e307b5fec81fd0/grpcio-1.74.0-cp312-cp312-win32.whl", hash = "sha256:885912559974df35d92219e2dc98f51a16a48395f37b92865ad45186f294096c", size = 3807083, upload-time = "2025-07-24T18:53:37.911Z" }, - { url = "https://files.pythonhosted.org/packages/84/59/900aa2445891fc47a33f7d2f76e00ca5d6ae6584b20d19af9c06fa09bf9a/grpcio-1.74.0-cp312-cp312-win_amd64.whl", hash = "sha256:42f8fee287427b94be63d916c90399ed310ed10aadbf9e2e5538b3e497d269bc", size = 4490123, upload-time = "2025-07-24T18:53:39.528Z" }, - { url = "https://files.pythonhosted.org/packages/d4/d8/1004a5f468715221450e66b051c839c2ce9a985aa3ee427422061fcbb6aa/grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89", size = 5449488, upload-time = "2025-07-24T18:53:41.174Z" }, - { url = "https://files.pythonhosted.org/packages/94/0e/33731a03f63740d7743dced423846c831d8e6da808fcd02821a4416df7fa/grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01", size = 10974059, upload-time = "2025-07-24T18:53:43.066Z" }, - { url = "https://files.pythonhosted.org/packages/0d/c6/3d2c14d87771a421205bdca991467cfe473ee4c6a1231c1ede5248c62ab8/grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e", size = 5945647, upload-time = "2025-07-24T18:53:45.269Z" }, - { url = "https://files.pythonhosted.org/packages/c5/83/5a354c8aaff58594eef7fffebae41a0f8995a6258bbc6809b800c33d4c13/grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91", size = 6626101, upload-time = "2025-07-24T18:53:47.015Z" }, - { url = "https://files.pythonhosted.org/packages/3f/ca/4fdc7bf59bf6994aa45cbd4ef1055cd65e2884de6113dbd49f75498ddb08/grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249", size = 6182562, upload-time = "2025-07-24T18:53:48.967Z" }, - { url = "https://files.pythonhosted.org/packages/fd/48/2869e5b2c1922583686f7ae674937986807c2f676d08be70d0a541316270/grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362", size = 6303425, upload-time = "2025-07-24T18:53:50.847Z" }, - { url = 
"https://files.pythonhosted.org/packages/a6/0e/bac93147b9a164f759497bc6913e74af1cb632c733c7af62c0336782bd38/grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f", size = 6996533, upload-time = "2025-07-24T18:53:52.747Z" }, - { url = "https://files.pythonhosted.org/packages/84/35/9f6b2503c1fd86d068b46818bbd7329db26a87cdd8c01e0d1a9abea1104c/grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20", size = 6491489, upload-time = "2025-07-24T18:53:55.06Z" }, - { url = "https://files.pythonhosted.org/packages/75/33/a04e99be2a82c4cbc4039eb3a76f6c3632932b9d5d295221389d10ac9ca7/grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa", size = 3805811, upload-time = "2025-07-24T18:53:56.798Z" }, - { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214, upload-time = "2025-07-24T18:53:59.771Z" }, -] - -[[package]] -name = "iniconfig" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, -] - -[[package]] -name = "packaging" -version = "25.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, -] - -[[package]] -name = "pem" -version = "23.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/05/86/16c0b6789816f8d53f2f208b5a090c9197da8a6dae4d490554bb1bedbb09/pem-23.1.0.tar.gz", hash = "sha256:06503ff2441a111f853ce4e8b9eb9d5fedb488ebdbf560115d3dd53a1b4afc73", size = 43796, upload-time = "2023-06-21T10:24:40.539Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/97/8299a481ae6c08494b5d53511e6a4746775d8a354c685c69d8796b2ed482/pem-23.1.0-py3-none-any.whl", hash = "sha256:78bbb1e75b737891350cb9499cbba31da5d59545f360f44163c0bc751cad55d3", size = 9195, upload-time = "2023-06-21T10:24:39.164Z" }, -] - -[[package]] -name = "pluggy" -version = "1.6.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", 
hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, -] - -[[package]] -name = "protobuf" -version = "6.32.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fa/a4/cc17347aa2897568beece2e674674359f911d6fe21b0b8d6268cd42727ac/protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d", size = 440635, upload-time = "2025-09-11T21:38:42.935Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/98/645183ea03ab3995d29086b8bf4f7562ebd3d10c9a4b14ee3f20d47cfe50/protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085", size = 424411, upload-time = "2025-09-11T21:38:27.427Z" }, - { url = "https://files.pythonhosted.org/packages/8c/f3/6f58f841f6ebafe076cebeae33fc336e900619d34b1c93e4b5c97a81fdfa/protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1", size = 435738, upload-time = "2025-09-11T21:38:30.959Z" }, - { url = "https://files.pythonhosted.org/packages/10/56/a8a3f4e7190837139e68c7002ec749190a163af3e330f65d90309145a210/protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281", size = 426454, upload-time = "2025-09-11T21:38:34.076Z" }, - { url = "https://files.pythonhosted.org/packages/3f/be/8dd0a927c559b37d7a6c8ab79034fd167dcc1f851595f2e641ad62be8643/protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4", size = 322874, upload-time = "2025-09-11T21:38:35.509Z" }, - { url = "https://files.pythonhosted.org/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710", size = 322013, upload-time = "2025-09-11T21:38:37.017Z" }, - { url = "https://files.pythonhosted.org/packages/97/b7/15cc7d93443d6c6a84626ae3258a91f4c6ac8c0edd5df35ea7658f71b79c/protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346", size = 169289, upload-time = "2025-09-11T21:38:41.234Z" }, -] - -[[package]] -name = "pyasn1" -version = "0.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyasn1" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, -] - -[[package]] -name = "pycparser" -version = "2.23" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, -] - -[[package]] -name = "pygments" -version = "2.19.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, -] - -[[package]] -name = "pyjwt" -version = "2.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, -] - -[package.optional-dependencies] -crypto = [ - { name = "cryptography" }, -] - -[[package]] -name = "pyopenssl" -version = "25.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "typing-extensions", marker = "python_full_version < '3.13'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073, upload-time = "2025-09-17T00:32:21.037Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268, 
upload-time = "2025-09-17T00:32:19.474Z" }, -] - -[[package]] -name = "pytest" -version = "8.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "colorama", marker = "sys_platform == 'win32'" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, - { name = "iniconfig" }, - { name = "packaging" }, - { name = "pluggy" }, - { name = "pygments" }, - { name = "tomli", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, -] - -[[package]] -name = "spiffe" -version = "0.2.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "grpcio" }, - { name = "pem" }, - { name = "protobuf" }, - { name = "pyasn1" }, - { name = "pyasn1-modules" }, - { name = "pyjwt", extra = ["crypto"] }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e5/f9/716f29e5e0cb13d2786cf30fd2b001898cfe7cf33134f2bcd52da3e2b49c/spiffe-0.2.2.tar.gz", hash = "sha256:e4ca1247b1a08631a3f822eec7db70447b6d99734ff50670f2c9020dfb006231", size = 34912, upload-time = "2025-07-15T16:50:04.113Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/01/7e/50b8f8d4bbeed9b7598cb2e15a7f3b9a55b1635d50d67d763f02ffc4cc93/spiffe-0.2.2-py3-none-any.whl", hash = "sha256:a53fb39ab59408b15dd2f969989045d68bc6b3ebfd283bf2f77e9ff9a66b047b", size = 56009, upload-time = "2025-07-15T16:50:02.958Z" }, -] - -[[package]] -name = "spiffe-tls" -version = "0.2.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyopenssl" }, - { name = "spiffe" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d7/19/2228e634dfa76ba41ef2896d69205441beea5abce7b871430ca618210d9d/spiffe_tls-0.2.1.tar.gz", hash = "sha256:5898b91f7e4f8db9f8cbdd00f625a113e2ce5fc7379cf0104c6d19b73d53ddef", size = 10520, upload-time = "2025-06-11T18:28:31.87Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/e1/fe672f042adcd86291a39c0ed7ada5fdc13be945dbf7b53f2f3df59d0c7e/spiffe_tls-0.2.1-py3-none-any.whl", hash = "sha256:b90d302c92deaedd8278339b0c41d6f98d7c512a76e947e013e6189c9f75a53d", size = 15522, upload-time = "2025-06-11T18:28:30.948Z" }, -] - -[[package]] -name = "tomli" -version = "2.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, - { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, - { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, - { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, - { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, - { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, - { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, - { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, - { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, - { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, - { url = 
"https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, - { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, - { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, - { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, - { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, - { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, - { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, - { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, - { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, - { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, - { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, - { url = 
"https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, - { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, - { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, - { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, - { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, - { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, - { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, - { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, -] - -[[package]] -name = "typing-extensions" -version = "4.15.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, -] - -[[package]] -name = "uuid" -version = "1.30" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ce/63/f42f5aa951ebf2c8dac81f77a8edcc1c218640a2a35a03b9ff2d4aa64c3d/uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f", 
+version = 1
+revision = 3
+requires-python = ">=3.10"
+
+[[package]]
+name = "agntcy-dir"
+version = "0.6.0"
+source = { editable = "." }
+dependencies = [
+    { name = "grpcio" },
+    { name = "spiffe" },
+    { name = "spiffe-tls" },
+]
+
+[package.dev-dependencies]
+dev = [
+    { name = "pytest" },
+    { name = "uuid" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "grpcio", specifier = ">=1.74.0" },
+    { name = "spiffe", specifier = ">=0.2.2" },
+    { name = "spiffe-tls", specifier = ">=0.2.1" },
+]
+
+[package.metadata.requires-dev]
+dev = [
+    { name = "pytest", specifier = ">=8.4.1" },
+    { name = "uuid", specifier = ">=1.30" },
+]
+
+[[package]]
+name = "cffi"
+version = "2.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pycparser", marker = "implementation_name != 'PyPy'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" },
+    { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" },
+    { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" },
+    { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" },
+    { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" },
+    { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" },
+    { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" },
+    { url =
"https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = 
"https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] 
+name = "cryptography" +version = "45.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980, upload-time = "2025-09-01T11:15:03.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105, upload-time = "2025-09-01T11:13:59.684Z" }, + { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799, upload-time = "2025-09-01T11:14:02.517Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504, upload-time = "2025-09-01T11:14:04.522Z" }, + { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542, upload-time = "2025-09-01T11:14:06.309Z" }, + { url = "https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244, upload-time = "2025-09-01T11:14:08.152Z" }, + { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975, upload-time = "2025-09-01T11:14:09.755Z" }, + { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082, upload-time = "2025-09-01T11:14:11.229Z" }, + { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397, upload-time = "2025-09-01T11:14:12.924Z" }, + { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244, upload-time = "2025-09-01T11:14:14.431Z" }, + { url = 
"https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862, upload-time = "2025-09-01T11:14:16.185Z" }, + { url = "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578, upload-time = "2025-09-01T11:14:17.638Z" }, + { url = "https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400, upload-time = "2025-09-01T11:14:18.958Z" }, + { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824, upload-time = "2025-09-01T11:14:20.954Z" }, + { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233, upload-time = "2025-09-01T11:14:22.454Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075, upload-time = "2025-09-01T11:14:24.287Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517, upload-time = "2025-09-01T11:14:25.679Z" }, + { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893, upload-time = "2025-09-01T11:14:27.1Z" }, + { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132, upload-time = "2025-09-01T11:14:28.58Z" }, + { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086, upload-time = "2025-09-01T11:14:30.572Z" }, + { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383, upload-time = "2025-09-01T11:14:32.046Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186, upload-time = "2025-09-01T11:14:33.95Z" }, + { url = "https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639, upload-time = "2025-09-01T11:14:35.343Z" }, + { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552, upload-time = "2025-09-01T11:14:36.929Z" }, + { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742, upload-time = "2025-09-01T11:14:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/13/3e/e42f1528ca1ea82256b835191eab1be014e0f9f934b60d98b0be8a38ed70/cryptography-45.0.7-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:de58755d723e86175756f463f2f0bddd45cc36fbd62601228a3f8761c9f58252", size = 3572442, upload-time = "2025-09-01T11:14:39.836Z" }, + { url = "https://files.pythonhosted.org/packages/59/aa/e947693ab08674a2663ed2534cd8d345cf17bf6a1facf99273e8ec8986dc/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a20e442e917889d1a6b3c570c9e3fa2fdc398c20868abcea268ea33c024c4083", size = 4142233, upload-time = "2025-09-01T11:14:41.305Z" }, + { url = "https://files.pythonhosted.org/packages/24/06/09b6f6a2fc43474a32b8fe259038eef1500ee3d3c141599b57ac6c57612c/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:258e0dff86d1d891169b5af222d362468a9570e2532923088658aa866eb11130", size = 4376202, upload-time = "2025-09-01T11:14:43.047Z" }, + { url = "https://files.pythonhosted.org/packages/00/f2/c166af87e95ce6ae6d38471a7e039d3a0549c2d55d74e059680162052824/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d97cf502abe2ab9eff8bd5e4aca274da8d06dd3ef08b759a8d6143f4ad65d4b4", size = 4141900, upload-time = "2025-09-01T11:14:45.089Z" }, + { url = "https://files.pythonhosted.org/packages/16/b9/e96e0b6cb86eae27ea51fa8a3151535a18e66fe7c451fa90f7f89c85f541/cryptography-45.0.7-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:c987dad82e8c65ebc985f5dae5e74a3beda9d0a2a4daf8a1115f3772b59e5141", size = 4375562, upload-time = "2025-09-01T11:14:47.166Z" }, + { url = "https://files.pythonhosted.org/packages/36/d0/36e8ee39274e9d77baf7d0dafda680cba6e52f3936b846f0d56d64fec915/cryptography-45.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c13b1e3afd29a5b3b2656257f14669ca8fa8d7956d509926f0b130b600b50ab7", size = 3322781, upload-time = "2025-09-01T11:14:48.747Z" }, + { url = "https://files.pythonhosted.org/packages/99/4e/49199a4c82946938a3e05d2e8ad9482484ba48bbc1e809e3d506c686d051/cryptography-45.0.7-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a862753b36620af6fc54209264f92c716367f2f0ff4624952276a6bbd18cbde", size = 3584634, upload-time = "2025-09-01T11:14:50.593Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/ce/5f6ff59ea9c7779dba51b84871c19962529bdcc12e1a6ea172664916c550/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:06ce84dc14df0bf6ea84666f958e6080cdb6fe1231be2a51f3fc1267d9f3fb34", size = 4149533, upload-time = "2025-09-01T11:14:52.091Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/b3cfbd257ac96da4b88b46372e662009b7a16833bfc5da33bb97dd5631ae/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d0c5c6bac22b177bf8da7435d9d27a6834ee130309749d162b26c3105c0795a9", size = 4385557, upload-time = "2025-09-01T11:14:53.551Z" }, + { url = "https://files.pythonhosted.org/packages/1c/c5/8c59d6b7c7b439ba4fc8d0cab868027fd095f215031bc123c3a070962912/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:2f641b64acc00811da98df63df7d59fd4706c0df449da71cb7ac39a0732b40ae", size = 4149023, upload-time = "2025-09-01T11:14:55.022Z" }, + { url = "https://files.pythonhosted.org/packages/55/32/05385c86d6ca9ab0b4d5bb442d2e3d85e727939a11f3e163fc776ce5eb40/cryptography-45.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:f5414a788ecc6ee6bc58560e85ca624258a55ca434884445440a810796ea0e0b", size = 4385722, upload-time = "2025-09-01T11:14:57.319Z" }, + { url = "https://files.pythonhosted.org/packages/23/87/7ce86f3fa14bc11a5a48c30d8103c26e09b6465f8d8e9d74cf7a0714f043/cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63", size = 3332908, upload-time = "2025-09-01T11:14:58.78Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, +] + +[[package]] +name = "grpcio" +version = "1.74.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048, upload-time = "2025-07-24T18:54:23.039Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/54/68e51a90797ad7afc5b0a7881426c337f6a9168ebab73c3210b76aa7c90d/grpcio-1.74.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:85bd5cdf4ed7b2d6438871adf6afff9af7096486fcf51818a81b77ef4dd30907", size = 5481935, upload-time = "2025-07-24T18:52:43.756Z" }, + { url = "https://files.pythonhosted.org/packages/32/2a/af817c7e9843929e93e54d09c9aee2555c2e8d81b93102a9426b36e91833/grpcio-1.74.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:68c8ebcca945efff9d86d8d6d7bfb0841cf0071024417e2d7f45c5e46b5b08eb", size = 10986796, upload-time = "2025-07-24T18:52:47.219Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/94/d67756638d7bb07750b07d0826c68e414124574b53840ba1ff777abcd388/grpcio-1.74.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:e154d230dc1bbbd78ad2fdc3039fa50ad7ffcf438e4eb2fa30bce223a70c7486", size = 5983663, upload-time = "2025-07-24T18:52:49.463Z" }, + { url = "https://files.pythonhosted.org/packages/35/f5/c5e4853bf42148fea8532d49e919426585b73eafcf379a712934652a8de9/grpcio-1.74.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8978003816c7b9eabe217f88c78bc26adc8f9304bf6a594b02e5a49b2ef9c11", size = 6653765, upload-time = "2025-07-24T18:52:51.094Z" }, + { url = "https://files.pythonhosted.org/packages/fd/75/a1991dd64b331d199935e096cc9daa3415ee5ccbe9f909aa48eded7bba34/grpcio-1.74.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3d7bd6e3929fd2ea7fbc3f562e4987229ead70c9ae5f01501a46701e08f1ad9", size = 6215172, upload-time = "2025-07-24T18:52:53.282Z" }, + { url = "https://files.pythonhosted.org/packages/01/a4/7cef3dbb3b073d0ce34fd507efc44ac4c9442a0ef9fba4fb3f5c551efef5/grpcio-1.74.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:136b53c91ac1d02c8c24201bfdeb56f8b3ac3278668cbb8e0ba49c88069e1bdc", size = 6329142, upload-time = "2025-07-24T18:52:54.927Z" }, + { url = "https://files.pythonhosted.org/packages/bf/d3/587920f882b46e835ad96014087054655312400e2f1f1446419e5179a383/grpcio-1.74.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fe0f540750a13fd8e5da4b3eaba91a785eea8dca5ccd2bc2ffe978caa403090e", size = 7018632, upload-time = "2025-07-24T18:52:56.523Z" }, + { url = "https://files.pythonhosted.org/packages/1f/95/c70a3b15a0bc83334b507e3d2ae20ee8fa38d419b8758a4d838f5c2a7d32/grpcio-1.74.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4e4181bfc24413d1e3a37a0b7889bea68d973d4b45dd2bc68bb766c140718f82", size = 6509641, upload-time = "2025-07-24T18:52:58.495Z" }, + { url = "https://files.pythonhosted.org/packages/4b/06/2e7042d06247d668ae69ea6998eca33f475fd4e2855f94dcb2aa5daef334/grpcio-1.74.0-cp310-cp310-win32.whl", hash = "sha256:1733969040989f7acc3d94c22f55b4a9501a30f6aaacdbccfaba0a3ffb255ab7", size = 3817478, upload-time = "2025-07-24T18:53:00.128Z" }, + { url = "https://files.pythonhosted.org/packages/93/20/e02b9dcca3ee91124060b65bbf5b8e1af80b3b76a30f694b44b964ab4d71/grpcio-1.74.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e912d3c993a29df6c627459af58975b2e5c897d93287939b9d5065f000249b5", size = 4493971, upload-time = "2025-07-24T18:53:02.068Z" }, + { url = "https://files.pythonhosted.org/packages/e7/77/b2f06db9f240a5abeddd23a0e49eae2b6ac54d85f0e5267784ce02269c3b/grpcio-1.74.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:69e1a8180868a2576f02356565f16635b99088da7df3d45aaa7e24e73a054e31", size = 5487368, upload-time = "2025-07-24T18:53:03.548Z" }, + { url = "https://files.pythonhosted.org/packages/48/99/0ac8678a819c28d9a370a663007581744a9f2a844e32f0fa95e1ddda5b9e/grpcio-1.74.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8efe72fde5500f47aca1ef59495cb59c885afe04ac89dd11d810f2de87d935d4", size = 10999804, upload-time = "2025-07-24T18:53:05.095Z" }, + { url = "https://files.pythonhosted.org/packages/45/c6/a2d586300d9e14ad72e8dc211c7aecb45fe9846a51e558c5bca0c9102c7f/grpcio-1.74.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a8f0302f9ac4e9923f98d8e243939a6fb627cd048f5cd38595c97e38020dffce", size = 5987667, upload-time = "2025-07-24T18:53:07.157Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/57/5f338bf56a7f22584e68d669632e521f0de460bb3749d54533fc3d0fca4f/grpcio-1.74.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f609a39f62a6f6f05c7512746798282546358a37ea93c1fcbadf8b2fed162e3", size = 6655612, upload-time = "2025-07-24T18:53:09.244Z" }, + { url = "https://files.pythonhosted.org/packages/82/ea/a4820c4c44c8b35b1903a6c72a5bdccec92d0840cf5c858c498c66786ba5/grpcio-1.74.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98e0b7434a7fa4e3e63f250456eaef52499fba5ae661c58cc5b5477d11e7182", size = 6219544, upload-time = "2025-07-24T18:53:11.221Z" }, + { url = "https://files.pythonhosted.org/packages/a4/17/0537630a921365928f5abb6d14c79ba4dcb3e662e0dbeede8af4138d9dcf/grpcio-1.74.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:662456c4513e298db6d7bd9c3b8df6f75f8752f0ba01fb653e252ed4a59b5a5d", size = 6334863, upload-time = "2025-07-24T18:53:12.925Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a6/85ca6cb9af3f13e1320d0a806658dca432ff88149d5972df1f7b51e87127/grpcio-1.74.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3d14e3c4d65e19d8430a4e28ceb71ace4728776fd6c3ce34016947474479683f", size = 7019320, upload-time = "2025-07-24T18:53:15.002Z" }, + { url = "https://files.pythonhosted.org/packages/4f/a7/fe2beab970a1e25d2eff108b3cf4f7d9a53c185106377a3d1989216eba45/grpcio-1.74.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bf949792cee20d2078323a9b02bacbbae002b9e3b9e2433f2741c15bdeba1c4", size = 6514228, upload-time = "2025-07-24T18:53:16.999Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c2/2f9c945c8a248cebc3ccda1b7a1bf1775b9d7d59e444dbb18c0014e23da6/grpcio-1.74.0-cp311-cp311-win32.whl", hash = "sha256:55b453812fa7c7ce2f5c88be3018fb4a490519b6ce80788d5913f3f9d7da8c7b", size = 3817216, upload-time = "2025-07-24T18:53:20.564Z" }, + { url = "https://files.pythonhosted.org/packages/ff/d1/a9cf9c94b55becda2199299a12b9feef0c79946b0d9d34c989de6d12d05d/grpcio-1.74.0-cp311-cp311-win_amd64.whl", hash = "sha256:86ad489db097141a907c559988c29718719aa3e13370d40e20506f11b4de0d11", size = 4495380, upload-time = "2025-07-24T18:53:22.058Z" }, + { url = "https://files.pythonhosted.org/packages/4c/5d/e504d5d5c4469823504f65687d6c8fb97b7f7bf0b34873b7598f1df24630/grpcio-1.74.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8533e6e9c5bd630ca98062e3a1326249e6ada07d05acf191a77bc33f8948f3d8", size = 5445551, upload-time = "2025-07-24T18:53:23.641Z" }, + { url = "https://files.pythonhosted.org/packages/43/01/730e37056f96f2f6ce9f17999af1556df62ee8dab7fa48bceeaab5fd3008/grpcio-1.74.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:2918948864fec2a11721d91568effffbe0a02b23ecd57f281391d986847982f6", size = 10979810, upload-time = "2025-07-24T18:53:25.349Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/09fd100473ea5c47083889ca47ffd356576173ec134312f6aa0e13111dee/grpcio-1.74.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:60d2d48b0580e70d2e1954d0d19fa3c2e60dd7cbed826aca104fff518310d1c5", size = 5941946, upload-time = "2025-07-24T18:53:27.387Z" }, + { url = "https://files.pythonhosted.org/packages/8a/99/12d2cca0a63c874c6d3d195629dcd85cdf5d6f98a30d8db44271f8a97b93/grpcio-1.74.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3601274bc0523f6dc07666c0e01682c94472402ac2fd1226fd96e079863bfa49", size = 6621763, upload-time = "2025-07-24T18:53:29.193Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/2c/930b0e7a2f1029bbc193443c7bc4dc2a46fedb0203c8793dcd97081f1520/grpcio-1.74.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:176d60a5168d7948539def20b2a3adcce67d72454d9ae05969a2e73f3a0feee7", size = 6180664, upload-time = "2025-07-24T18:53:30.823Z" }, + { url = "https://files.pythonhosted.org/packages/db/d5/ff8a2442180ad0867717e670f5ec42bfd8d38b92158ad6bcd864e6d4b1ed/grpcio-1.74.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e759f9e8bc908aaae0412642afe5416c9f983a80499448fcc7fab8692ae044c3", size = 6301083, upload-time = "2025-07-24T18:53:32.454Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ba/b361d390451a37ca118e4ec7dccec690422e05bc85fba2ec72b06cefec9f/grpcio-1.74.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e7c4389771855a92934b2846bd807fc25a3dfa820fd912fe6bd8136026b2707", size = 6994132, upload-time = "2025-07-24T18:53:34.506Z" }, + { url = "https://files.pythonhosted.org/packages/3b/0c/3a5fa47d2437a44ced74141795ac0251bbddeae74bf81df3447edd767d27/grpcio-1.74.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cce634b10aeab37010449124814b05a62fb5f18928ca878f1bf4750d1f0c815b", size = 6489616, upload-time = "2025-07-24T18:53:36.217Z" }, + { url = "https://files.pythonhosted.org/packages/ae/95/ab64703b436d99dc5217228babc76047d60e9ad14df129e307b5fec81fd0/grpcio-1.74.0-cp312-cp312-win32.whl", hash = "sha256:885912559974df35d92219e2dc98f51a16a48395f37b92865ad45186f294096c", size = 3807083, upload-time = "2025-07-24T18:53:37.911Z" }, + { url = "https://files.pythonhosted.org/packages/84/59/900aa2445891fc47a33f7d2f76e00ca5d6ae6584b20d19af9c06fa09bf9a/grpcio-1.74.0-cp312-cp312-win_amd64.whl", hash = "sha256:42f8fee287427b94be63d916c90399ed310ed10aadbf9e2e5538b3e497d269bc", size = 4490123, upload-time = "2025-07-24T18:53:39.528Z" }, + { url = "https://files.pythonhosted.org/packages/d4/d8/1004a5f468715221450e66b051c839c2ce9a985aa3ee427422061fcbb6aa/grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89", size = 5449488, upload-time = "2025-07-24T18:53:41.174Z" }, + { url = "https://files.pythonhosted.org/packages/94/0e/33731a03f63740d7743dced423846c831d8e6da808fcd02821a4416df7fa/grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01", size = 10974059, upload-time = "2025-07-24T18:53:43.066Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c6/3d2c14d87771a421205bdca991467cfe473ee4c6a1231c1ede5248c62ab8/grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e", size = 5945647, upload-time = "2025-07-24T18:53:45.269Z" }, + { url = "https://files.pythonhosted.org/packages/c5/83/5a354c8aaff58594eef7fffebae41a0f8995a6258bbc6809b800c33d4c13/grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91", size = 6626101, upload-time = "2025-07-24T18:53:47.015Z" }, + { url = "https://files.pythonhosted.org/packages/3f/ca/4fdc7bf59bf6994aa45cbd4ef1055cd65e2884de6113dbd49f75498ddb08/grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249", size = 6182562, upload-time = "2025-07-24T18:53:48.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/48/2869e5b2c1922583686f7ae674937986807c2f676d08be70d0a541316270/grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362", size = 6303425, upload-time = "2025-07-24T18:53:50.847Z" }, + { url = "https://files.pythonhosted.org/packages/a6/0e/bac93147b9a164f759497bc6913e74af1cb632c733c7af62c0336782bd38/grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f", size = 6996533, upload-time = "2025-07-24T18:53:52.747Z" }, + { url = "https://files.pythonhosted.org/packages/84/35/9f6b2503c1fd86d068b46818bbd7329db26a87cdd8c01e0d1a9abea1104c/grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20", size = 6491489, upload-time = "2025-07-24T18:53:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/75/33/a04e99be2a82c4cbc4039eb3a76f6c3632932b9d5d295221389d10ac9ca7/grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa", size = 3805811, upload-time = "2025-07-24T18:53:56.798Z" }, + { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214, upload-time = "2025-07-24T18:53:59.771Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pem" +version = "23.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/86/16c0b6789816f8d53f2f208b5a090c9197da8a6dae4d490554bb1bedbb09/pem-23.1.0.tar.gz", hash = "sha256:06503ff2441a111f853ce4e8b9eb9d5fedb488ebdbf560115d3dd53a1b4afc73", size = 43796, upload-time = "2023-06-21T10:24:40.539Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/97/8299a481ae6c08494b5d53511e6a4746775d8a354c685c69d8796b2ed482/pem-23.1.0-py3-none-any.whl", hash = 
"sha256:78bbb1e75b737891350cb9499cbba31da5d59545f360f44163c0bc751cad55d3", size = 9195, upload-time = "2023-06-21T10:24:39.164Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "protobuf" +version = "6.32.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/a4/cc17347aa2897568beece2e674674359f911d6fe21b0b8d6268cd42727ac/protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d", size = 440635, upload-time = "2025-09-11T21:38:42.935Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/98/645183ea03ab3995d29086b8bf4f7562ebd3d10c9a4b14ee3f20d47cfe50/protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085", size = 424411, upload-time = "2025-09-11T21:38:27.427Z" }, + { url = "https://files.pythonhosted.org/packages/8c/f3/6f58f841f6ebafe076cebeae33fc336e900619d34b1c93e4b5c97a81fdfa/protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1", size = 435738, upload-time = "2025-09-11T21:38:30.959Z" }, + { url = "https://files.pythonhosted.org/packages/10/56/a8a3f4e7190837139e68c7002ec749190a163af3e330f65d90309145a210/protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281", size = 426454, upload-time = "2025-09-11T21:38:34.076Z" }, + { url = "https://files.pythonhosted.org/packages/3f/be/8dd0a927c559b37d7a6c8ab79034fd167dcc1f851595f2e641ad62be8643/protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4", size = 322874, upload-time = "2025-09-11T21:38:35.509Z" }, + { url = "https://files.pythonhosted.org/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710", size = 322013, upload-time = "2025-09-11T21:38:37.017Z" }, + { url = "https://files.pythonhosted.org/packages/97/b7/15cc7d93443d6c6a84626ae3258a91f4c6ac8c0edd5df35ea7658f71b79c/protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346", size = 169289, upload-time = "2025-09-11T21:38:41.234Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pyopenssl" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073, upload-time = "2025-09-17T00:32:21.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268, upload-time = "2025-09-17T00:32:19.474Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "spiffe" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "grpcio" }, + { name = "pem" }, + { name = "protobuf" }, + { name = "pyasn1" }, + { name = "pyasn1-modules" }, + { name = "pyjwt", extra = ["crypto"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/f9/716f29e5e0cb13d2786cf30fd2b001898cfe7cf33134f2bcd52da3e2b49c/spiffe-0.2.2.tar.gz", hash = "sha256:e4ca1247b1a08631a3f822eec7db70447b6d99734ff50670f2c9020dfb006231", size = 34912, upload-time = "2025-07-15T16:50:04.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/7e/50b8f8d4bbeed9b7598cb2e15a7f3b9a55b1635d50d67d763f02ffc4cc93/spiffe-0.2.2-py3-none-any.whl", hash = "sha256:a53fb39ab59408b15dd2f969989045d68bc6b3ebfd283bf2f77e9ff9a66b047b", size = 56009, upload-time = "2025-07-15T16:50:02.958Z" }, +] + +[[package]] +name = "spiffe-tls" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyopenssl" }, + { name = "spiffe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/19/2228e634dfa76ba41ef2896d69205441beea5abce7b871430ca618210d9d/spiffe_tls-0.2.1.tar.gz", hash = "sha256:5898b91f7e4f8db9f8cbdd00f625a113e2ce5fc7379cf0104c6d19b73d53ddef", size = 10520, upload-time = "2025-06-11T18:28:31.87Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/e1/fe672f042adcd86291a39c0ed7ada5fdc13be945dbf7b53f2f3df59d0c7e/spiffe_tls-0.2.1-py3-none-any.whl", hash = "sha256:b90d302c92deaedd8278339b0c41d6f98d7c512a76e947e013e6189c9f75a53d", size = 15522, upload-time = "2025-06-11T18:28:30.948Z" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = 
"sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size 
= 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = 
"sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "uuid" +version = "1.30" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ce/63/f42f5aa951ebf2c8dac81f77a8edcc1c218640a2a35a03b9ff2d4aa64c3d/uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f", size = 5811, upload-time = "2007-05-26T11:13:24Z" } diff --git a/sdk/examples/example-js/.gitignore b/sdk/examples/example-js/.gitignore index f4e2c6d6b..eccb865c2 100644 --- a/sdk/examples/example-js/.gitignore +++ b/sdk/examples/example-js/.gitignore @@ -1,3 +1,3 @@ -node_modules/ -dist/ -*.tsbuildinfo +node_modules/ +dist/ +*.tsbuildinfo diff --git a/sdk/examples/example-js/example.js b/sdk/examples/example-js/example.js index 2d3ece866..a23e6fd54 100644 --- a/sdk/examples/example-js/example.js +++ b/sdk/examples/example-js/example.js @@ -1,140 +1,140 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -import { Client, Config, models } from 'agntcy-dir'; - -function generateRecords(names) { - return names.map(name => JSON.parse(` -{ - "data": { - "name": "${name}", - "version": "v1.0.0", - "schema_version": "0.7.0", - "description": "My example agent", - "authors": ["AGNTCY"], - "created_at": "2025-03-19T17:06:37Z", - "skills": [ - { - "name": "natural_language_processing/natural_language_generation/text_completion", - "id": 10201 - }, - { - "name": "natural_language_processing/analytical_reasoning/problem_solving", - "id": 10702 - } - ], - "locators": [ - { - "type": "docker_image", - "url": "https://ghcr.io/agntcy/marketing-strategy" - } - ], - "domains": [ - { - "name": "technology/networking", - "id": 103 - } - ], - "modules": [ - { - "name": "integration/a2a", - "id": 203, - "prompts": "", - "data": { - "protocol_version": "lightweight orchestra moral", - "card_data": "centres", - "capabilities": [ - "state_transition_history", - "push_notifications" - ], - "transports": [ - "grpc", - "http" - ], - "output_modes": [ - "text/html" - ] - } - } - ] - } -} - `)); -} - -(async () => { - // Create client - const config = Config.loadFromEnv(); - let t = await Client.createGRPCTransport(config); - const client = new Client(config, t); - - // Create record objects - const records = generateRecords(['example-record', 'example-record2']); - - // Push objects - const pushed_refs = await client.push(records); - pushed_refs.forEach(ref => { - console.log('Pushed object ref:', ref); - }); - - // Pull objects - const pulled_records = await client.pull(pushed_refs); - pulled_records.forEach(pulled_record => { - console.log('Pulled object:', pulled_record); - }); - - // Lookup objects - const metadatas = await client.lookup(pushed_refs); - metadatas.forEach(metadata => { - console.log('Lookup result:', metadata); - }); - - // Search objects - const search_response = await client.search({ - queries: [{ - type: models.search_v1.RecordQueryType.SKILL_ID, - value: "10201" - }], - limit: 3 - }); - console.log('Search result:', search_response); - - // Publish objects - await client.publish({ - request: { - case: "recordRefs", - value: { - refs: pushed_refs, - } - } - }); - console.log('Objects published.'); - - // List objects in the routing table - const list_response = await client.list({ - queries: [ - { - type: models.routing_v1.RecordQueryType.SKILL, - value: 
'natural_language_processing/analytical_reasoning/problem_solving' - } - ], - }); - list_response.forEach(r => { - console.log('Listed objects:', r); - }); - - // Unpublish objects - await client.unpublish({ - request: { - case: "recordRefs", - value: { - refs: pushed_refs, - } - } - }); - console.log('Objects unpublished.'); - - // Delete objects - await client.delete(pushed_refs); - console.log('Objects deleted.'); -})(); +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +import { Client, Config, models } from 'agntcy-dir'; + +function generateRecords(names) { + return names.map(name => JSON.parse(` +{ + "data": { + "name": "${name}", + "version": "v1.0.0", + "schema_version": "0.7.0", + "description": "My example agent", + "authors": ["AGNTCY"], + "created_at": "2025-03-19T17:06:37Z", + "skills": [ + { + "name": "natural_language_processing/natural_language_generation/text_completion", + "id": 10201 + }, + { + "name": "natural_language_processing/analytical_reasoning/problem_solving", + "id": 10702 + } + ], + "locators": [ + { + "type": "docker_image", + "url": "https://ghcr.io/agntcy/marketing-strategy" + } + ], + "domains": [ + { + "name": "technology/networking", + "id": 103 + } + ], + "modules": [ + { + "name": "integration/a2a", + "id": 203, + "prompts": "", + "data": { + "protocol_version": "lightweight orchestra moral", + "card_data": "centres", + "capabilities": [ + "state_transition_history", + "push_notifications" + ], + "transports": [ + "grpc", + "http" + ], + "output_modes": [ + "text/html" + ] + } + } + ] + } +} + `)); +} + +(async () => { + // Create client + const config = Config.loadFromEnv(); + let t = await Client.createGRPCTransport(config); + const client = new Client(config, t); + + // Create record objects + const records = generateRecords(['example-record', 'example-record2']); + + // Push objects + const pushed_refs = await client.push(records); + pushed_refs.forEach(ref => { + console.log('Pushed object ref:', ref); + }); + + // Pull objects + const pulled_records = await client.pull(pushed_refs); + pulled_records.forEach(pulled_record => { + console.log('Pulled object:', pulled_record); + }); + + // Lookup objects + const metadatas = await client.lookup(pushed_refs); + metadatas.forEach(metadata => { + console.log('Lookup result:', metadata); + }); + + // Search objects + const search_response = await client.search({ + queries: [{ + type: models.search_v1.RecordQueryType.SKILL_ID, + value: "10201" + }], + limit: 3 + }); + console.log('Search result:', search_response); + + // Publish objects + await client.publish({ + request: { + case: "recordRefs", + value: { + refs: pushed_refs, + } + } + }); + console.log('Objects published.'); + + // List objects in the routing table + const list_response = await client.list({ + queries: [ + { + type: models.routing_v1.RecordQueryType.SKILL, + value: 'natural_language_processing/analytical_reasoning/problem_solving' + } + ], + }); + list_response.forEach(r => { + console.log('Listed objects:', r); + }); + + // Unpublish objects + await client.unpublish({ + request: { + case: "recordRefs", + value: { + refs: pushed_refs, + } + } + }); + console.log('Objects unpublished.'); + + // Delete objects + await client.delete(pushed_refs); + console.log('Objects deleted.'); +})(); diff --git a/sdk/examples/example-js/package-lock.json b/sdk/examples/example-js/package-lock.json index a6f93409a..989b3bb8d 100644 --- a/sdk/examples/example-js/package-lock.json +++ 
b/sdk/examples/example-js/package-lock.json @@ -1,3423 +1,3423 @@ -{ - "name": "dir-example", - "version": "0.0.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "dir-example", - "version": "0.0.0", - "dependencies": { - "agntcy-dir": "file:../../dir-js" - } - }, - "../../dir-js": { - "name": "agntcy-dir", - "version": "0.4.0", - "license": "Apache-2.0", - "dependencies": { - "@buf/agntcy_dir.bufbuild_es": "^2.9.0-20251007090412-102a9af80c74.1", - "@bufbuild/protobuf": "^2.8.0", - "@connectrpc/connect": "^2.1.0", - "@connectrpc/connect-node": "^2.1.0", - "@grpc/grpc-js": "^1.13.4", - "spiffe": "^0.4.0" - }, - "devDependencies": { - "@microsoft/api-extractor": "^7.52.13", - "@rollup/plugin-json": "^6.1.0", - "@rollup/plugin-node-resolve": "^16.0.1", - "@types/node": "^22.7.5", - "@types/uuid": "^10.0.0", - "rollup-plugin-typescript2": "^0.36.0", - "ts-node": "^10.9.2", - "typescript": "^5.9.2", - "typescript-eslint": "^8.44.0", - "uuid": "^11.1.0", - "vitest": "^3.2.4" - }, - "engines": { - "node": ">=20.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-linux-x64-gnu": "4.50.2" - } - }, - "../../dir-js/node_modules/@buf/agntcy_dir.bufbuild_es": { - "version": "2.8.0-20250915154105-152c4b7b4ec6.1", - "peerDependencies": { - "@bufbuild/protobuf": "^2.8.0" - } - }, - "../../dir-js/node_modules/@bufbuild/protobuf": { - "version": "2.8.0", - "license": "(Apache-2.0 AND BSD-3-Clause)" - }, - "../../dir-js/node_modules/@connectrpc/connect": { - "version": "2.1.0", - "license": "Apache-2.0", - "peerDependencies": { - "@bufbuild/protobuf": "^2.7.0" - } - }, - "../../dir-js/node_modules/@connectrpc/connect-node": { - "version": "2.1.0", - "license": "Apache-2.0", - "engines": { - "node": ">=20" - }, - "peerDependencies": { - "@bufbuild/protobuf": "^2.7.0", - "@connectrpc/connect": "2.1.0" - } - }, - "../../dir-js/node_modules/@cspotcode/source-map-support": { - "version": "0.8.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/trace-mapping": "0.3.9" - }, - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/@esbuild/darwin-arm64": { - "version": "0.25.9", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "../../dir-js/node_modules/@eslint-community/eslint-utils": { - "version": "4.9.0", - "dev": true, - "license": "MIT", - "dependencies": { - "eslint-visitor-keys": "^3.4.3" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - }, - "peerDependencies": { - "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" - } - }, - "../../dir-js/node_modules/@eslint-community/regexpp": { - "version": "4.12.1", - "dev": true, - "license": "MIT", - "engines": { - "node": "^12.0.0 || ^14.0.0 || >=16.0.0" - } - }, - "../../dir-js/node_modules/@eslint/eslintrc": { - "version": "2.1.4", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.6.0", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/@eslint/js": { - "version": "8.57.1", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": "^12.22.0 || ^14.17.0 || 
>=16.0.0" - } - }, - "../../dir-js/node_modules/@grpc/grpc-js": { - "version": "1.13.4", - "license": "Apache-2.0", - "dependencies": { - "@grpc/proto-loader": "^0.7.13", - "@js-sdsl/ordered-map": "^4.4.2" - }, - "engines": { - "node": ">=12.10.0" - } - }, - "../../dir-js/node_modules/@grpc/proto-loader": { - "version": "0.7.15", - "license": "Apache-2.0", - "dependencies": { - "lodash.camelcase": "^4.3.0", - "long": "^5.0.0", - "protobufjs": "^7.2.5", - "yargs": "^17.7.2" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/@humanwhocodes/config-array": { - "version": "0.13.0", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "@humanwhocodes/object-schema": "^2.0.3", - "debug": "^4.3.1", - "minimatch": "^3.0.5" - }, - "engines": { - "node": ">=10.10.0" - } - }, - "../../dir-js/node_modules/@humanwhocodes/module-importer": { - "version": "1.0.1", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "engines": { - "node": ">=12.22" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/nzakas" - } - }, - "../../dir-js/node_modules/@humanwhocodes/object-schema": { - "version": "2.0.3", - "dev": true, - "license": "BSD-3-Clause", - "peer": true - }, - "../../dir-js/node_modules/@isaacs/balanced-match": { - "version": "4.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": "20 || >=22" - } - }, - "../../dir-js/node_modules/@isaacs/brace-expansion": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@isaacs/balanced-match": "^4.0.1" - }, - "engines": { - "node": "20 || >=22" - } - }, - "../../dir-js/node_modules/@jridgewell/resolve-uri": { - "version": "3.1.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, - "../../dir-js/node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.5", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@jridgewell/trace-mapping": { - "version": "0.3.9", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/resolve-uri": "^3.0.3", - "@jridgewell/sourcemap-codec": "^1.4.10" - } - }, - "../../dir-js/node_modules/@js-sdsl/ordered-map": { - "version": "4.4.2", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, - "../../dir-js/node_modules/@microsoft/api-extractor": { - "version": "7.52.13", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/api-extractor-model": "7.30.7", - "@microsoft/tsdoc": "~0.15.1", - "@microsoft/tsdoc-config": "~0.17.1", - "@rushstack/node-core-library": "5.14.0", - "@rushstack/rig-package": "0.5.3", - "@rushstack/terminal": "0.16.0", - "@rushstack/ts-command-line": "5.0.3", - "lodash": "~4.17.15", - "minimatch": "10.0.3", - "resolve": "~1.22.1", - "semver": "~7.5.4", - "source-map": "~0.6.1", - "typescript": "5.8.2" - }, - "bin": { - "api-extractor": "bin/api-extractor" - } - }, - "../../dir-js/node_modules/@microsoft/api-extractor-model": { - "version": "7.30.7", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/tsdoc": "~0.15.1", - "@microsoft/tsdoc-config": "~0.17.1", - "@rushstack/node-core-library": "5.14.0" - } - }, - "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/minimatch": { - "version": "10.0.3", - "dev": true, - "license": "ISC", - "dependencies": { - "@isaacs/brace-expansion": "^5.0.0" - }, - "engines": { - "node": "20 || >=22" - }, - 
"funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/typescript": { - "version": "5.8.2", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "../../dir-js/node_modules/@microsoft/tsdoc": { - "version": "0.15.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@microsoft/tsdoc-config": { - "version": "0.17.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@microsoft/tsdoc": "0.15.1", - "ajv": "~8.12.0", - "jju": "~1.4.0", - "resolve": "~1.22.2" - } - }, - "../../dir-js/node_modules/@microsoft/tsdoc-config/node_modules/ajv": { - "version": "8.12.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "../../dir-js/node_modules/@microsoft/tsdoc-config/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@nodelib/fs.scandir": { - "version": "2.1.5", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - }, - "engines": { - "node": ">= 8" - } - }, - "../../dir-js/node_modules/@nodelib/fs.stat": { - "version": "2.0.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - "../../dir-js/node_modules/@nodelib/fs.walk": { - "version": "1.2.8", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - }, - "engines": { - "node": ">= 8" - } - }, - "../../dir-js/node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/base64": { - "version": "1.1.2", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/eventemitter": { - "version": "1.1.0", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/fetch": { - "version": "1.1.0", - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "../../dir-js/node_modules/@protobufjs/float": { - "version": "1.0.2", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/path": { - "version": "1.1.2", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/pool": { - "version": "1.1.0", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/@rollup/plugin-json": { - "version": "6.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^5.1.0" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" - }, - 
"peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rollup/plugin-node-resolve": { - "version": "16.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^5.0.1", - "@types/resolve": "1.20.2", - "deepmerge": "^4.2.2", - "is-module": "^1.0.0", - "resolve": "^1.22.1" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^2.78.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rollup/pluginutils": { - "version": "5.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0", - "estree-walker": "^2.0.2", - "picomatch": "^4.0.2" - }, - "engines": { - "node": ">=14.0.0" - }, - "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" - }, - "peerDependenciesMeta": { - "rollup": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rollup/pluginutils/node_modules/picomatch": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "../../dir-js/node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.50.2", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "../../dir-js/node_modules/@rushstack/node-core-library": { - "version": "5.14.0", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "~8.13.0", - "ajv-draft-04": "~1.0.0", - "ajv-formats": "~3.0.1", - "fs-extra": "~11.3.0", - "import-lazy": "~4.0.0", - "jju": "~1.4.0", - "resolve": "~1.22.1", - "semver": "~7.5.4" - }, - "peerDependencies": { - "@types/node": "*" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/ajv": { - "version": "8.13.0", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.4.1" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/ajv-draft-04": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "peerDependencies": { - "ajv": "^8.5.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/fs-extra": { - "version": "11.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=14.14" - } - }, - "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/semver": { - "version": "7.5.4", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "../../dir-js/node_modules/@rushstack/rig-package": { - "version": "0.5.3", - "dev": true, - "license": "MIT", - "dependencies": { - "resolve": "~1.22.1", - "strip-json-comments": "~3.1.1" - } - }, - "../../dir-js/node_modules/@rushstack/terminal": { - "version": "0.16.0", - "dev": true, - "license": "MIT", - "dependencies": { - 
"@rushstack/node-core-library": "5.14.0", - "supports-color": "~8.1.1" - }, - "peerDependencies": { - "@types/node": "*" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@rushstack/terminal/node_modules/supports-color": { - "version": "8.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "../../dir-js/node_modules/@rushstack/ts-command-line": { - "version": "5.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@rushstack/terminal": "0.16.0", - "@types/argparse": "1.0.38", - "argparse": "~1.0.9", - "string-argv": "~0.3.1" - } - }, - "../../dir-js/node_modules/@rushstack/ts-command-line/node_modules/argparse": { - "version": "1.0.10", - "dev": true, - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "../../dir-js/node_modules/@tsconfig/node10": { - "version": "1.0.11", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@tsconfig/node12": { - "version": "1.0.11", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@tsconfig/node14": { - "version": "1.0.3", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@tsconfig/node16": { - "version": "1.0.4", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@types/argparse": { - "version": "1.0.38", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@types/chai": { - "version": "5.2.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/deep-eql": "*" - } - }, - "../../dir-js/node_modules/@types/deep-eql": { - "version": "4.0.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@types/estree": { - "version": "1.0.8", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@types/node": { - "version": "22.18.4", - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "../../dir-js/node_modules/@types/resolve": { - "version": "1.20.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@types/uuid": { - "version": "10.0.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/@typescript-eslint/project-service": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.44.0", - "@typescript-eslint/types": "^8.44.0", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/@typescript-eslint/project-service/node_modules/@typescript-eslint/types": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "../../dir-js/node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/@ungap/structured-clone": { - "version": "1.3.0", - "dev": true, - 
"license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/@vitest/expect": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/spy": "3.2.4", - "@vitest/utils": "3.2.4", - "chai": "^5.2.0", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/@vitest/mocker": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "3.2.4", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.17" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "../../dir-js/node_modules/@vitest/mocker/node_modules/estree-walker": { - "version": "3.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "../../dir-js/node_modules/@vitest/pretty-format": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/@vitest/runner": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "3.2.4", - "pathe": "^2.0.3", - "strip-literal": "^3.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/@vitest/snapshot": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.4", - "magic-string": "^0.30.17", - "pathe": "^2.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/@vitest/spy": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^4.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/@vitest/utils": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.4", - "loupe": "^3.1.4", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/acorn": { - "version": "8.15.0", - "dev": true, - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "../../dir-js/node_modules/acorn-jsx": { - "version": "5.3.2", - "dev": true, - "license": "MIT", - "peer": true, - "peerDependencies": { - "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" - } - }, - "../../dir-js/node_modules/acorn-walk": { - "version": "8.3.4", - "dev": true, - "license": "MIT", - "dependencies": { - "acorn": "^8.11.0" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "../../dir-js/node_modules/ajv": { - "version": "6.12.6", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "../../dir-js/node_modules/ajv-formats": { - "version": "3.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "ajv": "^8.0.0" - }, - "peerDependencies": { - "ajv": "^8.0.0" - }, - "peerDependenciesMeta": { - "ajv": { - "optional": true - } - } - }, - 
"../../dir-js/node_modules/ajv-formats/node_modules/ajv": { - "version": "8.17.1", - "dev": true, - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.3", - "fast-uri": "^3.0.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "../../dir-js/node_modules/ajv-formats/node_modules/json-schema-traverse": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/ansi-regex": { - "version": "5.0.1", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/ansi-styles": { - "version": "4.3.0", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "../../dir-js/node_modules/arg": { - "version": "4.1.3", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/argparse": { - "version": "2.0.1", - "dev": true, - "license": "Python-2.0", - "peer": true - }, - "../../dir-js/node_modules/assertion-error": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/balanced-match": { - "version": "1.0.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/brace-expansion": { - "version": "1.1.12", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "../../dir-js/node_modules/braces": { - "version": "3.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/cac": { - "version": "6.7.14", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/callsites": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/chai": { - "version": "5.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "../../dir-js/node_modules/chalk": { - "version": "4.1.2", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "../../dir-js/node_modules/check-error": { - "version": "2.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, - "../../dir-js/node_modules/cliui": { - "version": "8.0.1", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/color-convert": { - "version": "2.0.1", - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "../../dir-js/node_modules/color-name": { - "version": "1.1.4", - "license": "MIT" - }, - "../../dir-js/node_modules/commondir": { - "version": "1.0.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/concat-map": { - "version": "0.0.1", - "dev": true, - "license": "MIT", - "peer": true - }, - 
"../../dir-js/node_modules/create-require": { - "version": "1.1.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/cross-spawn": { - "version": "7.0.6", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "../../dir-js/node_modules/debug": { - "version": "4.4.3", - "dev": true, - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "../../dir-js/node_modules/deep-eql": { - "version": "5.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/deep-is": { - "version": "0.1.4", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/deepmerge": { - "version": "4.3.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/diff": { - "version": "4.0.2", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.3.1" - } - }, - "../../dir-js/node_modules/doctrine": { - "version": "3.0.0", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "esutils": "^2.0.2" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "../../dir-js/node_modules/emoji-regex": { - "version": "8.0.0", - "license": "MIT" - }, - "../../dir-js/node_modules/es-module-lexer": { - "version": "1.7.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/esbuild": { - "version": "0.25.9", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.9", - "@esbuild/android-arm": "0.25.9", - "@esbuild/android-arm64": "0.25.9", - "@esbuild/android-x64": "0.25.9", - "@esbuild/darwin-arm64": "0.25.9", - "@esbuild/darwin-x64": "0.25.9", - "@esbuild/freebsd-arm64": "0.25.9", - "@esbuild/freebsd-x64": "0.25.9", - "@esbuild/linux-arm": "0.25.9", - "@esbuild/linux-arm64": "0.25.9", - "@esbuild/linux-ia32": "0.25.9", - "@esbuild/linux-loong64": "0.25.9", - "@esbuild/linux-mips64el": "0.25.9", - "@esbuild/linux-ppc64": "0.25.9", - "@esbuild/linux-riscv64": "0.25.9", - "@esbuild/linux-s390x": "0.25.9", - "@esbuild/linux-x64": "0.25.9", - "@esbuild/netbsd-arm64": "0.25.9", - "@esbuild/netbsd-x64": "0.25.9", - "@esbuild/openbsd-arm64": "0.25.9", - "@esbuild/openbsd-x64": "0.25.9", - "@esbuild/openharmony-arm64": "0.25.9", - "@esbuild/sunos-x64": "0.25.9", - "@esbuild/win32-arm64": "0.25.9", - "@esbuild/win32-ia32": "0.25.9", - "@esbuild/win32-x64": "0.25.9" - } - }, - "../../dir-js/node_modules/escalade": { - "version": "3.2.0", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/escape-string-regexp": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/eslint": { - "version": "8.57.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.4", - "@eslint/js": "8.57.1", - "@humanwhocodes/config-array": "^0.13.0", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - 
"@ungap/structured-clone": "^1.2.0", - "ajv": "^6.12.4", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.2.2", - "eslint-visitor-keys": "^3.4.3", - "espree": "^9.6.1", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "graphemer": "^1.4.0", - "ignore": "^5.2.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-yaml": "^4.1.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.3", - "strip-ansi": "^6.0.1", - "text-table": "^0.2.0" - }, - "bin": { - "eslint": "bin/eslint.js" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/eslint/node_modules/eslint-scope": { - "version": "7.2.2", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/eslint/node_modules/estraverse": { - "version": "5.3.0", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=4.0" - } - }, - "../../dir-js/node_modules/espree": { - "version": "9.6.1", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "acorn": "^8.9.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/esquery": { - "version": "1.6.0", - "dev": true, - "license": "BSD-3-Clause", - "peer": true, - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "../../dir-js/node_modules/esquery/node_modules/estraverse": { - "version": "5.3.0", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=4.0" - } - }, - "../../dir-js/node_modules/esrecurse": { - "version": "4.3.0", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "../../dir-js/node_modules/esrecurse/node_modules/estraverse": { - "version": "5.3.0", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=4.0" - } - }, - "../../dir-js/node_modules/estree-walker": { - "version": "2.0.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/esutils": { - "version": "2.0.3", - "dev": true, - "license": "BSD-2-Clause", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/expect-type": { - "version": "1.2.2", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "../../dir-js/node_modules/fast-deep-equal": { - "version": "3.1.3", - "dev": true, - "license": "MIT" - }, - 
"../../dir-js/node_modules/fast-glob": { - "version": "3.3.3", - "dev": true, - "license": "MIT", - "dependencies": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.8" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "../../dir-js/node_modules/fast-glob/node_modules/glob-parent": { - "version": "5.1.2", - "dev": true, - "license": "ISC", - "dependencies": { - "is-glob": "^4.0.1" - }, - "engines": { - "node": ">= 6" - } - }, - "../../dir-js/node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/fast-levenshtein": { - "version": "2.0.6", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/fast-uri": { - "version": "3.1.0", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fastify" - }, - { - "type": "opencollective", - "url": "https://opencollective.com/fastify" - } - ], - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/fastq": { - "version": "1.19.1", - "dev": true, - "license": "ISC", - "dependencies": { - "reusify": "^1.0.4" - } - }, - "../../dir-js/node_modules/file-entry-cache": { - "version": "6.0.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "flat-cache": "^3.0.4" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "../../dir-js/node_modules/fill-range": { - "version": "7.1.1", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/find-cache-dir": { - "version": "3.3.2", - "dev": true, - "license": "MIT", - "dependencies": { - "commondir": "^1.0.1", - "make-dir": "^3.0.2", - "pkg-dir": "^4.1.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/avajs/find-cache-dir?sponsor=1" - } - }, - "../../dir-js/node_modules/find-up": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/flat-cache": { - "version": "3.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "flatted": "^3.2.9", - "keyv": "^4.5.3", - "rimraf": "^3.0.2" - }, - "engines": { - "node": "^10.12.0 || >=12.0.0" - } - }, - "../../dir-js/node_modules/flatted": { - "version": "3.3.3", - "dev": true, - "license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/fs-extra": { - "version": "10.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/fs.realpath": { - "version": "1.0.0", - "dev": true, - "license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/fsevents": { - "version": "2.3.3", - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "../../dir-js/node_modules/function-bind": { - "version": "1.1.2", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "../../dir-js/node_modules/get-caller-file": { - "version": "2.0.5", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, 
- "../../dir-js/node_modules/glob": { - "version": "7.2.3", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "../../dir-js/node_modules/glob-parent": { - "version": "6.0.2", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "../../dir-js/node_modules/globals": { - "version": "13.24.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "type-fest": "^0.20.2" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/graceful-fs": { - "version": "4.2.11", - "dev": true, - "license": "ISC" - }, - "../../dir-js/node_modules/graphemer": { - "version": "1.4.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/has-flag": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/hasown": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "../../dir-js/node_modules/ignore": { - "version": "5.3.2", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">= 4" - } - }, - "../../dir-js/node_modules/import-fresh": { - "version": "3.3.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/import-lazy": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/imurmurhash": { - "version": "0.1.4", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.8.19" - } - }, - "../../dir-js/node_modules/inflight": { - "version": "1.0.6", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "../../dir-js/node_modules/inherits": { - "version": "2.0.4", - "dev": true, - "license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/is-core-module": { - "version": "2.16.1", - "dev": true, - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "../../dir-js/node_modules/is-extglob": { - "version": "2.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/is-glob": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "dependencies": { - "is-extglob": "^2.1.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/is-module": { - "version": "1.0.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/is-number": { - "version": "7.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "../../dir-js/node_modules/is-path-inside": { - "version": "3.0.3", - "dev": 
true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/isexe": { - "version": "2.0.0", - "dev": true, - "license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/jju": { - "version": "1.4.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/js-yaml": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", - "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "../../dir-js/node_modules/json-buffer": { - "version": "3.0.1", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/json-schema-traverse": { - "version": "0.4.1", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/jsonfile": { - "version": "6.2.0", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "../../dir-js/node_modules/keyv": { - "version": "4.5.4", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "json-buffer": "3.0.1" - } - }, - "../../dir-js/node_modules/levn": { - "version": "0.4.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "../../dir-js/node_modules/locate-path": { - "version": "6.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "p-locate": "^5.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/lodash": { - "version": "4.17.21", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/lodash.camelcase": { - "version": "4.3.0", - "license": "MIT" - }, - "../../dir-js/node_modules/lodash.merge": { - "version": "4.6.2", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/long": { - "version": "5.3.2", - "license": "Apache-2.0" - }, - "../../dir-js/node_modules/loupe": { - "version": "3.2.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/lru-cache": { - "version": "6.0.0", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "../../dir-js/node_modules/magic-string": { - "version": "0.30.19", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.5" - } - }, - "../../dir-js/node_modules/make-dir": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^6.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/make-dir/node_modules/semver": { - "version": "6.3.1", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - } - }, - "../../dir-js/node_modules/make-error": { - "version": "1.3.6", - "dev": true, - "license": "ISC" - }, - "../../dir-js/node_modules/merge2": { - "version": "1.4.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, - 
"../../dir-js/node_modules/micromatch": { - "version": "4.0.8", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, - "../../dir-js/node_modules/minimatch": { - "version": "3.1.2", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "../../dir-js/node_modules/ms": { - "version": "2.1.3", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/nanoid": { - "version": "3.3.11", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "../../dir-js/node_modules/natural-compare": { - "version": "1.4.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/once": { - "version": "1.4.0", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "wrappy": "1" - } - }, - "../../dir-js/node_modules/optionator": { - "version": "0.9.4", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.5" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "../../dir-js/node_modules/p-limit": { - "version": "3.1.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/p-locate": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/p-try": { - "version": "2.2.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/parent-module": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "callsites": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/path-exists": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/path-is-absolute": { - "version": "1.0.1", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/path-key": { - "version": "3.1.1", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/path-parse": { - "version": "1.0.7", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/pathe": { - "version": "2.0.3", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/pathval": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "../../dir-js/node_modules/picocolors": { - "version": "1.1.1", - "dev": true, - "license": "ISC" - }, - "../../dir-js/node_modules/picomatch": { - "version": "2.3.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "../../dir-js/node_modules/pkg-dir": { - "version": "4.2.0", - "dev": true, - "license": 
"MIT", - "dependencies": { - "find-up": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/pkg-dir/node_modules/find-up": { - "version": "4.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/pkg-dir/node_modules/locate-path": { - "version": "5.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/pkg-dir/node_modules/p-limit": { - "version": "2.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/pkg-dir/node_modules/p-locate": { - "version": "4.1.0", - "dev": true, - "license": "MIT", - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/postcss": { - "version": "8.5.6", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "../../dir-js/node_modules/prelude-ls": { - "version": "1.2.1", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "../../dir-js/node_modules/protobufjs": { - "version": "7.5.4", - "hasInstallScript": true, - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/node": ">=13.7.0", - "long": "^5.0.0" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "../../dir-js/node_modules/punycode": { - "version": "2.3.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/queue-microtask": { - "version": "1.2.3", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "../../dir-js/node_modules/require-directory": { - "version": "2.1.1", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/require-from-string": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/resolve": { - "version": "1.22.10", - "dev": true, - "license": "MIT", - "dependencies": { - "is-core-module": "^2.16.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "../../dir-js/node_modules/resolve-from": { - "version": "4.0.0", - "dev": true, - "license": "MIT", - 
"peer": true, - "engines": { - "node": ">=4" - } - }, - "../../dir-js/node_modules/reusify": { - "version": "1.1.0", - "dev": true, - "license": "MIT", - "engines": { - "iojs": ">=1.0.0", - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/rimraf": { - "version": "3.0.2", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "../../dir-js/node_modules/rollup": { - "version": "4.50.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.8" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.50.2", - "@rollup/rollup-android-arm64": "4.50.2", - "@rollup/rollup-darwin-arm64": "4.50.2", - "@rollup/rollup-darwin-x64": "4.50.2", - "@rollup/rollup-freebsd-arm64": "4.50.2", - "@rollup/rollup-freebsd-x64": "4.50.2", - "@rollup/rollup-linux-arm-gnueabihf": "4.50.2", - "@rollup/rollup-linux-arm-musleabihf": "4.50.2", - "@rollup/rollup-linux-arm64-gnu": "4.50.2", - "@rollup/rollup-linux-arm64-musl": "4.50.2", - "@rollup/rollup-linux-loong64-gnu": "4.50.2", - "@rollup/rollup-linux-ppc64-gnu": "4.50.2", - "@rollup/rollup-linux-riscv64-gnu": "4.50.2", - "@rollup/rollup-linux-riscv64-musl": "4.50.2", - "@rollup/rollup-linux-s390x-gnu": "4.50.2", - "@rollup/rollup-linux-x64-gnu": "4.50.2", - "@rollup/rollup-linux-x64-musl": "4.50.2", - "@rollup/rollup-openharmony-arm64": "4.50.2", - "@rollup/rollup-win32-arm64-msvc": "4.50.2", - "@rollup/rollup-win32-ia32-msvc": "4.50.2", - "@rollup/rollup-win32-x64-msvc": "4.50.2", - "fsevents": "~2.3.2" - } - }, - "../../dir-js/node_modules/rollup-plugin-typescript2": { - "version": "0.36.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@rollup/pluginutils": "^4.1.2", - "find-cache-dir": "^3.3.2", - "fs-extra": "^10.0.0", - "semver": "^7.5.4", - "tslib": "^2.6.2" - }, - "peerDependencies": { - "rollup": ">=1.26.3", - "typescript": ">=2.4.0" - } - }, - "../../dir-js/node_modules/rollup-plugin-typescript2/node_modules/@rollup/pluginutils": { - "version": "4.2.1", - "dev": true, - "license": "MIT", - "dependencies": { - "estree-walker": "^2.0.1", - "picomatch": "^2.2.2" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "../../dir-js/node_modules/rollup-plugin-typescript2/node_modules/tslib": { - "version": "2.8.1", - "dev": true, - "license": "0BSD" - }, - "../../dir-js/node_modules/run-parallel": { - "version": "1.2.0", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "queue-microtask": "^1.2.2" - } - }, - "../../dir-js/node_modules/semver": { - "version": "7.7.2", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "../../dir-js/node_modules/shebang-command": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/shebang-regex": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/siginfo": { - "version": "2.0.0", 
- "dev": true, - "license": "ISC" - }, - "../../dir-js/node_modules/source-map": { - "version": "0.6.1", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/source-map-js": { - "version": "1.2.1", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/sprintf-js": { - "version": "1.0.3", - "dev": true, - "license": "BSD-3-Clause" - }, - "../../dir-js/node_modules/stackback": { - "version": "0.0.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/std-env": { - "version": "3.9.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/string-argv": { - "version": "0.3.2", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.6.19" - } - }, - "../../dir-js/node_modules/string-width": { - "version": "4.2.3", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/strip-ansi": { - "version": "6.0.1", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/strip-json-comments": { - "version": "3.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/strip-literal": { - "version": "3.0.0", - "dev": true, - "license": "MIT", - "dependencies": { - "js-tokens": "^9.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "../../dir-js/node_modules/strip-literal/node_modules/js-tokens": { - "version": "9.0.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/supports-color": { - "version": "7.2.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "../../dir-js/node_modules/text-table": { - "version": "0.2.0", - "dev": true, - "license": "MIT", - "peer": true - }, - "../../dir-js/node_modules/tinybench": { - "version": "2.9.0", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/tinyexec": { - "version": "0.3.2", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/tinyglobby": { - "version": "0.2.15", - "dev": true, - "license": "MIT", - "dependencies": { - "fdir": "^6.5.0", - "picomatch": "^4.0.3" - }, - "engines": { - "node": ">=12.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/SuperchupuDev" - } - }, - "../../dir-js/node_modules/tinyglobby/node_modules/fdir": { - "version": "6.5.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "../../dir-js/node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "../../dir-js/node_modules/tinypool": { - "version": "1.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - 
}, - "../../dir-js/node_modules/tinyrainbow": { - "version": "2.0.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "../../dir-js/node_modules/tinyspy": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "../../dir-js/node_modules/to-regex-range": { - "version": "5.0.1", - "dev": true, - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, - "../../dir-js/node_modules/ts-api-utils": { - "version": "2.1.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18.12" - }, - "peerDependencies": { - "typescript": ">=4.8.4" - } - }, - "../../dir-js/node_modules/ts-node": { - "version": "10.9.2", - "dev": true, - "license": "MIT", - "dependencies": { - "@cspotcode/source-map-support": "^0.8.0", - "@tsconfig/node10": "^1.0.7", - "@tsconfig/node12": "^1.0.7", - "@tsconfig/node14": "^1.0.0", - "@tsconfig/node16": "^1.0.2", - "acorn": "^8.4.1", - "acorn-walk": "^8.1.1", - "arg": "^4.1.0", - "create-require": "^1.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "v8-compile-cache-lib": "^3.0.1", - "yn": "3.1.1" - }, - "bin": { - "ts-node": "dist/bin.js", - "ts-node-cwd": "dist/bin-cwd.js", - "ts-node-esm": "dist/bin-esm.js", - "ts-node-script": "dist/bin-script.js", - "ts-node-transpile-only": "dist/bin-transpile.js", - "ts-script": "dist/bin-script-deprecated.js" - }, - "peerDependencies": { - "@swc/core": ">=1.2.50", - "@swc/wasm": ">=1.2.50", - "@types/node": "*", - "typescript": ">=2.7" - }, - "peerDependenciesMeta": { - "@swc/core": { - "optional": true - }, - "@swc/wasm": { - "optional": true - } - } - }, - "../../dir-js/node_modules/type-check": { - "version": "0.4.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "../../dir-js/node_modules/type-fest": { - "version": "0.20.2", - "dev": true, - "license": "(MIT OR CC0-1.0)", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "../../dir-js/node_modules/typescript": { - "version": "5.9.2", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "../../dir-js/node_modules/typescript-eslint": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/eslint-plugin": "8.44.0", - "@typescript-eslint/parser": "8.44.0", - "@typescript-eslint/typescript-estree": "8.44.0", - "@typescript-eslint/utils": "8.44.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.44.0", - "@typescript-eslint/type-utils": "8.44.0", - "@typescript-eslint/utils": "8.44.0", - "@typescript-eslint/visitor-keys": "8.44.0", - "graphemer": "^1.4.0", - "ignore": "^7.0.0", - "natural-compare": "^1.4.0", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", 
- "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^8.44.0", - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/parser": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/scope-manager": "8.44.0", - "@typescript-eslint/types": "8.44.0", - "@typescript-eslint/typescript-estree": "8.44.0", - "@typescript-eslint/visitor-keys": "8.44.0", - "debug": "^4.3.4" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/scope-manager": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.44.0", - "@typescript-eslint/visitor-keys": "8.44.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/type-utils": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.44.0", - "@typescript-eslint/typescript-estree": "8.44.0", - "@typescript-eslint/utils": "8.44.0", - "debug": "^4.3.4", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || ^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/types": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/typescript-estree": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/project-service": "8.44.0", - "@typescript-eslint/tsconfig-utils": "8.44.0", - "@typescript-eslint/types": "8.44.0", - "@typescript-eslint/visitor-keys": "8.44.0", - "debug": "^4.3.4", - "fast-glob": "^3.3.2", - "is-glob": "^4.0.3", - "minimatch": "^9.0.4", - "semver": "^7.6.0", - "ts-api-utils": "^2.1.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/utils": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@eslint-community/eslint-utils": "^4.7.0", - "@typescript-eslint/scope-manager": "8.44.0", - "@typescript-eslint/types": "8.44.0", - "@typescript-eslint/typescript-estree": "8.44.0" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^8.57.0 || 
^9.0.0", - "typescript": ">=4.8.4 <6.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/visitor-keys": { - "version": "8.44.0", - "dev": true, - "license": "MIT", - "dependencies": { - "@typescript-eslint/types": "8.44.0", - "eslint-visitor-keys": "^4.2.1" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/brace-expansion": { - "version": "2.0.2", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/eslint-visitor-keys": { - "version": "4.2.1", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/ignore": { - "version": "7.0.5", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "../../dir-js/node_modules/typescript-eslint/node_modules/minimatch": { - "version": "9.0.5", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "../../dir-js/node_modules/undici-types": { - "version": "6.21.0", - "license": "MIT" - }, - "../../dir-js/node_modules/universalify": { - "version": "2.0.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, - "../../dir-js/node_modules/uri-js": { - "version": "4.4.1", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "../../dir-js/node_modules/uuid": { - "version": "11.1.0", - "dev": true, - "funding": [ - "https://github.com/sponsors/broofa", - "https://github.com/sponsors/ctavan" - ], - "license": "MIT", - "bin": { - "uuid": "dist/esm/bin/uuid" - } - }, - "../../dir-js/node_modules/v8-compile-cache-lib": { - "version": "3.0.1", - "dev": true, - "license": "MIT" - }, - "../../dir-js/node_modules/vite": { - "version": "7.1.12", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.25.0", - "fdir": "^6.5.0", - "picomatch": "^4.0.3", - "postcss": "^8.5.6", - "rollup": "^4.43.0", - "tinyglobby": "^0.2.15" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^20.19.0 || >=22.12.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^20.19.0 || >=22.12.0", - "jiti": ">=1.21.0", - "less": "^4.0.0", - "lightningcss": "^1.21.0", - "sass": "^1.70.0", - "sass-embedded": "^1.70.0", - "stylus": ">=0.54.8", - "sugarss": "^5.0.0", - "terser": "^5.16.0", - "tsx": "^4.8.1", - "yaml": "^2.4.2" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "jiti": { - "optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - }, - "tsx": { - "optional": true - }, - "yaml": { - "optional": true - } - } - }, - "../../dir-js/node_modules/vite-node": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { 
- "cac": "^6.7.14", - "debug": "^4.4.1", - "es-module-lexer": "^1.7.0", - "pathe": "^2.0.3", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "../../dir-js/node_modules/vite/node_modules/fdir": { - "version": "6.5.0", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12.0.0" - }, - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "../../dir-js/node_modules/vite/node_modules/picomatch": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "../../dir-js/node_modules/vitest": { - "version": "3.2.4", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/expect": "3.2.4", - "@vitest/mocker": "3.2.4", - "@vitest/pretty-format": "^3.2.4", - "@vitest/runner": "3.2.4", - "@vitest/snapshot": "3.2.4", - "@vitest/spy": "3.2.4", - "@vitest/utils": "3.2.4", - "chai": "^5.2.0", - "debug": "^4.4.1", - "expect-type": "^1.2.1", - "magic-string": "^0.30.17", - "pathe": "^2.0.3", - "picomatch": "^4.0.2", - "std-env": "^3.9.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.2", - "tinyglobby": "^0.2.14", - "tinypool": "^1.1.1", - "tinyrainbow": "^2.0.0", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", - "vite-node": "3.2.4", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/debug": "^4.1.12", - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", - "@vitest/browser": "3.2.4", - "@vitest/ui": "3.2.4", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/debug": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "../../dir-js/node_modules/vitest/node_modules/picomatch": { - "version": "4.0.3", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "../../dir-js/node_modules/which": { - "version": "2.0.2", - "dev": true, - "license": "ISC", - "peer": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "../../dir-js/node_modules/why-is-node-running": { - "version": "2.3.0", - "dev": true, - "license": "MIT", - "dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - }, - "../../dir-js/node_modules/word-wrap": { - "version": "1.2.5", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=0.10.0" - } - }, - "../../dir-js/node_modules/wrap-ansi": { - "version": "7.0.0", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - 
"../../dir-js/node_modules/wrappy": { - "version": "1.0.2", - "dev": true, - "license": "ISC", - "peer": true - }, - "../../dir-js/node_modules/y18n": { - "version": "5.0.8", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "../../dir-js/node_modules/yallist": { - "version": "4.0.0", - "dev": true, - "license": "ISC" - }, - "../../dir-js/node_modules/yargs": { - "version": "17.7.2", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/yargs/node_modules/yargs-parser": { - "version": "21.1.1", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "../../dir-js/node_modules/yn": { - "version": "3.1.1", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "../../dir-js/node_modules/yocto-queue": { - "version": "0.1.0", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/agntcy-dir": { - "resolved": "../../dir-js", - "link": true - } - } +{ + "name": "dir-example", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "dir-example", + "version": "0.0.0", + "dependencies": { + "agntcy-dir": "file:../../dir-js" + } + }, + "../../dir-js": { + "name": "agntcy-dir", + "version": "0.4.0", + "license": "Apache-2.0", + "dependencies": { + "@buf/agntcy_dir.bufbuild_es": "^2.9.0-20251007090412-102a9af80c74.1", + "@bufbuild/protobuf": "^2.8.0", + "@connectrpc/connect": "^2.1.0", + "@connectrpc/connect-node": "^2.1.0", + "@grpc/grpc-js": "^1.13.4", + "spiffe": "^0.4.0" + }, + "devDependencies": { + "@microsoft/api-extractor": "^7.52.13", + "@rollup/plugin-json": "^6.1.0", + "@rollup/plugin-node-resolve": "^16.0.1", + "@types/node": "^22.7.5", + "@types/uuid": "^10.0.0", + "rollup-plugin-typescript2": "^0.36.0", + "ts-node": "^10.9.2", + "typescript": "^5.9.2", + "typescript-eslint": "^8.44.0", + "uuid": "^11.1.0", + "vitest": "^3.2.4" + }, + "engines": { + "node": ">=20.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-linux-x64-gnu": "4.50.2" + } + }, + "../../dir-js/node_modules/@buf/agntcy_dir.bufbuild_es": { + "version": "2.8.0-20250915154105-152c4b7b4ec6.1", + "peerDependencies": { + "@bufbuild/protobuf": "^2.8.0" + } + }, + "../../dir-js/node_modules/@bufbuild/protobuf": { + "version": "2.8.0", + "license": "(Apache-2.0 AND BSD-3-Clause)" + }, + "../../dir-js/node_modules/@connectrpc/connect": { + "version": "2.1.0", + "license": "Apache-2.0", + "peerDependencies": { + "@bufbuild/protobuf": "^2.7.0" + } + }, + "../../dir-js/node_modules/@connectrpc/connect-node": { + "version": "2.1.0", + "license": "Apache-2.0", + "engines": { + "node": ">=20" + }, + "peerDependencies": { + "@bufbuild/protobuf": "^2.7.0", + "@connectrpc/connect": "2.1.0" + } + }, + "../../dir-js/node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/@esbuild/darwin-arm64": { + "version": "0.25.9", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + 
"../../dir-js/node_modules/@eslint-community/eslint-utils": { + "version": "4.9.0", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "../../dir-js/node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "../../dir-js/node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/@eslint/js": { + "version": "8.57.1", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "../../dir-js/node_modules/@grpc/grpc-js": { + "version": "1.13.4", + "license": "Apache-2.0", + "dependencies": { + "@grpc/proto-loader": "^0.7.13", + "@js-sdsl/ordered-map": "^4.4.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "../../dir-js/node_modules/@grpc/proto-loader": { + "version": "0.7.15", + "license": "Apache-2.0", + "dependencies": { + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.2.5", + "yargs": "^17.7.2" + }, + "bin": { + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "../../dir-js/node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "../../dir-js/node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "dev": true, + "license": "BSD-3-Clause", + "peer": true + }, + "../../dir-js/node_modules/@isaacs/balanced-match": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": "20 || >=22" + } + }, + "../../dir-js/node_modules/@isaacs/brace-expansion": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@isaacs/balanced-match": "^4.0.1" + }, + "engines": { + "node": "20 || >=22" + } + }, + "../../dir-js/node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "../../dir-js/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, + "../../dir-js/node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "license": "MIT", + 
"funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "../../dir-js/node_modules/@microsoft/api-extractor": { + "version": "7.52.13", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/api-extractor-model": "7.30.7", + "@microsoft/tsdoc": "~0.15.1", + "@microsoft/tsdoc-config": "~0.17.1", + "@rushstack/node-core-library": "5.14.0", + "@rushstack/rig-package": "0.5.3", + "@rushstack/terminal": "0.16.0", + "@rushstack/ts-command-line": "5.0.3", + "lodash": "~4.17.15", + "minimatch": "10.0.3", + "resolve": "~1.22.1", + "semver": "~7.5.4", + "source-map": "~0.6.1", + "typescript": "5.8.2" + }, + "bin": { + "api-extractor": "bin/api-extractor" + } + }, + "../../dir-js/node_modules/@microsoft/api-extractor-model": { + "version": "7.30.7", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/tsdoc": "~0.15.1", + "@microsoft/tsdoc-config": "~0.17.1", + "@rushstack/node-core-library": "5.14.0" + } + }, + "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/minimatch": { + "version": "10.0.3", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/brace-expansion": "^5.0.0" + }, + "engines": { + "node": "20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/semver": { + "version": "7.5.4", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "../../dir-js/node_modules/@microsoft/api-extractor/node_modules/typescript": { + "version": "5.8.2", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "../../dir-js/node_modules/@microsoft/tsdoc": { + "version": "0.15.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@microsoft/tsdoc-config": { + "version": "0.17.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@microsoft/tsdoc": "0.15.1", + "ajv": "~8.12.0", + "jju": "~1.4.0", + "resolve": "~1.22.2" + } + }, + "../../dir-js/node_modules/@microsoft/tsdoc-config/node_modules/ajv": { + "version": "8.12.0", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "../../dir-js/node_modules/@microsoft/tsdoc-config/node_modules/json-schema-traverse": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/base64": { + "version": "1.1.2", + "license": "BSD-3-Clause" + }, + 
"../../dir-js/node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "../../dir-js/node_modules/@protobufjs/float": { + "version": "1.0.2", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/path": { + "version": "1.1.2", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/pool": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/@rollup/plugin-json": { + "version": "6.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.1.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rollup/plugin-node-resolve": { + "version": "16.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "is-module": "^1.0.0", + "resolve": "^1.22.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.78.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rollup/pluginutils/node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "../../dir-js/node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.50.2", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "../../dir-js/node_modules/@rushstack/node-core-library": { + "version": "5.14.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "~8.13.0", + "ajv-draft-04": "~1.0.0", + "ajv-formats": "~3.0.1", + "fs-extra": "~11.3.0", + "import-lazy": "~4.0.0", + "jju": "~1.4.0", + "resolve": "~1.22.1", + "semver": "~7.5.4" + }, + "peerDependencies": { + "@types/node": "*" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/ajv": { + "version": "8.13.0", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/ajv-draft-04": { 
+ "version": "1.0.0", + "dev": true, + "license": "MIT", + "peerDependencies": { + "ajv": "^8.5.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/fs-extra": { + "version": "11.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/json-schema-traverse": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@rushstack/node-core-library/node_modules/semver": { + "version": "7.5.4", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "../../dir-js/node_modules/@rushstack/rig-package": { + "version": "0.5.3", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve": "~1.22.1", + "strip-json-comments": "~3.1.1" + } + }, + "../../dir-js/node_modules/@rushstack/terminal": { + "version": "0.16.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@rushstack/node-core-library": "5.14.0", + "supports-color": "~8.1.1" + }, + "peerDependencies": { + "@types/node": "*" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@rushstack/terminal/node_modules/supports-color": { + "version": "8.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "../../dir-js/node_modules/@rushstack/ts-command-line": { + "version": "5.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@rushstack/terminal": "0.16.0", + "@types/argparse": "1.0.38", + "argparse": "~1.0.9", + "string-argv": "~0.3.1" + } + }, + "../../dir-js/node_modules/@rushstack/ts-command-line/node_modules/argparse": { + "version": "1.0.10", + "dev": true, + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "../../dir-js/node_modules/@tsconfig/node10": { + "version": "1.0.11", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@tsconfig/node12": { + "version": "1.0.11", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@tsconfig/node14": { + "version": "1.0.3", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@tsconfig/node16": { + "version": "1.0.4", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@types/argparse": { + "version": "1.0.38", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@types/chai": { + "version": "5.2.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*" + } + }, + "../../dir-js/node_modules/@types/deep-eql": { + "version": "4.0.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@types/estree": { + "version": "1.0.8", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@types/node": { + "version": "22.18.4", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "../../dir-js/node_modules/@types/resolve": { + "version": "1.20.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/@types/uuid": { + "version": "10.0.0", + "dev": true, + "license": "MIT" + }, + 
"../../dir-js/node_modules/@typescript-eslint/project-service": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.44.0", + "@typescript-eslint/types": "^8.44.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/@typescript-eslint/project-service/node_modules/@typescript-eslint/types": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "../../dir-js/node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/@vitest/expect": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/@vitest/mocker": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "3.2.4", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.17" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "../../dir-js/node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "../../dir-js/node_modules/@vitest/pretty-format": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^2.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/@vitest/runner": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "3.2.4", + "pathe": "^2.0.3", + "strip-literal": "^3.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/@vitest/snapshot": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "magic-string": "^0.30.17", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/@vitest/spy": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^4.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/@vitest/utils": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "3.2.4", + "loupe": "^3.1.4", + "tinyrainbow": "^2.0.0" + }, + "funding": { + 
"url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/acorn": { + "version": "8.15.0", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "../../dir-js/node_modules/acorn-jsx": { + "version": "5.3.2", + "dev": true, + "license": "MIT", + "peer": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "../../dir-js/node_modules/acorn-walk": { + "version": "8.3.4", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "../../dir-js/node_modules/ajv": { + "version": "6.12.6", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "../../dir-js/node_modules/ajv-formats": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "../../dir-js/node_modules/ajv-formats/node_modules/ajv": { + "version": "8.17.1", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "../../dir-js/node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/ansi-regex": { + "version": "5.0.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/ansi-styles": { + "version": "4.3.0", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "../../dir-js/node_modules/arg": { + "version": "4.1.3", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/argparse": { + "version": "2.0.1", + "dev": true, + "license": "Python-2.0", + "peer": true + }, + "../../dir-js/node_modules/assertion-error": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/balanced-match": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/brace-expansion": { + "version": "1.1.12", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "../../dir-js/node_modules/braces": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/cac": { + "version": "6.7.14", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/callsites": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/chai": { + "version": "5.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + 
"node": ">=18" + } + }, + "../../dir-js/node_modules/chalk": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "../../dir-js/node_modules/check-error": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "../../dir-js/node_modules/cliui": { + "version": "8.0.1", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/color-convert": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "../../dir-js/node_modules/color-name": { + "version": "1.1.4", + "license": "MIT" + }, + "../../dir-js/node_modules/commondir": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/concat-map": { + "version": "0.0.1", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/create-require": { + "version": "1.1.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/cross-spawn": { + "version": "7.0.6", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/debug": { + "version": "4.4.3", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "../../dir-js/node_modules/deep-eql": { + "version": "5.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/deep-is": { + "version": "0.1.4", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/deepmerge": { + "version": "4.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/diff": { + "version": "4.0.2", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "../../dir-js/node_modules/doctrine": { + "version": "3.0.0", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "../../dir-js/node_modules/emoji-regex": { + "version": "8.0.0", + "license": "MIT" + }, + "../../dir-js/node_modules/es-module-lexer": { + "version": "1.7.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/esbuild": { + "version": "0.25.9", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.9", + "@esbuild/android-arm": "0.25.9", + "@esbuild/android-arm64": "0.25.9", + "@esbuild/android-x64": "0.25.9", + "@esbuild/darwin-arm64": "0.25.9", + "@esbuild/darwin-x64": "0.25.9", + "@esbuild/freebsd-arm64": "0.25.9", + "@esbuild/freebsd-x64": "0.25.9", + "@esbuild/linux-arm": "0.25.9", + "@esbuild/linux-arm64": "0.25.9", + "@esbuild/linux-ia32": "0.25.9", + "@esbuild/linux-loong64": "0.25.9", + "@esbuild/linux-mips64el": "0.25.9", + "@esbuild/linux-ppc64": "0.25.9", + 
"@esbuild/linux-riscv64": "0.25.9", + "@esbuild/linux-s390x": "0.25.9", + "@esbuild/linux-x64": "0.25.9", + "@esbuild/netbsd-arm64": "0.25.9", + "@esbuild/netbsd-x64": "0.25.9", + "@esbuild/openbsd-arm64": "0.25.9", + "@esbuild/openbsd-x64": "0.25.9", + "@esbuild/openharmony-arm64": "0.25.9", + "@esbuild/sunos-x64": "0.25.9", + "@esbuild/win32-arm64": "0.25.9", + "@esbuild/win32-ia32": "0.25.9", + "@esbuild/win32-x64": "0.25.9" + } + }, + "../../dir-js/node_modules/escalade": { + "version": "3.2.0", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/escape-string-regexp": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/eslint": { + "version": "8.57.1", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/eslint/node_modules/eslint-scope": { + "version": "7.2.2", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/eslint/node_modules/estraverse": { + "version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "../../dir-js/node_modules/espree": { + "version": "9.6.1", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/esquery": { + "version": "1.6.0", + "dev": true, + "license": "BSD-3-Clause", + "peer": true, + "dependencies": { + "estraverse": "^5.1.0" 
+ }, + "engines": { + "node": ">=0.10" + } + }, + "../../dir-js/node_modules/esquery/node_modules/estraverse": { + "version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "../../dir-js/node_modules/esrecurse": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "../../dir-js/node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "../../dir-js/node_modules/estree-walker": { + "version": "2.0.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/esutils": { + "version": "2.0.3", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/expect-type": { + "version": "1.2.2", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "../../dir-js/node_modules/fast-deep-equal": { + "version": "3.1.3", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/fast-glob": { + "version": "3.3.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "../../dir-js/node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "../../dir-js/node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/fast-levenshtein": { + "version": "2.0.6", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/fast-uri": { + "version": "3.1.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/fastq": { + "version": "1.19.1", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "../../dir-js/node_modules/file-entry-cache": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "../../dir-js/node_modules/fill-range": { + "version": "7.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/find-cache-dir": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "commondir": "^1.0.1", + "make-dir": "^3.0.2", + "pkg-dir": "^4.1.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/avajs/find-cache-dir?sponsor=1" + } + }, + "../../dir-js/node_modules/find-up": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/flat-cache": { + "version": "3.2.0", + "dev": true, + "license": 
"MIT", + "peer": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "../../dir-js/node_modules/flatted": { + "version": "3.3.3", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/fs-extra": { + "version": "10.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/fs.realpath": { + "version": "1.0.0", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/fsevents": { + "version": "2.3.3", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "../../dir-js/node_modules/function-bind": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "../../dir-js/node_modules/get-caller-file": { + "version": "2.0.5", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "../../dir-js/node_modules/glob": { + "version": "7.2.3", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "../../dir-js/node_modules/glob-parent": { + "version": "6.0.2", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "../../dir-js/node_modules/globals": { + "version": "13.24.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/graceful-fs": { + "version": "4.2.11", + "dev": true, + "license": "ISC" + }, + "../../dir-js/node_modules/graphemer": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/has-flag": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/hasown": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "../../dir-js/node_modules/ignore": { + "version": "5.3.2", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 4" + } + }, + "../../dir-js/node_modules/import-fresh": { + "version": "3.3.1", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/import-lazy": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/imurmurhash": { + "version": "0.1.4", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.8.19" + } + }, + "../../dir-js/node_modules/inflight": { + "version": "1.0.6", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + 
}, + "../../dir-js/node_modules/inherits": { + "version": "2.0.4", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/is-core-module": { + "version": "2.16.1", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "../../dir-js/node_modules/is-extglob": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/is-glob": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/is-module": { + "version": "1.0.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/is-number": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "../../dir-js/node_modules/is-path-inside": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/isexe": { + "version": "2.0.0", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/jju": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "../../dir-js/node_modules/json-buffer": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/json-schema-traverse": { + "version": "0.4.1", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/jsonfile": { + "version": "6.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "../../dir-js/node_modules/keyv": { + "version": "4.5.4", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "../../dir-js/node_modules/levn": { + "version": "0.4.1", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "../../dir-js/node_modules/locate-path": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/lodash": { + "version": "4.17.21", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/lodash.camelcase": { + "version": "4.3.0", + "license": "MIT" + }, + "../../dir-js/node_modules/lodash.merge": { + "version": "4.6.2", + "dev": true, + "license": "MIT", + "peer": true + }, + "../../dir-js/node_modules/long": { + "version": 
"5.3.2", + "license": "Apache-2.0" + }, + "../../dir-js/node_modules/loupe": { + "version": "3.2.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/lru-cache": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "../../dir-js/node_modules/magic-string": { + "version": "0.30.19", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "../../dir-js/node_modules/make-dir": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "../../dir-js/node_modules/make-error": { + "version": "1.3.6", + "dev": true, + "license": "ISC" + }, + "../../dir-js/node_modules/merge2": { + "version": "1.4.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/micromatch": { + "version": "4.0.8", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "../../dir-js/node_modules/minimatch": { + "version": "3.1.2", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "../../dir-js/node_modules/ms": { + "version": "2.1.3", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/nanoid": { + "version": "3.3.11", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "../../dir-js/node_modules/natural-compare": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/once": { + "version": "1.4.0", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "wrappy": "1" + } + }, + "../../dir-js/node_modules/optionator": { + "version": "0.9.4", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "../../dir-js/node_modules/p-limit": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/p-locate": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/p-try": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/parent-module": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/path-exists": { + 
"version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/path-is-absolute": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/path-key": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/path-parse": { + "version": "1.0.7", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/pathe": { + "version": "2.0.3", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/pathval": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "../../dir-js/node_modules/picocolors": { + "version": "1.1.1", + "dev": true, + "license": "ISC" + }, + "../../dir-js/node_modules/picomatch": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "../../dir-js/node_modules/pkg-dir": { + "version": "4.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/postcss": { + "version": "8.5.6", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "../../dir-js/node_modules/prelude-ls": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "../../dir-js/node_modules/protobufjs": { + "version": "7.5.4", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "../../dir-js/node_modules/punycode": { + "version": "2.3.1", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/queue-microtask": { + "version": "1.2.3", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "../../dir-js/node_modules/require-directory": { + "version": "2.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/require-from-string": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/resolve": { + "version": "1.22.10", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "../../dir-js/node_modules/resolve-from": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=4" + } + }, + "../../dir-js/node_modules/reusify": { + "version": "1.1.0", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/rimraf": { + "version": "3.0.2", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "../../dir-js/node_modules/rollup": { + "version": "4.50.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.50.2", + "@rollup/rollup-android-arm64": "4.50.2", + "@rollup/rollup-darwin-arm64": "4.50.2", + "@rollup/rollup-darwin-x64": "4.50.2", + "@rollup/rollup-freebsd-arm64": "4.50.2", + "@rollup/rollup-freebsd-x64": "4.50.2", + "@rollup/rollup-linux-arm-gnueabihf": "4.50.2", + "@rollup/rollup-linux-arm-musleabihf": "4.50.2", + "@rollup/rollup-linux-arm64-gnu": "4.50.2", + "@rollup/rollup-linux-arm64-musl": "4.50.2", + "@rollup/rollup-linux-loong64-gnu": "4.50.2", + "@rollup/rollup-linux-ppc64-gnu": "4.50.2", + "@rollup/rollup-linux-riscv64-gnu": "4.50.2", + "@rollup/rollup-linux-riscv64-musl": "4.50.2", + "@rollup/rollup-linux-s390x-gnu": "4.50.2", + "@rollup/rollup-linux-x64-gnu": "4.50.2", + "@rollup/rollup-linux-x64-musl": "4.50.2", + "@rollup/rollup-openharmony-arm64": "4.50.2", + "@rollup/rollup-win32-arm64-msvc": "4.50.2", + "@rollup/rollup-win32-ia32-msvc": "4.50.2", + "@rollup/rollup-win32-x64-msvc": "4.50.2", + "fsevents": "~2.3.2" + } + }, + "../../dir-js/node_modules/rollup-plugin-typescript2": { + "version": "0.36.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^4.1.2", + "find-cache-dir": "^3.3.2", + "fs-extra": "^10.0.0", + "semver": "^7.5.4", + "tslib": "^2.6.2" + }, + "peerDependencies": { + "rollup": ">=1.26.3", + "typescript": ">=2.4.0" + } + }, + "../../dir-js/node_modules/rollup-plugin-typescript2/node_modules/@rollup/pluginutils": { + "version": "4.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "estree-walker": "^2.0.1", + "picomatch": "^2.2.2" + }, + "engines": { + 
"node": ">= 8.0.0" + } + }, + "../../dir-js/node_modules/rollup-plugin-typescript2/node_modules/tslib": { + "version": "2.8.1", + "dev": true, + "license": "0BSD" + }, + "../../dir-js/node_modules/run-parallel": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "../../dir-js/node_modules/semver": { + "version": "7.7.2", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "../../dir-js/node_modules/shebang-command": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/shebang-regex": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/siginfo": { + "version": "2.0.0", + "dev": true, + "license": "ISC" + }, + "../../dir-js/node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/source-map-js": { + "version": "1.2.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/sprintf-js": { + "version": "1.0.3", + "dev": true, + "license": "BSD-3-Clause" + }, + "../../dir-js/node_modules/stackback": { + "version": "0.0.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/std-env": { + "version": "3.9.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/string-argv": { + "version": "0.3.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.6.19" + } + }, + "../../dir-js/node_modules/string-width": { + "version": "4.2.3", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/strip-ansi": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/strip-json-comments": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/strip-literal": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^9.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "../../dir-js/node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/supports-color": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "../../dir-js/node_modules/text-table": { + "version": "0.2.0", + "dev": true, + "license": "MIT", + "peer": 
true + }, + "../../dir-js/node_modules/tinybench": { + "version": "2.9.0", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/tinyexec": { + "version": "0.3.2", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/tinyglobby": { + "version": "0.2.15", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "../../dir-js/node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "../../dir-js/node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "../../dir-js/node_modules/tinypool": { + "version": "1.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "../../dir-js/node_modules/tinyrainbow": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "../../dir-js/node_modules/tinyspy": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "../../dir-js/node_modules/to-regex-range": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "../../dir-js/node_modules/ts-api-utils": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "../../dir-js/node_modules/ts-node": { + "version": "10.9.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": "^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "../../dir-js/node_modules/type-check": { + "version": "0.4.0", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "../../dir-js/node_modules/type-fest": { + "version": "0.20.2", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "../../dir-js/node_modules/typescript": { + "version": "5.9.2", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + 
"../../dir-js/node_modules/typescript-eslint": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.44.0", + "@typescript-eslint/parser": "8.44.0", + "@typescript-eslint/typescript-estree": "8.44.0", + "@typescript-eslint/utils": "8.44.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.44.0", + "@typescript-eslint/type-utils": "8.44.0", + "@typescript-eslint/utils": "8.44.0", + "@typescript-eslint/visitor-keys": "8.44.0", + "graphemer": "^1.4.0", + "ignore": "^7.0.0", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.44.0", + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/parser": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.44.0", + "@typescript-eslint/types": "8.44.0", + "@typescript-eslint/typescript-estree": "8.44.0", + "@typescript-eslint/visitor-keys": "8.44.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/scope-manager": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.44.0", + "@typescript-eslint/visitor-keys": "8.44.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/type-utils": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.44.0", + "@typescript-eslint/typescript-estree": "8.44.0", + "@typescript-eslint/utils": "8.44.0", + "debug": "^4.3.4", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/types": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.44.0", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.44.0", + "@typescript-eslint/tsconfig-utils": "8.44.0", + "@typescript-eslint/types": "8.44.0", + "@typescript-eslint/visitor-keys": "8.44.0", + "debug": "^4.3.4", + "fast-glob": "^3.3.2", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^2.1.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/utils": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.7.0", + "@typescript-eslint/scope-manager": "8.44.0", + "@typescript-eslint/types": "8.44.0", + "@typescript-eslint/typescript-estree": "8.44.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0", + "typescript": ">=4.8.4 <6.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.44.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.44.0", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/brace-expansion": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/ignore": { + "version": "7.0.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "../../dir-js/node_modules/typescript-eslint/node_modules/minimatch": { + "version": "9.0.5", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "../../dir-js/node_modules/undici-types": { + "version": "6.21.0", + "license": "MIT" + }, + "../../dir-js/node_modules/universalify": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "../../dir-js/node_modules/uri-js": { + "version": "4.4.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "../../dir-js/node_modules/uuid": { + "version": "11.1.0", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/esm/bin/uuid" + } + }, + "../../dir-js/node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "dev": true, + "license": "MIT" + }, + "../../dir-js/node_modules/vite": { + "version": "7.1.12", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.5.0", + 
"picomatch": "^4.0.3", + "postcss": "^8.5.6", + "rollup": "^4.43.0", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "lightningcss": "^1.21.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "../../dir-js/node_modules/vite-node": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.4.1", + "es-module-lexer": "^1.7.0", + "pathe": "^2.0.3", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "../../dir-js/node_modules/vite/node_modules/fdir": { + "version": "6.5.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "../../dir-js/node_modules/vite/node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "../../dir-js/node_modules/vitest": { + "version": "3.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/chai": "^5.2.2", + "@vitest/expect": "3.2.4", + "@vitest/mocker": "3.2.4", + "@vitest/pretty-format": "^3.2.4", + "@vitest/runner": "3.2.4", + "@vitest/snapshot": "3.2.4", + "@vitest/spy": "3.2.4", + "@vitest/utils": "3.2.4", + "chai": "^5.2.0", + "debug": "^4.4.1", + "expect-type": "^1.2.1", + "magic-string": "^0.30.17", + "pathe": "^2.0.3", + "picomatch": "^4.0.2", + "std-env": "^3.9.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.14", + "tinypool": "^1.1.1", + "tinyrainbow": "^2.0.0", + "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", + "vite-node": "3.2.4", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/debug": "^4.1.12", + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "@vitest/browser": "3.2.4", + "@vitest/ui": "3.2.4", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/debug": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + 
"../../dir-js/node_modules/vitest/node_modules/picomatch": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "../../dir-js/node_modules/which": { + "version": "2.0.2", + "dev": true, + "license": "ISC", + "peer": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "../../dir-js/node_modules/why-is-node-running": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "../../dir-js/node_modules/word-wrap": { + "version": "1.2.5", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "../../dir-js/node_modules/wrap-ansi": { + "version": "7.0.0", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "../../dir-js/node_modules/wrappy": { + "version": "1.0.2", + "dev": true, + "license": "ISC", + "peer": true + }, + "../../dir-js/node_modules/y18n": { + "version": "5.0.8", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "../../dir-js/node_modules/yallist": { + "version": "4.0.0", + "dev": true, + "license": "ISC" + }, + "../../dir-js/node_modules/yargs": { + "version": "17.7.2", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/yargs/node_modules/yargs-parser": { + "version": "21.1.1", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "../../dir-js/node_modules/yn": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "../../dir-js/node_modules/yocto-queue": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/agntcy-dir": { + "resolved": "../../dir-js", + "link": true + } + } } \ No newline at end of file diff --git a/sdk/examples/example-js/package.json b/sdk/examples/example-js/package.json index 4a0726799..7d9b085ce 100644 --- a/sdk/examples/example-js/package.json +++ b/sdk/examples/example-js/package.json @@ -1,11 +1,11 @@ -{ - "name": "dir-example", - "version": "0.0.0", - "type": "module", - "scripts": { - "example": "node example.js" - }, - "dependencies": { - "agntcy-dir": "file:../../dir-js" - } -} +{ + "name": "dir-example", + "version": "0.0.0", + "type": "module", + "scripts": { + "example": "node example.js" + }, + "dependencies": { + "agntcy-dir": "file:../../dir-js" + } +} diff --git a/sdk/examples/example-py/.gitignore b/sdk/examples/example-py/.gitignore index ef897f8f9..81be88a68 100644 --- a/sdk/examples/example-py/.gitignore +++ b/sdk/examples/example-py/.gitignore @@ -1,10 +1,10 @@ -# Python -dist/ -__pycache__/ -*.egg-info/ -*.pyc -*.pyo -*.pyd - -# Virtual Environments -.venv/ +# Python +dist/ +__pycache__/ +*.egg-info/ +*.pyc +*.pyo +*.pyd + +# Virtual Environments +.venv/ diff --git 
a/sdk/examples/example-py/example.py b/sdk/examples/example-py/example.py
index d66fc4a7e..aa9d7c63c 100644
--- a/sdk/examples/example-py/example.py
+++ b/sdk/examples/example-py/example.py
@@ -1,131 +1,135 @@
-# Copyright AGNTCY Contributors (https://github.com/agntcy)
-# SPDX-License-Identifier: Apache-2.0
-
-from google.protobuf.json_format import MessageToJson
-
-from agntcy.dir_sdk.client import Client, Config
-from agntcy.dir_sdk.models import core_v1, search_v1, routing_v1
-
-
-def generate_record(name):
-    return core_v1.Record(
-        data={
-            "name": name,
-            "version": "v1.0.0",
-            "schema_version": "0.7.0",
-            "description": "My example agent",
-            "authors": ["AGNTCY"],
-            "created_at": "2025-03-19T17:06:37Z",
-            "skills": [
-                {
-                    "name": "natural_language_processing/natural_language_generation/text_completion",
-                    "id": 10201
-                },
-                {
-                    "name": "natural_language_processing/analytical_reasoning/problem_solving",
-                    "id": 10702
-                }
-            ],
-            "locators": [
-                {
-                    "type": "docker_image",
-                    "url": "https://ghcr.io/agntcy/marketing-strategy"
-                }
-            ],
-            "domains": [
-                {
-                    "name": "technology/networking",
-                    "id": 103
-                }
-            ],
-            "modules": [
-                {
-                    "name": "integration/a2a",
-                    "id": 203,
-                    "prompts": "",
-                    "data": {
-                        "protocol_version": "lightweight orchestra moral",
-                        "card_data": "centres",
-                        "capabilities": [
-                            "state_transition_history",
-                            "push_notifications"
-                        ],
-                        "transports": [
-                            "grpc",
-                            "http"
-                        ],
-                        "output_modes": [
-                            "text/html"
-                        ]
-                    }
-                }
-            ]
-        },
-    )
-
-
-def main() -> None:
-    # Initialize the client
-    client = Client()
-
-    records = [generate_record(x) for x in ["example-record", "example-record2"]]
-
-    # Push objects to the store
-    refs = client.push(records)
-
-    for ref in refs:
-        print("Pushed object ref:", ref.cid)
-
-    # Pull objects from the store
-    pulled_records = client.pull(refs)
-
-    for pulled_record in pulled_records:
-        print("Pulled object data:", MessageToJson(pulled_record))
-
-    # Lookup the object
-    metadatas = client.lookup(refs)
-
-    for metadata in metadatas:
-        print("Lookup object metadata:", MessageToJson(metadata))
-
-    # Publish the object
-    record_refs = routing_v1.RecordRefs(refs=[refs[0]])
-    publish_request = routing_v1.PublishRequest(record_refs=record_refs)
-    client.publish(publish_request)
-    print("Object published.")
-
-    # List objects in the store
-    query = routing_v1.RecordQuery(
-        type=routing_v1.RECORD_QUERY_TYPE_SKILL,
-        value="/skills/Natural Language Processing/Text Completion",
-    )
-
-    list_request = routing_v1.ListRequest(queries=[query])
-    objects = list(client.list(list_request))
-
-    for o in objects:
-        print("Listed object:", MessageToJson(o))
-
-    # Search objects
-    search_query = search_v1.RecordQuery(
-        type=search_v1.RECORD_QUERY_TYPE_SKILL_ID, value="1",
-    )
-
-    search_request = search_v1.SearchRequest(queries=[search_query], limit=3)
-    objects = list(client.search(search_request))
-
-    print("Searched objects:",objects)
-
-    # Unpublish the object
-    record_refs = routing_v1.RecordRefs(refs=[refs[0]])
-    unpublish_request = routing_v1.UnpublishRequest(record_refs=record_refs)
-    client.unpublish(unpublish_request)
-    print("Object unpublished.")
-
-    # Delete the object
-    client.delete(refs)
-    print("Objects are deleted.")
-
-
-if __name__ == "__main__":
-    main()
+# Copyright AGNTCY Contributors (https://github.com/agntcy)
+# SPDX-License-Identifier: Apache-2.0
+
+from google.protobuf.json_format import MessageToJson
+
+from agntcy.dir_sdk.client import Client, Config
+from agntcy.dir_sdk.models import core_v1, search_v1, routing_v1
+
+
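+# main() below walks the full record lifecycle against a running Directory
+# server: push -> pull -> lookup -> publish -> list -> search -> unpublish ->
+# delete. generate_record() builds a sample agent record; its skill, locator,
+# domain, and module values are illustrative data, not a normative schema.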
+def generate_record(name):
+    return core_v1.Record(
+        data={
+            "name": name,
+            "version": "v1.0.0",
+            "schema_version": "0.7.0",
+            "description": "My example agent",
+            "authors": ["AGNTCY"],
+            "created_at": "2025-03-19T17:06:37Z",
+            "skills": [
+                {
+                    "name": "natural_language_processing/natural_language_generation/text_completion",
+                    "id": 10201
+                },
+                {
+                    "name": "natural_language_processing/analytical_reasoning/problem_solving",
+                    "id": 10702
+                }
+            ],
+            "locators": [
+                {
+                    "type": "docker_image",
+                    "url": "https://ghcr.io/agntcy/marketing-strategy"
+                }
+            ],
+            "domains": [
+                {
+                    "name": "technology/networking",
+                    "id": 103
+                }
+            ],
+            "modules": [
+                {
+                    "name": "integration/a2a",
+                    "id": 203,
+                    "prompts": "",
+                    "data": {
+                        "protocol_version": "lightweight orchestra moral",
+                        "card_data": "centres",
+                        "capabilities": [
+                            "state_transition_history",
+                            "push_notifications"
+                        ],
+                        "transports": [
+                            "grpc",
+                            "http"
+                        ],
+                        "output_modes": [
+                            "text/html"
+                        ]
+                    }
+                }
+            ]
+        },
+    )
+
+
+def main() -> None:
+    # Initialize the client
+    client = Client()
+
+    records = [generate_record(x) for x in ["example-record", "example-record2"]]
+
+    # Push objects to the store
+    refs = client.push(records)
+
+    for ref in refs:
+        print("Pushed object ref:", ref.cid)
+
+    # Pull objects from the store
+    pulled_records = client.pull(refs)
+
+    for pulled_record in pulled_records:
+        print("Pulled object data:", MessageToJson(pulled_record))
+
+    # Lookup the object
+    metadatas = client.lookup(refs)
+
+    for metadata in metadatas:
+        print("Lookup object metadata:", MessageToJson(metadata))
+
+    # Publish the object
+    record_refs = routing_v1.RecordRefs(refs=[refs[0]])
+    publish_request = routing_v1.PublishRequest(record_refs=record_refs)
+    client.publish(publish_request)
+    print("Object published.")
+
+    # List objects in the store
+    query = routing_v1.RecordQuery(
+        type=routing_v1.RECORD_QUERY_TYPE_SKILL,
+        value="/skills/Natural Language Processing/Text Completion",
+    )
+
+    list_request = routing_v1.ListRequest(queries=[query])
+    objects = list(client.list(list_request))
+
+    for o in objects:
+        print("Listed object:", MessageToJson(o))
+
+    # Search objects
+    search_query = search_v1.RecordQuery(
+        type=search_v1.RECORD_QUERY_TYPE_SKILL_ID, value="1",
+    )
+
+    search_request = search_v1.SearchRequest(queries=[search_query], limit=3)
+    objects = list(client.search(search_request))
+
+    print("Searched objects:", objects)
+
+    # Unpublish the object
+    record_refs = routing_v1.RecordRefs(refs=[refs[0]])
+    unpublish_request = routing_v1.UnpublishRequest(record_refs=record_refs)
+    client.unpublish(unpublish_request)
+    print("Object unpublished.")
+
+    # Delete the object
+    client.delete(refs)
+    print("Objects are deleted.")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/sdk/examples/example-py/pyproject.toml b/sdk/examples/example-py/pyproject.toml
index b5d8654a6..7dd8c71c6 100644
--- a/sdk/examples/example-py/pyproject.toml
+++ b/sdk/examples/example-py/pyproject.toml
@@ -1,13 +1,13 @@
-[project]
-name = "dir-example"
-version = "0.0.0"
-requires-python = ">=3.13"
-dependencies = [
-    "agntcy-dir",
-]
-
-[[tool.uv.index]]
-url = "https://buf.build/gen/python"
-
-[tool.uv.sources]
-agntcy-dir = { path = "../../dir-py", editable = true }
+[project]
+name = "dir-example"
+version = "0.0.0"
+requires-python = ">=3.13"
+dependencies = [
+    "agntcy-dir",
+]
+
+[[tool.uv.index]]
+url = "https://buf.build/gen/python"
+
+[tool.uv.sources]
+agntcy-dir = { path = "../../dir-py", editable = true }
diff --git
a/sdk/examples/example-py/uv.lock b/sdk/examples/example-py/uv.lock index d9409c84e..6825b521c 100644 --- a/sdk/examples/example-py/uv.lock +++ b/sdk/examples/example-py/uv.lock @@ -1,245 +1,245 @@ -version = 1 -revision = 1 -requires-python = ">=3.13" - -[[package]] -name = "agntcy-dir" -version = "0.5.0rc2" -source = { editable = "../../dir-py" } -dependencies = [ - { name = "grpcio" }, - { name = "spiffe" }, - { name = "spiffe-tls" }, -] - -[package.metadata] -requires-dist = [ - { name = "grpcio", specifier = ">=1.74.0" }, - { name = "spiffe", specifier = ">=0.2.2" }, - { name = "spiffe-tls", specifier = ">=0.2.1" }, -] - -[package.metadata.requires-dev] -dev = [ - { name = "pytest", specifier = ">=8.4.1" }, - { name = "uuid", specifier = ">=1.30" }, -] - -[[package]] -name = "cffi" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pycparser", marker = "implementation_name != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230 }, - { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043 }, - { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446 }, - { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101 }, - { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948 }, - { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422 }, - { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499 }, - { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928 }, - { url = 
"https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302 }, - { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909 }, - { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402 }, - { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780 }, - { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320 }, - { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487 }, - { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049 }, - { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793 }, - { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300 }, - { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244 }, - { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828 }, - { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926 }, - { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328 }, - { url = 
"https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650 }, - { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687 }, - { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773 }, - { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013 }, - { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593 }, - { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354 }, - { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480 }, - { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584 }, - { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443 }, - { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437 }, - { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487 }, - { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726 }, - { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195 }, -] - -[[package]] -name = "cryptography" -version = 
"45.0.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105 }, - { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799 }, - { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504 }, - { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542 }, - { url = "https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244 }, - { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975 }, - { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082 }, - { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397 }, - { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244 }, - { url = "https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862 }, - { url = "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578 }, - { url = 
"https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400 }, - { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824 }, - { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233 }, - { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075 }, - { url = "https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517 }, - { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893 }, - { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132 }, - { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086 }, - { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383 }, - { url = "https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186 }, - { url = "https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639 }, - { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552 }, - { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = 
"sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742 }, -] - -[[package]] -name = "dir-example" -version = "0.0.0" -source = { virtual = "." } -dependencies = [ - { name = "agntcy-dir" }, -] - -[package.metadata] -requires-dist = [{ name = "agntcy-dir", editable = "../../dir-py" }] - -[[package]] -name = "grpcio" -version = "1.74.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/d8/1004a5f468715221450e66b051c839c2ce9a985aa3ee427422061fcbb6aa/grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89", size = 5449488 }, - { url = "https://files.pythonhosted.org/packages/94/0e/33731a03f63740d7743dced423846c831d8e6da808fcd02821a4416df7fa/grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01", size = 10974059 }, - { url = "https://files.pythonhosted.org/packages/0d/c6/3d2c14d87771a421205bdca991467cfe473ee4c6a1231c1ede5248c62ab8/grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e", size = 5945647 }, - { url = "https://files.pythonhosted.org/packages/c5/83/5a354c8aaff58594eef7fffebae41a0f8995a6258bbc6809b800c33d4c13/grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91", size = 6626101 }, - { url = "https://files.pythonhosted.org/packages/3f/ca/4fdc7bf59bf6994aa45cbd4ef1055cd65e2884de6113dbd49f75498ddb08/grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249", size = 6182562 }, - { url = "https://files.pythonhosted.org/packages/fd/48/2869e5b2c1922583686f7ae674937986807c2f676d08be70d0a541316270/grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362", size = 6303425 }, - { url = "https://files.pythonhosted.org/packages/a6/0e/bac93147b9a164f759497bc6913e74af1cb632c733c7af62c0336782bd38/grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f", size = 6996533 }, - { url = "https://files.pythonhosted.org/packages/84/35/9f6b2503c1fd86d068b46818bbd7329db26a87cdd8c01e0d1a9abea1104c/grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20", size = 6491489 }, - { url = "https://files.pythonhosted.org/packages/75/33/a04e99be2a82c4cbc4039eb3a76f6c3632932b9d5d295221389d10ac9ca7/grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa", size = 3805811 }, - { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214 }, -] - -[[package]] -name = "pem" -version = "23.1.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = 
"https://files.pythonhosted.org/packages/05/86/16c0b6789816f8d53f2f208b5a090c9197da8a6dae4d490554bb1bedbb09/pem-23.1.0.tar.gz", hash = "sha256:06503ff2441a111f853ce4e8b9eb9d5fedb488ebdbf560115d3dd53a1b4afc73", size = 43796 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/97/8299a481ae6c08494b5d53511e6a4746775d8a354c685c69d8796b2ed482/pem-23.1.0-py3-none-any.whl", hash = "sha256:78bbb1e75b737891350cb9499cbba31da5d59545f360f44163c0bc751cad55d3", size = 9195 }, -] - -[[package]] -name = "protobuf" -version = "6.32.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fa/a4/cc17347aa2897568beece2e674674359f911d6fe21b0b8d6268cd42727ac/protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d", size = 440635 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/98/645183ea03ab3995d29086b8bf4f7562ebd3d10c9a4b14ee3f20d47cfe50/protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085", size = 424411 }, - { url = "https://files.pythonhosted.org/packages/8c/f3/6f58f841f6ebafe076cebeae33fc336e900619d34b1c93e4b5c97a81fdfa/protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1", size = 435738 }, - { url = "https://files.pythonhosted.org/packages/10/56/a8a3f4e7190837139e68c7002ec749190a163af3e330f65d90309145a210/protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281", size = 426454 }, - { url = "https://files.pythonhosted.org/packages/3f/be/8dd0a927c559b37d7a6c8ab79034fd167dcc1f851595f2e641ad62be8643/protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4", size = 322874 }, - { url = "https://files.pythonhosted.org/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710", size = 322013 }, - { url = "https://files.pythonhosted.org/packages/97/b7/15cc7d93443d6c6a84626ae3258a91f4c6ac8c0edd5df35ea7658f71b79c/protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346", size = 169289 }, -] - -[[package]] -name = "pyasn1" -version = "0.6.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyasn1" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259 }, -] - -[[package]] -name = "pycparser" -version = "2.23" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140 }, -] - -[[package]] -name = "pyjwt" -version = "2.10.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997 }, -] - -[package.optional-dependencies] -crypto = [ - { name = "cryptography" }, -] - -[[package]] -name = "pyopenssl" -version = "25.3.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268 }, -] - -[[package]] -name = "spiffe" -version = "0.2.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "cryptography" }, - { name = "grpcio" }, - { name = "pem" }, - { name = "protobuf" }, - { name = "pyasn1" }, - { name = "pyasn1-modules" }, - { name = "pyjwt", extra = ["crypto"] }, -] -sdist = { url = "https://files.pythonhosted.org/packages/e5/f9/716f29e5e0cb13d2786cf30fd2b001898cfe7cf33134f2bcd52da3e2b49c/spiffe-0.2.2.tar.gz", hash = "sha256:e4ca1247b1a08631a3f822eec7db70447b6d99734ff50670f2c9020dfb006231", size = 34912 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/01/7e/50b8f8d4bbeed9b7598cb2e15a7f3b9a55b1635d50d67d763f02ffc4cc93/spiffe-0.2.2-py3-none-any.whl", hash = "sha256:a53fb39ab59408b15dd2f969989045d68bc6b3ebfd283bf2f77e9ff9a66b047b", size = 56009 }, -] - -[[package]] -name = "spiffe-tls" -version = "0.2.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pyopenssl" }, - { name = "spiffe" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/d7/19/2228e634dfa76ba41ef2896d69205441beea5abce7b871430ca618210d9d/spiffe_tls-0.2.1.tar.gz", hash = "sha256:5898b91f7e4f8db9f8cbdd00f625a113e2ce5fc7379cf0104c6d19b73d53ddef", size = 10520 } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/f6/e1/fe672f042adcd86291a39c0ed7ada5fdc13be945dbf7b53f2f3df59d0c7e/spiffe_tls-0.2.1-py3-none-any.whl", hash = "sha256:b90d302c92deaedd8278339b0c41d6f98d7c512a76e947e013e6189c9f75a53d", size = 15522 }, -] +version = 1 +revision = 1 +requires-python = ">=3.13" + +[[package]] +name = "agntcy-dir" +version = "0.5.0rc2" +source = { editable = "../../dir-py" } +dependencies = [ + { name = "grpcio" }, + { name = "spiffe" }, + { name = "spiffe-tls" }, +] + +[package.metadata] +requires-dist = [ + { name = "grpcio", specifier = ">=1.74.0" }, + { name = "spiffe", specifier = ">=0.2.2" }, + { name = "spiffe-tls", specifier = ">=0.2.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pytest", specifier = ">=8.4.1" }, + { name = "uuid", specifier = ">=1.30" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230 }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043 }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446 }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101 }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948 }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422 }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499 }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928 }, + { url = 
"https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302 }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909 }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402 }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780 }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320 }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487 }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049 }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793 }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300 }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244 }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828 }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926 }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328 }, + { url = 
"https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650 }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687 }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773 }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013 }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593 }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354 }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480 }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584 }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443 }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437 }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487 }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726 }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195 }, +] + +[[package]] +name = "cryptography" +version = 
"45.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105 }, + { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799 }, + { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504 }, + { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542 }, + { url = "https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244 }, + { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975 }, + { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082 }, + { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397 }, + { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244 }, + { url = "https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862 }, + { url = "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578 }, + { url = 
"https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400 }, + { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824 }, + { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233 }, + { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075 }, + { url = "https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517 }, + { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893 }, + { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132 }, + { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086 }, + { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383 }, + { url = "https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186 }, + { url = "https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639 }, + { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552 }, + { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = 
"sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742 }, +] + +[[package]] +name = "dir-example" +version = "0.0.0" +source = { virtual = "." } +dependencies = [ + { name = "agntcy-dir" }, +] + +[package.metadata] +requires-dist = [{ name = "agntcy-dir", editable = "../../dir-py" }] + +[[package]] +name = "grpcio" +version = "1.74.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d4/d8/1004a5f468715221450e66b051c839c2ce9a985aa3ee427422061fcbb6aa/grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89", size = 5449488 }, + { url = "https://files.pythonhosted.org/packages/94/0e/33731a03f63740d7743dced423846c831d8e6da808fcd02821a4416df7fa/grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01", size = 10974059 }, + { url = "https://files.pythonhosted.org/packages/0d/c6/3d2c14d87771a421205bdca991467cfe473ee4c6a1231c1ede5248c62ab8/grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e", size = 5945647 }, + { url = "https://files.pythonhosted.org/packages/c5/83/5a354c8aaff58594eef7fffebae41a0f8995a6258bbc6809b800c33d4c13/grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91", size = 6626101 }, + { url = "https://files.pythonhosted.org/packages/3f/ca/4fdc7bf59bf6994aa45cbd4ef1055cd65e2884de6113dbd49f75498ddb08/grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249", size = 6182562 }, + { url = "https://files.pythonhosted.org/packages/fd/48/2869e5b2c1922583686f7ae674937986807c2f676d08be70d0a541316270/grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362", size = 6303425 }, + { url = "https://files.pythonhosted.org/packages/a6/0e/bac93147b9a164f759497bc6913e74af1cb632c733c7af62c0336782bd38/grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f", size = 6996533 }, + { url = "https://files.pythonhosted.org/packages/84/35/9f6b2503c1fd86d068b46818bbd7329db26a87cdd8c01e0d1a9abea1104c/grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20", size = 6491489 }, + { url = "https://files.pythonhosted.org/packages/75/33/a04e99be2a82c4cbc4039eb3a76f6c3632932b9d5d295221389d10ac9ca7/grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa", size = 3805811 }, + { url = "https://files.pythonhosted.org/packages/34/80/de3eb55eb581815342d097214bed4c59e806b05f1b3110df03b2280d6dfd/grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24", size = 4489214 }, +] + +[[package]] +name = "pem" +version = "23.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/05/86/16c0b6789816f8d53f2f208b5a090c9197da8a6dae4d490554bb1bedbb09/pem-23.1.0.tar.gz", hash = "sha256:06503ff2441a111f853ce4e8b9eb9d5fedb488ebdbf560115d3dd53a1b4afc73", size = 43796 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/97/8299a481ae6c08494b5d53511e6a4746775d8a354c685c69d8796b2ed482/pem-23.1.0-py3-none-any.whl", hash = "sha256:78bbb1e75b737891350cb9499cbba31da5d59545f360f44163c0bc751cad55d3", size = 9195 }, +] + +[[package]] +name = "protobuf" +version = "6.32.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/a4/cc17347aa2897568beece2e674674359f911d6fe21b0b8d6268cd42727ac/protobuf-6.32.1.tar.gz", hash = "sha256:ee2469e4a021474ab9baafea6cd070e5bf27c7d29433504ddea1a4ee5850f68d", size = 440635 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/98/645183ea03ab3995d29086b8bf4f7562ebd3d10c9a4b14ee3f20d47cfe50/protobuf-6.32.1-cp310-abi3-win32.whl", hash = "sha256:a8a32a84bc9f2aad712041b8b366190f71dde248926da517bde9e832e4412085", size = 424411 }, + { url = "https://files.pythonhosted.org/packages/8c/f3/6f58f841f6ebafe076cebeae33fc336e900619d34b1c93e4b5c97a81fdfa/protobuf-6.32.1-cp310-abi3-win_amd64.whl", hash = "sha256:b00a7d8c25fa471f16bc8153d0e53d6c9e827f0953f3c09aaa4331c718cae5e1", size = 435738 }, + { url = "https://files.pythonhosted.org/packages/10/56/a8a3f4e7190837139e68c7002ec749190a163af3e330f65d90309145a210/protobuf-6.32.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d8c7e6eb619ffdf105ee4ab76af5a68b60a9d0f66da3ea12d1640e6d8dab7281", size = 426454 }, + { url = "https://files.pythonhosted.org/packages/3f/be/8dd0a927c559b37d7a6c8ab79034fd167dcc1f851595f2e641ad62be8643/protobuf-6.32.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:2f5b80a49e1eb7b86d85fcd23fe92df154b9730a725c3b38c4e43b9d77018bf4", size = 322874 }, + { url = "https://files.pythonhosted.org/packages/5c/f6/88d77011b605ef979aace37b7703e4eefad066f7e84d935e5a696515c2dd/protobuf-6.32.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:b1864818300c297265c83a4982fd3169f97122c299f56a56e2445c3698d34710", size = 322013 }, + { url = "https://files.pythonhosted.org/packages/97/b7/15cc7d93443d6c6a84626ae3258a91f4c6ac8c0edd5df35ea7658f71b79c/protobuf-6.32.1-py3-none-any.whl", hash = "sha256:2601b779fc7d32a866c6b4404f9d42a3f67c5b9f3f15b4db3cccabe06b95c346", size = 169289 }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135 }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259 }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140 }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997 }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pyopenssl" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268 }, +] + +[[package]] +name = "spiffe" +version = "0.2.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "grpcio" }, + { name = "pem" }, + { name = "protobuf" }, + { name = "pyasn1" }, + { name = "pyasn1-modules" }, + { name = "pyjwt", extra = ["crypto"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/f9/716f29e5e0cb13d2786cf30fd2b001898cfe7cf33134f2bcd52da3e2b49c/spiffe-0.2.2.tar.gz", hash = "sha256:e4ca1247b1a08631a3f822eec7db70447b6d99734ff50670f2c9020dfb006231", size = 34912 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/7e/50b8f8d4bbeed9b7598cb2e15a7f3b9a55b1635d50d67d763f02ffc4cc93/spiffe-0.2.2-py3-none-any.whl", hash = "sha256:a53fb39ab59408b15dd2f969989045d68bc6b3ebfd283bf2f77e9ff9a66b047b", size = 56009 }, +] + +[[package]] +name = "spiffe-tls" +version = "0.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyopenssl" }, + { name = "spiffe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/19/2228e634dfa76ba41ef2896d69205441beea5abce7b871430ca618210d9d/spiffe_tls-0.2.1.tar.gz", hash = "sha256:5898b91f7e4f8db9f8cbdd00f625a113e2ce5fc7379cf0104c6d19b73d53ddef", size = 10520 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f6/e1/fe672f042adcd86291a39c0ed7ada5fdc13be945dbf7b53f2f3df59d0c7e/spiffe_tls-0.2.1-py3-none-any.whl", hash = "sha256:b90d302c92deaedd8278339b0c41d6f98d7c512a76e947e013e6189c9f75a53d", size = 15522 }, +] diff --git a/server/Dockerfile b/server/Dockerfile index 1f381c265..2146d8c53 100644 --- a/server/Dockerfile +++ b/server/Dockerfile @@ -1,68 +1,68 @@ -# syntax=docker/dockerfile:1@sha256:fe40cf4e92cd0c467be2cfc30657a680ae2398318afd50b0c80585784c604f28 - -# xx is a helper for cross-compilation -FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.4.0@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx - -FROM --platform=$BUILDPLATFORM golang:1.25.2-bookworm@sha256:42d8e9dea06f23d0bfc908826455213ee7f3ed48c43e287a422064220c501be9 AS builder - -COPY --link --from=xx / / - -ARG TARGETPLATFORM - -RUN --mount=type=cache,id=${TARGETPLATFORM}-apt,target=/var/cache/apt,sharing=locked \ - apt-get update \ - && xx-apt-get install -y --no-install-recommends \ - gcc \ - libc6-dev - -WORKDIR /build/server - -RUN --mount=type=cache,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=bind,source=.,target=/build,ro \ - xx-go mod download -x - -ARG BUILD_OPTS -ARG EXTRA_LDFLAGS - -# TODO(adamtagscherer): Currently we don't need C libraries but in the future we may need to turn this on once we add -# security libraries, etc. -ENV CGO_ENABLED=0 - -RUN --mount=type=cache,target=/go/pkg/mod \ - --mount=type=cache,target=/root/.cache/go-build \ - --mount=type=bind,source=.,target=/build,ro \ - xx-go build ${BUILD_OPTS} -ldflags="-s -w -extldflags -static ${EXTRA_LDFLAGS}" \ - -o /bin/apiserver ./cmd/main.go - -RUN xx-verify /bin/apiserver - -# Production image - minimal distroless -FROM gcr.io/distroless/static:nonroot@sha256:c0f429e16b13e583da7e5a6ec20dd656d325d88e6819cafe0adb0828976529dc AS production - -WORKDIR / - -COPY --from=builder /bin/apiserver ./apiserver - -USER 65532:65532 - -ENTRYPOINT ["./apiserver", "run"] - -# Coverage image - includes tar for kubectl cp to work -FROM alpine:3.21@sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45 AS coverage - -RUN apk add --no-cache tar - -WORKDIR / - -COPY --from=builder /bin/apiserver ./apiserver - -# Create a non-root user for coverage -RUN addgroup -g 65532 -S nonroot && adduser -u 65532 -S nonroot -G nonroot - -# Create coverage directory with proper permissions -RUN mkdir -p /tmp/coverage && chown -R 65532:65532 /tmp/coverage - -USER 65532:65532 - -ENTRYPOINT ["./apiserver", "run"] +# syntax=docker/dockerfile:1@sha256:fe40cf4e92cd0c467be2cfc30657a680ae2398318afd50b0c80585784c604f28 + +# xx is a helper for cross-compilation +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.4.0@sha256:0cd3f05c72d6c9b038eb135f91376ee1169ef3a330d34e418e65e2a5c2e9c0d4 AS xx + +FROM --platform=$BUILDPLATFORM golang:1.25.2-bookworm@sha256:42d8e9dea06f23d0bfc908826455213ee7f3ed48c43e287a422064220c501be9 AS builder + +COPY --link --from=xx / / + +ARG TARGETPLATFORM + +RUN --mount=type=cache,id=${TARGETPLATFORM}-apt,target=/var/cache/apt,sharing=locked \ + apt-get update \ + && xx-apt-get install -y --no-install-recommends \ + gcc \ + libc6-dev + +WORKDIR /build/server + +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=bind,source=.,target=/build,ro \ + xx-go mod download -x + +ARG BUILD_OPTS +ARG EXTRA_LDFLAGS + +# TODO(adamtagscherer): Currently we don't need C libraries but in the future we may need to 
turn this on once we add +# security libraries, etc. +ENV CGO_ENABLED=0 + +RUN --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=bind,source=.,target=/build,ro \ + xx-go build ${BUILD_OPTS} -ldflags="-s -w -extldflags -static ${EXTRA_LDFLAGS}" \ + -o /bin/apiserver ./cmd/main.go + +RUN xx-verify /bin/apiserver + +# Production image - minimal distroless +FROM gcr.io/distroless/static:nonroot@sha256:c0f429e16b13e583da7e5a6ec20dd656d325d88e6819cafe0adb0828976529dc AS production + +WORKDIR / + +COPY --from=builder /bin/apiserver ./apiserver + +USER 65532:65532 + +ENTRYPOINT ["./apiserver", "run"] + +# Coverage image - includes tar for kubectl cp to work +FROM alpine:3.21@sha256:21dc6063fd678b478f57c0e13f47560d0ea4eeba26dfc947b2a4f81f686b9f45 AS coverage + +RUN apk add --no-cache tar + +WORKDIR / + +COPY --from=builder /bin/apiserver ./apiserver + +# Create a non-root user for coverage +RUN addgroup -g 65532 -S nonroot && adduser -u 65532 -S nonroot -G nonroot + +# Create coverage directory with proper permissions +RUN mkdir -p /tmp/coverage && chown -R 65532:65532 /tmp/coverage + +USER 65532:65532 + +ENTRYPOINT ["./apiserver", "run"] diff --git a/server/README.md b/server/README.md index ba88d4f0b..8c6585730 100644 --- a/server/README.md +++ b/server/README.md @@ -1,126 +1,126 @@ -# Directory Server - -## Configuration - -The Directory server supports configuration via environment variables, YAML configuration files, or both. Environment variables follow the `DIRECTORY_SERVER_` prefix convention. - -### OASF Validation Configuration - -The server validates all records server-side. By default, records are validated using API validation in strict mode. This ensures consistent, strict validation for all records regardless of their source. 
- -- **`oasf_api_validation.schema_url`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL`** - OASF schema URL for API-based validation - - **Default**: `https://schema.oasf.outshift.com` - - URL of the OASF server to use for validation - - This affects all record validation operations including push, sync, and import - -- **`oasf_api_validation.disable`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE`** - Use embedded schema validation instead of API validator - - **Default**: `false` (uses API validation) - - When `true`, uses embedded schemas for validation (no HTTP calls to OASF server) - -- **`oasf_api_validation.strict_mode`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE`** - Use strict validation mode - - **Default**: `true` (strict mode - fails on warnings) - - When `false`, uses lax validation mode (allows warnings, only fails on errors) - - Only applies when `oasf_api_validation.disable` is `false` - -**Example with environment variables:** -```bash -# Use default OASF API validator with strict validation (default behavior) -./dirctl-apiserver - -# Use custom OASF server -DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL=http://localhost:8080 ./dirctl-apiserver - -# Use embedded schema validation (no API calls) -DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE=true ./dirctl-apiserver - -# Use lax API validation (allows warnings) -DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE=false ./dirctl-apiserver -``` - -**Example with YAML configuration:** -```yaml -# server.config.yml -oasf_api_validation: - schema_url: "https://schema.oasf.outshift.com" - disable: false - strict_mode: true -listen_address: "0.0.0.0:8888" -``` - -#### Testing with Local OASF Server - -To test with a local OASF instance deployed alongside the directory server: - -1. **Enable OASF in Helm values** - Edit `install/charts/dir/values.yaml`: - ```yaml - apiserver: - oasf: - enabled: true - ``` - -2. **Set schema URL to use the deployed OASF instance** - In the same file, set: - ```yaml - apiserver: - config: - oasf_api_validation: - schema_url: "http://dir-ingress-controller.dir-server.svc.cluster.local" - ``` - Replace `dir` with your Helm release name and `dir-server` with your namespace if different. - -3. **Deploy**: - ```bash - task build - task deploy:local - ``` - -The OASF instance will be deployed as a subchart in the same namespace and automatically configured for multi-version routing via ingress. - -#### Using a Locally Built OASF Image - -If you want to deploy with a locally built OASF image (e.g., containing `0.9.0-dev` schema files), you need to load the image into Kind **before** deploying. The `task deploy:local` command automatically creates a cluster and loads images, but it doesn't load custom OASF images. Follow these steps: - -1. **Create the Kind cluster first**: - ```bash - task deploy:kubernetes:setup-cluster - ``` - This creates the cluster and loads the Directory server images. - -2. **Build and tag your local OASF image**: - ```bash - cd /path/to/oasf/server - docker build -t ghcr.io/agntcy/oasf-server:latest . - ``` - -3. **Load the OASF image into Kind**: - ```bash - kind load docker-image ghcr.io/agntcy/oasf-server:latest --name agntcy-cluster - ``` - -4. **Configure values.yaml** to use the local image: - ```yaml - oasf: - enabled: true - image: - repository: ghcr.io/agntcy/oasf-server - versions: - - server: latest - schema: 0.9.0-dev - default: true - ``` - -5. 
**Deploy with Helm** (don't use `task deploy:local` as it will recreate the cluster): - ```bash - helm upgrade --install dir ./install/charts/dir \ - -f ./install/charts/dir/values.yaml \ - -n dir-server --create-namespace - ``` - -**Note**: If you update the local OASF image, reload it into Kind and restart the deployment: -```bash -kind load docker-image ghcr.io/agntcy/oasf-server:latest --name agntcy-cluster -kubectl rollout restart deployment/dir-oasf-0-9-0-dev -n dir-server -``` - -### Other Configuration Options - -For complete server configuration including authentication, authorization, storage, routing, and database options, see the [server configuration reference](./config/config.go). +# Directory Server + +## Configuration + +The Directory server supports configuration via environment variables, YAML configuration files, or both. Environment variables follow the `DIRECTORY_SERVER_` prefix convention. + +### OASF Validation Configuration + +The server validates all records server-side. By default, records are validated using API validation in strict mode. This ensures consistent, strict validation for all records regardless of their source. + +- **`oasf_api_validation.schema_url`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL`** - OASF schema URL for API-based validation + - **Default**: `https://schema.oasf.outshift.com` + - URL of the OASF server to use for validation + - This affects all record validation operations including push, sync, and import + +- **`oasf_api_validation.disable`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE`** - Use embedded schema validation instead of API validator + - **Default**: `false` (uses API validation) + - When `true`, uses embedded schemas for validation (no HTTP calls to OASF server) + +- **`oasf_api_validation.strict_mode`** / **`DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE`** - Use strict validation mode + - **Default**: `true` (strict mode - fails on warnings) + - When `false`, uses lax validation mode (allows warnings, only fails on errors) + - Only applies when `oasf_api_validation.disable` is `false` + +**Example with environment variables:** +```bash +# Use default OASF API validator with strict validation (default behavior) +./dirctl-apiserver + +# Use custom OASF server +DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL=http://localhost:8080 ./dirctl-apiserver + +# Use embedded schema validation (no API calls) +DIRECTORY_SERVER_OASF_API_VALIDATION_DISABLE=true ./dirctl-apiserver + +# Use lax API validation (allows warnings) +DIRECTORY_SERVER_OASF_API_VALIDATION_STRICT_MODE=false ./dirctl-apiserver +``` + +**Example with YAML configuration:** +```yaml +# server.config.yml +oasf_api_validation: + schema_url: "https://schema.oasf.outshift.com" + disable: false + strict_mode: true +listen_address: "0.0.0.0:8888" +``` + +#### Testing with Local OASF Server + +To test with a local OASF instance deployed alongside the directory server: + +1. **Enable OASF in Helm values** - Edit `install/charts/dir/values.yaml`: + ```yaml + apiserver: + oasf: + enabled: true + ``` + +2. **Set schema URL to use the deployed OASF instance** - In the same file, set: + ```yaml + apiserver: + config: + oasf_api_validation: + schema_url: "http://dir-ingress-controller.dir-server.svc.cluster.local" + ``` + Replace `dir` with your Helm release name and `dir-server` with your namespace if different. + +3. 
**Deploy**: + ```bash + task build + task deploy:local + ``` + +The OASF instance will be deployed as a subchart in the same namespace and automatically configured for multi-version routing via ingress. + +#### Using a Locally Built OASF Image + +If you want to deploy with a locally built OASF image (e.g., containing `0.9.0-dev` schema files), you need to load the image into Kind **before** deploying. The `task deploy:local` command automatically creates a cluster and loads images, but it doesn't load custom OASF images. Follow these steps: + +1. **Create the Kind cluster first**: + ```bash + task deploy:kubernetes:setup-cluster + ``` + This creates the cluster and loads the Directory server images. + +2. **Build and tag your local OASF image**: + ```bash + cd /path/to/oasf/server + docker build -t ghcr.io/agntcy/oasf-server:latest . + ``` + +3. **Load the OASF image into Kind**: + ```bash + kind load docker-image ghcr.io/agntcy/oasf-server:latest --name agntcy-cluster + ``` + +4. **Configure values.yaml** to use the local image: + ```yaml + oasf: + enabled: true + image: + repository: ghcr.io/agntcy/oasf-server + versions: + - server: latest + schema: 0.9.0-dev + default: true + ``` + +5. **Deploy with Helm** (don't use `task deploy:local` as it will recreate the cluster): + ```bash + helm upgrade --install dir ./install/charts/dir \ + -f ./install/charts/dir/values.yaml \ + -n dir-server --create-namespace + ``` + +**Note**: If you update the local OASF image, reload it into Kind and restart the deployment: +```bash +kind load docker-image ghcr.io/agntcy/oasf-server:latest --name agntcy-cluster +kubectl rollout restart deployment/dir-oasf-0-9-0-dev -n dir-server +``` + +### Other Configuration Options + +For complete server configuration including authentication, authorization, storage, routing, and database options, see the [server configuration reference](./config/config.go). diff --git a/server/authn/config/config.go b/server/authn/config/config.go index a22853c2d..0f0e20094 100644 --- a/server/authn/config/config.go +++ b/server/authn/config/config.go @@ -1,55 +1,55 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "errors" - "fmt" -) - -// AuthMode specifies the authentication mode (jwt or x509). -type AuthMode string - -const ( - AuthModeJWT AuthMode = "jwt" - AuthModeX509 AuthMode = "x509" -) - -// Config contains configuration for authentication services. 
-type Config struct { - // Indicates if authentication is enabled - Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` - - // Authentication mode: "jwt" or "x509" - Mode AuthMode `json:"mode,omitempty" mapstructure:"mode"` - - // SPIFFE socket path for authentication - SocketPath string `json:"socket_path,omitempty" mapstructure:"socket_path"` - - // Expected audiences for JWT validation (only used in JWT mode) - Audiences []string `json:"audiences,omitempty" mapstructure:"audiences"` -} - -func (c *Config) Validate() error { - if !c.Enabled { - return nil - } - - if c.SocketPath == "" { - return errors.New("socket path is required") - } - - switch c.Mode { - case AuthModeJWT: - if len(c.Audiences) == 0 { - return errors.New("at least one audience is required for JWT mode") - } - case AuthModeX509: - // No additional validation required for X.509 - default: - return fmt.Errorf("invalid auth mode: %s (must be 'jwt' or 'x509')", c.Mode) - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" +) + +// AuthMode specifies the authentication mode (jwt or x509). +type AuthMode string + +const ( + AuthModeJWT AuthMode = "jwt" + AuthModeX509 AuthMode = "x509" +) + +// Config contains configuration for authentication services. +type Config struct { + // Indicates if authentication is enabled + Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` + + // Authentication mode: "jwt" or "x509" + Mode AuthMode `json:"mode,omitempty" mapstructure:"mode"` + + // SPIFFE socket path for authentication + SocketPath string `json:"socket_path,omitempty" mapstructure:"socket_path"` + + // Expected audiences for JWT validation (only used in JWT mode) + Audiences []string `json:"audiences,omitempty" mapstructure:"audiences"` +} + +func (c *Config) Validate() error { + if !c.Enabled { + return nil + } + + if c.SocketPath == "" { + return errors.New("socket path is required") + } + + switch c.Mode { + case AuthModeJWT: + if len(c.Audiences) == 0 { + return errors.New("at least one audience is required for JWT mode") + } + case AuthModeX509: + // No additional validation required for X.509 + default: + return fmt.Errorf("invalid auth mode: %s (must be 'jwt' or 'x509')", c.Mode) + } + + return nil +} diff --git a/server/authn/jwt.go b/server/authn/jwt.go index ac1de9175..2dcebe655 100644 --- a/server/authn/jwt.go +++ b/server/authn/jwt.go @@ -1,170 +1,170 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package authn - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/agntcy/dir/server/healthcheck" - "github.com/spiffe/go-spiffe/v2/spiffeid" - "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" - "github.com/spiffe/go-spiffe/v2/workloadapi" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Context key for storing authenticated SPIFFE ID. -type contextKey string - -const ( - // SpiffeIDContextKey is the context key for the authenticated SPIFFE ID. - SpiffeIDContextKey contextKey = "spiffe-id" -) - -// JWTInterceptorFn is a function that performs JWT authentication. -type JWTInterceptorFn func(ctx context.Context) (context.Context, error) - -// NewJWTInterceptor returns an interceptor function that validates JWT tokens. 
-func NewJWTInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) JWTInterceptorFn { - return func(ctx context.Context) (context.Context, error) { - // Extract JWT from metadata - token, err := extractToken(ctx) - if err != nil { - return nil, status.Error(codes.Unauthenticated, fmt.Sprintf("failed to extract token: %v", err)) - } - - // Validate JWT for each audience until one succeeds - var ( - svid *jwtsvid.SVID - lastErr error - ) - - for _, audience := range audiences { - svid, lastErr = jwtsvid.ParseAndValidate(token, jwtSource, []string{audience}) - if lastErr == nil { - break - } - } - - if lastErr != nil { - logger.Warn("JWT validation failed", - "error", lastErr, - "audiences", audiences, - ) - - return nil, status.Error(codes.Unauthenticated, "invalid or expired token") - } - - // Extract SPIFFE ID - spiffeID := svid.ID - - logger.Debug("JWT authenticated", - "spiffe_id", spiffeID.String(), - "audience", svid.Audience, - ) - - // Store SPIFFE ID in context for downstream handlers - ctx = context.WithValue(ctx, SpiffeIDContextKey, spiffeID) - - return ctx, nil - } -} - -// extractToken extracts the JWT token from gRPC metadata. -func extractToken(ctx context.Context) (string, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", errors.New("missing metadata") - } - - authHeader := md.Get("authorization") - if len(authHeader) == 0 { - return "", errors.New("missing authorization header") - } - - // Expected format: "Bearer " - const expectedParts = 2 - - parts := strings.SplitN(authHeader[0], " ", expectedParts) - - if len(parts) != expectedParts || strings.ToLower(parts[0]) != "bearer" { - return "", errors.New("invalid authorization header format") - } - - return parts[1], nil -} - -// SpiffeIDFromContext extracts the SPIFFE ID from the context. -func SpiffeIDFromContext(ctx context.Context) (spiffeid.ID, bool) { - id, ok := ctx.Value(SpiffeIDContextKey).(spiffeid.ID) - - return id, ok -} - -// jwtUnaryInterceptorFor wraps the JWT interceptor function for unary RPCs. -func jwtUnaryInterceptorFor(fn JWTInterceptorFn) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - // Skip authentication for health check endpoints - if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { - return handler(ctx, req) - } - - newCtx, err := fn(ctx) - if err != nil { - return nil, err - } - - return handler(newCtx, req) - } -} - -// jwtStreamInterceptorFor wraps the JWT interceptor function for stream RPCs. -func jwtStreamInterceptorFor(fn JWTInterceptorFn) grpc.StreamServerInterceptor { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // Skip authentication for health check endpoints - if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { - return handler(srv, ss) - } - - newCtx, err := fn(ss.Context()) - if err != nil { - return err - } - - // Wrap the stream to use the new context - wrappedStream := &wrappedServerStream{ - ServerStream: ss, - ctx: newCtx, - } - - return handler(srv, wrappedStream) - } -} - -// JWTUnaryInterceptor is a convenience wrapper for JWT unary authentication. -func JWTUnaryInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) grpc.UnaryServerInterceptor { - return jwtUnaryInterceptorFor(NewJWTInterceptor(jwtSource, audiences)) -} - -// JWTStreamInterceptor is a convenience wrapper for JWT stream authentication. 
-func JWTStreamInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) grpc.StreamServerInterceptor { - return jwtStreamInterceptorFor(NewJWTInterceptor(jwtSource, audiences)) -} - -// wrappedServerStream wraps a grpc.ServerStream to override the context. -// -//nolint:containedctx // Context is required for gRPC stream wrapping -type wrappedServerStream struct { - grpc.ServerStream - ctx context.Context -} - -func (w *wrappedServerStream) Context() context.Context { - return w.ctx -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package authn + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/agntcy/dir/server/healthcheck" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Context key for storing authenticated SPIFFE ID. +type contextKey string + +const ( + // SpiffeIDContextKey is the context key for the authenticated SPIFFE ID. + SpiffeIDContextKey contextKey = "spiffe-id" +) + +// JWTInterceptorFn is a function that performs JWT authentication. +type JWTInterceptorFn func(ctx context.Context) (context.Context, error) + +// NewJWTInterceptor returns an interceptor function that validates JWT tokens. +func NewJWTInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) JWTInterceptorFn { + return func(ctx context.Context) (context.Context, error) { + // Extract JWT from metadata + token, err := extractToken(ctx) + if err != nil { + return nil, status.Error(codes.Unauthenticated, fmt.Sprintf("failed to extract token: %v", err)) + } + + // Validate JWT for each audience until one succeeds + var ( + svid *jwtsvid.SVID + lastErr error + ) + + for _, audience := range audiences { + svid, lastErr = jwtsvid.ParseAndValidate(token, jwtSource, []string{audience}) + if lastErr == nil { + break + } + } + + if lastErr != nil { + logger.Warn("JWT validation failed", + "error", lastErr, + "audiences", audiences, + ) + + return nil, status.Error(codes.Unauthenticated, "invalid or expired token") + } + + // Extract SPIFFE ID + spiffeID := svid.ID + + logger.Debug("JWT authenticated", + "spiffe_id", spiffeID.String(), + "audience", svid.Audience, + ) + + // Store SPIFFE ID in context for downstream handlers + ctx = context.WithValue(ctx, SpiffeIDContextKey, spiffeID) + + return ctx, nil + } +} + +// extractToken extracts the JWT token from gRPC metadata. +func extractToken(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", errors.New("missing metadata") + } + + authHeader := md.Get("authorization") + if len(authHeader) == 0 { + return "", errors.New("missing authorization header") + } + + // Expected format: "Bearer " + const expectedParts = 2 + + parts := strings.SplitN(authHeader[0], " ", expectedParts) + + if len(parts) != expectedParts || strings.ToLower(parts[0]) != "bearer" { + return "", errors.New("invalid authorization header format") + } + + return parts[1], nil +} + +// SpiffeIDFromContext extracts the SPIFFE ID from the context. +func SpiffeIDFromContext(ctx context.Context) (spiffeid.ID, bool) { + id, ok := ctx.Value(SpiffeIDContextKey).(spiffeid.ID) + + return id, ok +} + +// jwtUnaryInterceptorFor wraps the JWT interceptor function for unary RPCs. 
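+// Health check endpoints are exempted so probes can run without credentials.
+// A minimal wiring sketch using the exported convenience wrappers below
+// (jwtSource and audiences are assumed to be created elsewhere, e.g. by the
+// Service in this package):
+//
+//	srv := grpc.NewServer(
+//		grpc.ChainUnaryInterceptor(JWTUnaryInterceptor(jwtSource, audiences)),
+//		grpc.ChainStreamInterceptor(JWTStreamInterceptor(jwtSource, audiences)),
+//	)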
+func jwtUnaryInterceptorFor(fn JWTInterceptorFn) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + // Skip authentication for health check endpoints + if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { + return handler(ctx, req) + } + + newCtx, err := fn(ctx) + if err != nil { + return nil, err + } + + return handler(newCtx, req) + } +} + +// jwtStreamInterceptorFor wraps the JWT interceptor function for stream RPCs. +func jwtStreamInterceptorFor(fn JWTInterceptorFn) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // Skip authentication for health check endpoints + if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { + return handler(srv, ss) + } + + newCtx, err := fn(ss.Context()) + if err != nil { + return err + } + + // Wrap the stream to use the new context + wrappedStream := &wrappedServerStream{ + ServerStream: ss, + ctx: newCtx, + } + + return handler(srv, wrappedStream) + } +} + +// JWTUnaryInterceptor is a convenience wrapper for JWT unary authentication. +func JWTUnaryInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) grpc.UnaryServerInterceptor { + return jwtUnaryInterceptorFor(NewJWTInterceptor(jwtSource, audiences)) +} + +// JWTStreamInterceptor is a convenience wrapper for JWT stream authentication. +func JWTStreamInterceptor(jwtSource *workloadapi.JWTSource, audiences []string) grpc.StreamServerInterceptor { + return jwtStreamInterceptorFor(NewJWTInterceptor(jwtSource, audiences)) +} + +// wrappedServerStream wraps a grpc.ServerStream to override the context. +// +//nolint:containedctx // Context is required for gRPC stream wrapping +type wrappedServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedServerStream) Context() context.Context { + return w.ctx +} diff --git a/server/authn/service.go b/server/authn/service.go index f8b99f34b..e49cb2dbc 100644 --- a/server/authn/service.go +++ b/server/authn/service.go @@ -1,272 +1,272 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package authn - -import ( - "context" - "fmt" - "io" - - "github.com/agntcy/dir/server/authn/config" - "github.com/agntcy/dir/utils/logging" - "github.com/agntcy/dir/utils/spiffe" - "github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials" - "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig" - "github.com/spiffe/go-spiffe/v2/svid/x509svid" - "github.com/spiffe/go-spiffe/v2/workloadapi" - "google.golang.org/grpc" -) - -var logger = logging.Logger("authn") - -// Service manages authentication using SPIFFE (X.509 or JWT). -type Service struct { - mode config.AuthMode - audiences []string - client *workloadapi.Client - jwtSource *workloadapi.JWTSource - x509Src x509svid.Source // Use interface to allow wrapping with retry logic - bundleSrc *workloadapi.BundleSource - x509Closer io.Closer // Store original X509Source for cleanup -} - -// New creates a new authentication service (JWT or X.509 based on config). 
-func New(ctx context.Context, cfg config.Config) (*Service, error) { - // Validate - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid authn config: %w", err) - } - - // Create a client for SPIRE Workload API - client, err := workloadapi.New(ctx, workloadapi.WithAddr(cfg.SocketPath)) - if err != nil { - return nil, fmt.Errorf("failed to create workload API client: %w", err) - } - - service := &Service{ - mode: cfg.Mode, - client: client, - } - - // Initialize based on authentication mode - switch cfg.Mode { - case config.AuthModeJWT: - if err := service.initJWT(ctx, cfg); err != nil { - _ = client.Close() - - return nil, err - } - - logger.Info("JWT authentication service initialized", "audiences", cfg.Audiences) - - case config.AuthModeX509: - if err := service.initX509(ctx); err != nil { - _ = client.Close() - - return nil, err - } - - logger.Info("X.509 authentication service initialized") - - default: - _ = client.Close() - - return nil, fmt.Errorf("unsupported auth mode: %s", cfg.Mode) - } - - return service, nil -} - -// initJWT initializes JWT authentication components. -// For JWT mode, the server presents its X.509-SVID via TLS (for server authentication and encryption), -// while clients authenticate using JWT-SVIDs. This follows the official SPIFFE JWT pattern. -func (s *Service) initJWT(ctx context.Context, cfg config.Config) error { - // Create X.509 source for server's TLS certificate - x509Src, err := workloadapi.NewX509Source(ctx, workloadapi.WithClient(s.client)) - if err != nil { - return fmt.Errorf("failed to create X509 source: %w", err) - } - - logger.Debug("Created X509 source for JWT mode, waiting for valid SVID") - - // Wait for X509-SVID to be available with retry logic - // This ensures the server presents a certificate with URI SAN during TLS handshake - // Critical: If the server starts without a valid SPIFFE ID, clients will reject the connection - svid, svidErr := spiffe.GetX509SVIDWithRetry( - x509Src, - spiffe.DefaultMaxRetries, - spiffe.DefaultInitialBackoff, - spiffe.DefaultMaxBackoff, - logger, - ) - if svidErr != nil { - _ = x509Src.Close() - - logger.Error("Failed to get valid X509-SVID for server after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries) - - return fmt.Errorf("failed to get valid X509-SVID for server (SPIRE entry may not be synced yet): %w", svidErr) - } - - logger.Info("Successfully obtained valid X509-SVID for server", "spiffe_id", svid.ID.String()) - - // Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry - // This is critical because grpccredentials.TLSServerCredentials calls GetX509SVID() - // during the actual TLS handshake, not just during setup. Without this wrapper, - // the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment. 
- wrappedX509Src := spiffe.NewX509SourceWithRetry( - x509Src, - x509Src, - logger, - spiffe.DefaultMaxRetries, - spiffe.DefaultInitialBackoff, - spiffe.DefaultMaxBackoff, - ) - - logger.Debug("Created X509SourceWithRetry wrapper for server (JWT mode)", - "wrapped_type", fmt.Sprintf("%T", wrappedX509Src), - "src_type", fmt.Sprintf("%T", x509Src)) - - // Create bundle source for trust verification - bundleSrc, err := workloadapi.NewBundleSource(ctx, workloadapi.WithClient(s.client)) - if err != nil { - _ = x509Src.Close() - - return fmt.Errorf("failed to create bundle source: %w", err) - } - - // Create JWT source for validating client JWT-SVIDs - jwtSource, err := workloadapi.NewJWTSource(ctx, workloadapi.WithClient(s.client)) - if err != nil { - _ = x509Src.Close() - _ = bundleSrc.Close() - - return fmt.Errorf("failed to create JWT source: %w", err) - } - - s.x509Src = wrappedX509Src // Store wrapped source for use in GetServerOptions - s.x509Closer = x509Src // Store original source for cleanup - s.bundleSrc = bundleSrc - s.jwtSource = jwtSource - s.audiences = cfg.Audiences - - return nil -} - -// initX509 initializes X.509 authentication components. -func (s *Service) initX509(ctx context.Context) error { - // Create a new X509 source which periodically refetches X509-SVIDs and X.509 bundles - x509Src, err := workloadapi.NewX509Source(ctx, workloadapi.WithClient(s.client)) - if err != nil { - return fmt.Errorf("failed to create X509 source: %w", err) - } - - logger.Debug("Created X509 source for X509 mode, waiting for valid SVID") - - // Wait for X509-SVID to be available with retry logic - // This ensures the server presents a certificate with URI SAN during TLS handshake - // Critical: If the server starts without a valid SPIFFE ID, clients will reject the connection - // with "certificate contains no URI SAN" error - svid, svidErr := spiffe.GetX509SVIDWithRetry( - x509Src, - spiffe.DefaultMaxRetries, - spiffe.DefaultInitialBackoff, - spiffe.DefaultMaxBackoff, - logger, - ) - if svidErr != nil { - _ = x509Src.Close() - - logger.Error("Failed to get valid X509-SVID for server after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries) - - return fmt.Errorf("failed to get valid X509-SVID for server (SPIRE entry may not be synced yet): %w", svidErr) - } - - logger.Info("Successfully obtained valid X509-SVID for server", "spiffe_id", svid.ID.String()) - - // Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry - // This is critical because grpccredentials.MTLSServerCredentials calls GetX509SVID() - // during the actual TLS handshake, not just during setup. Without this wrapper, - // the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment. - wrappedX509Src := spiffe.NewX509SourceWithRetry( - x509Src, - x509Src, - logger, - spiffe.DefaultMaxRetries, - spiffe.DefaultInitialBackoff, - spiffe.DefaultMaxBackoff, - ) - - logger.Debug("Created X509SourceWithRetry wrapper for server", - "wrapped_type", fmt.Sprintf("%T", wrappedX509Src), - "src_type", fmt.Sprintf("%T", x509Src)) - - // Create a new Bundle source which periodically refetches SPIFFE bundles. - // Required when running Federation. 
- bundleSrc, err := workloadapi.NewBundleSource(ctx, workloadapi.WithClient(s.client)) - if err != nil { - _ = x509Src.Close() - - return fmt.Errorf("failed to create bundle source: %w", err) - } - - s.x509Src = wrappedX509Src // Store wrapped source for use in GetServerOptions - s.x509Closer = x509Src // Store original source for cleanup - s.bundleSrc = bundleSrc - - return nil -} - -// GetServerOptions returns gRPC server options for authentication. -func (s *Service) GetServerOptions() []grpc.ServerOption { - switch s.mode { - case config.AuthModeJWT: - // JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID - return []grpc.ServerOption{ - grpc.Creds( - grpccredentials.TLSServerCredentials(s.x509Src), - ), - grpc.ChainUnaryInterceptor(JWTUnaryInterceptor(s.jwtSource, s.audiences)), - grpc.ChainStreamInterceptor(JWTStreamInterceptor(s.jwtSource, s.audiences)), - } - - case config.AuthModeX509: - return []grpc.ServerOption{ - grpc.Creds( - grpccredentials.MTLSServerCredentials(s.x509Src, s.bundleSrc, tlsconfig.AuthorizeAny()), - ), - grpc.ChainUnaryInterceptor(X509UnaryInterceptor()), - grpc.ChainStreamInterceptor(X509StreamInterceptor()), - } - - default: - logger.Error("Unsupported auth mode", "mode", s.mode) - - return []grpc.ServerOption{} - } -} - -// Stop closes the workload API client and all sources. -// -//nolint:wrapcheck -func (s *Service) Stop() error { - if s.jwtSource != nil { - if err := s.jwtSource.Close(); err != nil { - logger.Error("Failed to close JWT source", "error", err) - } - } - - if s.x509Closer != nil { - if err := s.x509Closer.Close(); err != nil { - logger.Error("Failed to close X509 source", "error", err) - } - } - - if s.bundleSrc != nil { - if err := s.bundleSrc.Close(); err != nil { - logger.Error("Failed to close bundle source", "error", err) - } - } - - return s.client.Close() -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package authn + +import ( + "context" + "fmt" + "io" + + "github.com/agntcy/dir/server/authn/config" + "github.com/agntcy/dir/utils/logging" + "github.com/agntcy/dir/utils/spiffe" + "github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials" + "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + "google.golang.org/grpc" +) + +var logger = logging.Logger("authn") + +// Service manages authentication using SPIFFE (X.509 or JWT). +type Service struct { + mode config.AuthMode + audiences []string + client *workloadapi.Client + jwtSource *workloadapi.JWTSource + x509Src x509svid.Source // Use interface to allow wrapping with retry logic + bundleSrc *workloadapi.BundleSource + x509Closer io.Closer // Store original X509Source for cleanup +} + +// New creates a new authentication service (JWT or X.509 based on config). 
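+// A minimal usage sketch (the field values are illustrative, not defaults):
+//
+//	svc, err := authn.New(ctx, config.Config{
+//		Enabled:    true,
+//		Mode:       config.AuthModeJWT,
+//		SocketPath: "unix:///run/spire/agent/public/api.sock",
+//		Audiences:  []string{"directory"},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer svc.Stop()
+//
+//	srv := grpc.NewServer(svc.GetServerOptions()...)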
+func New(ctx context.Context, cfg config.Config) (*Service, error) { + // Validate + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid authn config: %w", err) + } + + // Create a client for SPIRE Workload API + client, err := workloadapi.New(ctx, workloadapi.WithAddr(cfg.SocketPath)) + if err != nil { + return nil, fmt.Errorf("failed to create workload API client: %w", err) + } + + service := &Service{ + mode: cfg.Mode, + client: client, + } + + // Initialize based on authentication mode + switch cfg.Mode { + case config.AuthModeJWT: + if err := service.initJWT(ctx, cfg); err != nil { + _ = client.Close() + + return nil, err + } + + logger.Info("JWT authentication service initialized", "audiences", cfg.Audiences) + + case config.AuthModeX509: + if err := service.initX509(ctx); err != nil { + _ = client.Close() + + return nil, err + } + + logger.Info("X.509 authentication service initialized") + + default: + _ = client.Close() + + return nil, fmt.Errorf("unsupported auth mode: %s", cfg.Mode) + } + + return service, nil +} + +// initJWT initializes JWT authentication components. +// For JWT mode, the server presents its X.509-SVID via TLS (for server authentication and encryption), +// while clients authenticate using JWT-SVIDs. This follows the official SPIFFE JWT pattern. +func (s *Service) initJWT(ctx context.Context, cfg config.Config) error { + // Create X.509 source for server's TLS certificate + x509Src, err := workloadapi.NewX509Source(ctx, workloadapi.WithClient(s.client)) + if err != nil { + return fmt.Errorf("failed to create X509 source: %w", err) + } + + logger.Debug("Created X509 source for JWT mode, waiting for valid SVID") + + // Wait for X509-SVID to be available with retry logic + // This ensures the server presents a certificate with URI SAN during TLS handshake + // Critical: If the server starts without a valid SPIFFE ID, clients will reject the connection + svid, svidErr := spiffe.GetX509SVIDWithRetry( + x509Src, + spiffe.DefaultMaxRetries, + spiffe.DefaultInitialBackoff, + spiffe.DefaultMaxBackoff, + logger, + ) + if svidErr != nil { + _ = x509Src.Close() + + logger.Error("Failed to get valid X509-SVID for server after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries) + + return fmt.Errorf("failed to get valid X509-SVID for server (SPIRE entry may not be synced yet): %w", svidErr) + } + + logger.Info("Successfully obtained valid X509-SVID for server", "spiffe_id", svid.ID.String()) + + // Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry + // This is critical because grpccredentials.TLSServerCredentials calls GetX509SVID() + // during the actual TLS handshake, not just during setup. Without this wrapper, + // the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment. 
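+	// In practice that failure surfaces on the client as a TLS error such as
+	// "certificate contains no URI SAN" (the same symptom called out in initX509).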
+ wrappedX509Src := spiffe.NewX509SourceWithRetry( + x509Src, + x509Src, + logger, + spiffe.DefaultMaxRetries, + spiffe.DefaultInitialBackoff, + spiffe.DefaultMaxBackoff, + ) + + logger.Debug("Created X509SourceWithRetry wrapper for server (JWT mode)", + "wrapped_type", fmt.Sprintf("%T", wrappedX509Src), + "src_type", fmt.Sprintf("%T", x509Src)) + + // Create bundle source for trust verification + bundleSrc, err := workloadapi.NewBundleSource(ctx, workloadapi.WithClient(s.client)) + if err != nil { + _ = x509Src.Close() + + return fmt.Errorf("failed to create bundle source: %w", err) + } + + // Create JWT source for validating client JWT-SVIDs + jwtSource, err := workloadapi.NewJWTSource(ctx, workloadapi.WithClient(s.client)) + if err != nil { + _ = x509Src.Close() + _ = bundleSrc.Close() + + return fmt.Errorf("failed to create JWT source: %w", err) + } + + s.x509Src = wrappedX509Src // Store wrapped source for use in GetServerOptions + s.x509Closer = x509Src // Store original source for cleanup + s.bundleSrc = bundleSrc + s.jwtSource = jwtSource + s.audiences = cfg.Audiences + + return nil +} + +// initX509 initializes X.509 authentication components. +func (s *Service) initX509(ctx context.Context) error { + // Create a new X509 source which periodically refetches X509-SVIDs and X.509 bundles + x509Src, err := workloadapi.NewX509Source(ctx, workloadapi.WithClient(s.client)) + if err != nil { + return fmt.Errorf("failed to create X509 source: %w", err) + } + + logger.Debug("Created X509 source for X509 mode, waiting for valid SVID") + + // Wait for X509-SVID to be available with retry logic + // This ensures the server presents a certificate with URI SAN during TLS handshake + // Critical: If the server starts without a valid SPIFFE ID, clients will reject the connection + // with "certificate contains no URI SAN" error + svid, svidErr := spiffe.GetX509SVIDWithRetry( + x509Src, + spiffe.DefaultMaxRetries, + spiffe.DefaultInitialBackoff, + spiffe.DefaultMaxBackoff, + logger, + ) + if svidErr != nil { + _ = x509Src.Close() + + logger.Error("Failed to get valid X509-SVID for server after retries", "error", svidErr, "max_retries", spiffe.DefaultMaxRetries) + + return fmt.Errorf("failed to get valid X509-SVID for server (SPIRE entry may not be synced yet): %w", svidErr) + } + + logger.Info("Successfully obtained valid X509-SVID for server", "spiffe_id", svid.ID.String()) + + // Wrap x509Src with retry logic so GetX509SVID() calls during TLS handshake also retry + // This is critical because grpccredentials.MTLSServerCredentials calls GetX509SVID() + // during the actual TLS handshake, not just during setup. Without this wrapper, + // the TLS handshake may fail if the certificate doesn't have a URI SAN at that moment. + wrappedX509Src := spiffe.NewX509SourceWithRetry( + x509Src, + x509Src, + logger, + spiffe.DefaultMaxRetries, + spiffe.DefaultInitialBackoff, + spiffe.DefaultMaxBackoff, + ) + + logger.Debug("Created X509SourceWithRetry wrapper for server", + "wrapped_type", fmt.Sprintf("%T", wrappedX509Src), + "src_type", fmt.Sprintf("%T", x509Src)) + + // Create a new Bundle source which periodically refetches SPIFFE bundles. + // Required when running Federation. 
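+	// The bundle source keeps trust bundles, including those of federated trust
+	// domains, refreshed via the Workload API; GetServerOptions hands it to
+	// MTLSServerCredentials so client certificates from peer domains can be verified.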
+ bundleSrc, err := workloadapi.NewBundleSource(ctx, workloadapi.WithClient(s.client)) + if err != nil { + _ = x509Src.Close() + + return fmt.Errorf("failed to create bundle source: %w", err) + } + + s.x509Src = wrappedX509Src // Store wrapped source for use in GetServerOptions + s.x509Closer = x509Src // Store original source for cleanup + s.bundleSrc = bundleSrc + + return nil +} + +// GetServerOptions returns gRPC server options for authentication. +func (s *Service) GetServerOptions() []grpc.ServerOption { + switch s.mode { + case config.AuthModeJWT: + // JWT mode: Server presents X.509-SVID via TLS, clients authenticate with JWT-SVID + return []grpc.ServerOption{ + grpc.Creds( + grpccredentials.TLSServerCredentials(s.x509Src), + ), + grpc.ChainUnaryInterceptor(JWTUnaryInterceptor(s.jwtSource, s.audiences)), + grpc.ChainStreamInterceptor(JWTStreamInterceptor(s.jwtSource, s.audiences)), + } + + case config.AuthModeX509: + return []grpc.ServerOption{ + grpc.Creds( + grpccredentials.MTLSServerCredentials(s.x509Src, s.bundleSrc, tlsconfig.AuthorizeAny()), + ), + grpc.ChainUnaryInterceptor(X509UnaryInterceptor()), + grpc.ChainStreamInterceptor(X509StreamInterceptor()), + } + + default: + logger.Error("Unsupported auth mode", "mode", s.mode) + + return []grpc.ServerOption{} + } +} + +// Stop closes the workload API client and all sources. +// +//nolint:wrapcheck +func (s *Service) Stop() error { + if s.jwtSource != nil { + if err := s.jwtSource.Close(); err != nil { + logger.Error("Failed to close JWT source", "error", err) + } + } + + if s.x509Closer != nil { + if err := s.x509Closer.Close(); err != nil { + logger.Error("Failed to close X509 source", "error", err) + } + } + + if s.bundleSrc != nil { + if err := s.bundleSrc.Close(); err != nil { + logger.Error("Failed to close bundle source", "error", err) + } + } + + return s.client.Close() +} diff --git a/server/authn/x509.go b/server/authn/x509.go index 3413d1a55..f91843d1c 100644 --- a/server/authn/x509.go +++ b/server/authn/x509.go @@ -1,89 +1,89 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package authn - -import ( - "context" - - "github.com/agntcy/dir/server/healthcheck" - "github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// NewX509Interceptor returns a gRPC interceptor that extracts the SPIFFE ID from X.509 peer -// and adds it to the context for downstream authorization checks. -func NewX509Interceptor() X509InterceptorFn { - return func(ctx context.Context) (context.Context, error) { - // Extract SPIFFE ID from X.509 peer certificate - sid, ok := grpccredentials.PeerIDFromContext(ctx) - if !ok { - logger.Error("X.509 authentication failed: no peer ID in context") - - return ctx, status.Error(codes.Unauthenticated, "not authenticated via X.509") - } - - logger.Debug("X.509 authentication successful", - "spiffe_id", sid.String(), - "trust_domain", sid.TrustDomain().String(), - ) - - // Store the SPIFFE ID in context using the same approach as JWT - return context.WithValue(ctx, SpiffeIDContextKey, sid), nil - } -} - -// X509InterceptorFn is a function that performs X.509 authentication. -type X509InterceptorFn func(ctx context.Context) (context.Context, error) - -// x509UnaryInterceptorFor wraps the X.509 interceptor function for unary RPCs. 
-func x509UnaryInterceptorFor(fn X509InterceptorFn) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - // Skip authentication for health check endpoints - if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { - return handler(ctx, req) - } - - newCtx, err := fn(ctx) - if err != nil { - return nil, err - } - - return handler(newCtx, req) - } -} - -// x509StreamInterceptorFor wraps the X.509 interceptor function for stream RPCs. -func x509StreamInterceptorFor(fn X509InterceptorFn) grpc.StreamServerInterceptor { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // Skip authentication for health check endpoints - if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { - return handler(srv, ss) - } - - newCtx, err := fn(ss.Context()) - if err != nil { - return err - } - - // Create a wrapped stream with the new context - wrappedStream := &wrappedServerStream{ - ServerStream: ss, - ctx: newCtx, - } - - return handler(srv, wrappedStream) - } -} - -// X509UnaryInterceptor is a convenience wrapper for X.509 unary authentication. -func X509UnaryInterceptor() grpc.UnaryServerInterceptor { - return x509UnaryInterceptorFor(NewX509Interceptor()) -} - -// X509StreamInterceptor is a convenience wrapper for X.509 stream authentication. -func X509StreamInterceptor() grpc.StreamServerInterceptor { - return x509StreamInterceptorFor(NewX509Interceptor()) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package authn + +import ( + "context" + + "github.com/agntcy/dir/server/healthcheck" + "github.com/spiffe/go-spiffe/v2/spiffegrpc/grpccredentials" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// NewX509Interceptor returns a gRPC interceptor that extracts the SPIFFE ID from X.509 peer +// and adds it to the context for downstream authorization checks. +func NewX509Interceptor() X509InterceptorFn { + return func(ctx context.Context) (context.Context, error) { + // Extract SPIFFE ID from X.509 peer certificate + sid, ok := grpccredentials.PeerIDFromContext(ctx) + if !ok { + logger.Error("X.509 authentication failed: no peer ID in context") + + return ctx, status.Error(codes.Unauthenticated, "not authenticated via X.509") + } + + logger.Debug("X.509 authentication successful", + "spiffe_id", sid.String(), + "trust_domain", sid.TrustDomain().String(), + ) + + // Store the SPIFFE ID in context using the same approach as JWT + return context.WithValue(ctx, SpiffeIDContextKey, sid), nil + } +} + +// X509InterceptorFn is a function that performs X.509 authentication. +type X509InterceptorFn func(ctx context.Context) (context.Context, error) + +// x509UnaryInterceptorFor wraps the X.509 interceptor function for unary RPCs. +func x509UnaryInterceptorFor(fn X509InterceptorFn) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + // Skip authentication for health check endpoints + if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { + return handler(ctx, req) + } + + newCtx, err := fn(ctx) + if err != nil { + return nil, err + } + + return handler(newCtx, req) + } +} + +// x509StreamInterceptorFor wraps the X.509 interceptor function for stream RPCs. 
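+// Health check endpoints are exempted, mirroring the unary variant above.
+// A minimal wiring sketch using the exported convenience wrappers below
+// (x509Src and bundleSrc are assumed to come from the Workload API, as wired
+// by Service.GetServerOptions in this package):
+//
+//	srv := grpc.NewServer(
+//		grpc.Creds(grpccredentials.MTLSServerCredentials(x509Src, bundleSrc, tlsconfig.AuthorizeAny())),
+//		grpc.ChainUnaryInterceptor(X509UnaryInterceptor()),
+//		grpc.ChainStreamInterceptor(X509StreamInterceptor()),
+//	)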
+func x509StreamInterceptorFor(fn X509InterceptorFn) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // Skip authentication for health check endpoints + if healthcheck.IsHealthCheckEndpoint(info.FullMethod) { + return handler(srv, ss) + } + + newCtx, err := fn(ss.Context()) + if err != nil { + return err + } + + // Create a wrapped stream with the new context + wrappedStream := &wrappedServerStream{ + ServerStream: ss, + ctx: newCtx, + } + + return handler(srv, wrappedStream) + } +} + +// X509UnaryInterceptor is a convenience wrapper for X.509 unary authentication. +func X509UnaryInterceptor() grpc.UnaryServerInterceptor { + return x509UnaryInterceptorFor(NewX509Interceptor()) +} + +// X509StreamInterceptor is a convenience wrapper for X.509 stream authentication. +func X509StreamInterceptor() grpc.StreamServerInterceptor { + return x509StreamInterceptorFor(NewX509Interceptor()) +} diff --git a/server/authz/authorizer.go b/server/authz/authorizer.go index 1d2d78451..110f2cda9 100644 --- a/server/authz/authorizer.go +++ b/server/authz/authorizer.go @@ -1,78 +1,78 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package authz - -import ( - _ "embed" - "fmt" - - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/server/authz/config" - "github.com/casbin/casbin/v2" - "github.com/casbin/casbin/v2/model" -) - -// Defines the Casbin authorization model -// -//go:embed model.conf -var modelConf string - -// Defines the allowed external API methods that can be performed -// by users outside of our trust domain. -var allowedExternalAPIMethods = []string{ - storev1.StoreService_Pull_FullMethodName, // store: pull - storev1.StoreService_PullReferrer_FullMethodName, // store: pull referrer - storev1.StoreService_Lookup_FullMethodName, // store: lookup - storev1.SyncService_RequestRegistryCredentials_FullMethodName, // sync: negotiate -} - -type Authorizer struct { - enforcer *casbin.Enforcer -} - -// New creates a new Casbin-based Authorizer. -func NewAuthorizer(cfg config.Config) (*Authorizer, error) { - // Create model from string - model, err := model.NewModelFromString(modelConf) - if err != nil { - return nil, fmt.Errorf("failed to load model: %w", err) - } - - // Create authorization enforcer - enforcer, err := casbin.NewEnforcer(model) - if err != nil { - return nil, fmt.Errorf("failed to create enforcer: %w", err) - } - - // Add policies to the enforcer - if _, err := enforcer.AddPolicies(getPolicies(cfg)); err != nil { - return nil, fmt.Errorf("failed to add policies: %w", err) - } - - return &Authorizer{enforcer: enforcer}, nil -} - -// Authorize checks if the user in trust domain can perform a given API method. 
-//
-//nolint:wrapcheck
-func (a *Authorizer) Authorize(trustDomain, apiMethod string) (bool, error) {
-	return a.enforcer.Enforce(trustDomain, apiMethod)
-}
-
-// getPolicies returns a list of authorization in the following form:
-// - All API methods are allowed for users within our trust domain
-// - Only specific API methods are allowed for users outside of the trust domain
-func getPolicies(cfg config.Config) [][]string {
-	policies := [][]string{}
-
-	// Allow all API methods for the trust domain
-	policies = append(policies, []string{cfg.TrustDomain, "*"})
-
-	// Allow only specific API methods for users outside of the trust domain
-	for _, method := range allowedExternalAPIMethods {
-		policies = append(policies, []string{"*", method})
-	}
-
-	return policies
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package authz
+
+import (
+	_ "embed"
+	"fmt"
+
+	storev1 "github.com/agntcy/dir/api/store/v1"
+	"github.com/agntcy/dir/server/authz/config"
+	"github.com/casbin/casbin/v2"
+	"github.com/casbin/casbin/v2/model"
+)
+
+// Defines the Casbin authorization model
+//
+//go:embed model.conf
+var modelConf string
+
+// Defines the allowed external API methods that can be performed
+// by users outside of our trust domain.
+var allowedExternalAPIMethods = []string{
+	storev1.StoreService_Pull_FullMethodName,                      // store: pull
+	storev1.StoreService_PullReferrer_FullMethodName,              // store: pull referrer
+	storev1.StoreService_Lookup_FullMethodName,                    // store: lookup
+	storev1.SyncService_RequestRegistryCredentials_FullMethodName, // sync: negotiate
+}
+
+type Authorizer struct {
+	enforcer *casbin.Enforcer
+}
+
+// NewAuthorizer creates a new Casbin-based Authorizer.
+func NewAuthorizer(cfg config.Config) (*Authorizer, error) {
+	// Create model from string
+	model, err := model.NewModelFromString(modelConf)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load model: %w", err)
+	}
+
+	// Create authorization enforcer
+	enforcer, err := casbin.NewEnforcer(model)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create enforcer: %w", err)
+	}
+
+	// Add policies to the enforcer
+	if _, err := enforcer.AddPolicies(getPolicies(cfg)); err != nil {
+		return nil, fmt.Errorf("failed to add policies: %w", err)
+	}
+
+	return &Authorizer{enforcer: enforcer}, nil
+}
+
+// Authorize reports whether a caller from the given trust domain may perform the given API method.
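+// The subject is the caller's SPIFFE trust domain and the object is the full
+// gRPC method name, as exercised by the tests:
+//
+//	ok, err := a.Authorize("dir.com", storev1.StoreService_Push_FullMethodName)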
+//
+//nolint:wrapcheck
+func (a *Authorizer) Authorize(trustDomain, apiMethod string) (bool, error) {
+	return a.enforcer.Enforce(trustDomain, apiMethod)
+}
+
+// getPolicies returns the list of authorization policies, in the following form:
+// - All API methods are allowed for users within our trust domain
+// - Only specific API methods are allowed for users outside of the trust domain
+func getPolicies(cfg config.Config) [][]string {
+	policies := [][]string{}
+
+	// Allow all API methods for the trust domain
+	policies = append(policies, []string{cfg.TrustDomain, "*"})
+
+	// Allow only specific API methods for users outside of the trust domain
+	for _, method := range allowedExternalAPIMethods {
+		policies = append(policies, []string{"*", method})
+	}
+
+	return policies
+}
diff --git a/server/authz/authorizer_test.go b/server/authz/authorizer_test.go
index ed3fc3f13..ad8905cdf 100644
--- a/server/authz/authorizer_test.go
+++ b/server/authz/authorizer_test.go
@@ -1,50 +1,50 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package authz
-
-import (
-	"testing"
-
-	routingv1 "github.com/agntcy/dir/api/routing/v1"
-	storev1 "github.com/agntcy/dir/api/store/v1"
-	"github.com/agntcy/dir/server/authz/config"
-)
-
-func TestAuthorizer(t *testing.T) {
-	authz, err := NewAuthorizer(config.Config{
-		TrustDomain: "dir.com",
-	})
-	if err != nil {
-		t.Fatalf("failed to create Casbin authorizer: %v", err)
-	}
-
-	tests := []struct {
-		trustDomain string
-		apiMethod   string
-		allow       bool
-	}{
-		// dir.com: all ops allowed
-		{"dir.com", storev1.StoreService_Delete_FullMethodName, true},
-		{"dir.com", storev1.StoreService_Push_FullMethodName, true},
-		{"dir.com", routingv1.RoutingService_Publish_FullMethodName, true},
-
-		// anyone else: only pull/lookup/sync
-		{"other.com", storev1.StoreService_Pull_FullMethodName, true},
-		{"other.com", storev1.StoreService_Lookup_FullMethodName, true},
-		{"other.com", storev1.SyncService_RequestRegistryCredentials_FullMethodName, true},
-		{"other.com", storev1.StoreService_Push_FullMethodName, false},
-		{"other.com", routingv1.RoutingService_Publish_FullMethodName, false},
-	}
-
-	for _, tt := range tests {
-		allowed, err := authz.Authorize(tt.trustDomain, tt.apiMethod)
-		if err != nil {
-			t.Errorf("Authorize() error: %v", err)
-		}
-
-		if allowed != tt.allow {
-			t.Errorf("Authorize(%q, %q) = %v, want %v", tt.trustDomain, tt.apiMethod, allowed, tt.allow)
-		}
-	}
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package authz
+
+import (
+	"testing"
+
+	routingv1 "github.com/agntcy/dir/api/routing/v1"
+	storev1 "github.com/agntcy/dir/api/store/v1"
+	"github.com/agntcy/dir/server/authz/config"
+)
+
+func TestAuthorizer(t *testing.T) {
+	authz, err := NewAuthorizer(config.Config{
+		TrustDomain: "dir.com",
+	})
+	if err != nil {
+		t.Fatalf("failed to create Casbin authorizer: %v", err)
+	}
+
+	tests := []struct {
+		trustDomain string
+		apiMethod   string
+		allow       bool
+	}{
+		// dir.com: all ops allowed
+		{"dir.com", storev1.StoreService_Delete_FullMethodName, true},
+		{"dir.com", storev1.StoreService_Push_FullMethodName, true},
+		{"dir.com", routingv1.RoutingService_Publish_FullMethodName, true},
+
+		// anyone else: only pull/lookup/sync
+		{"other.com", storev1.StoreService_Pull_FullMethodName, true},
+		{"other.com", storev1.StoreService_Lookup_FullMethodName, true},
+		{"other.com", storev1.SyncService_RequestRegistryCredentials_FullMethodName, true},
+		{"other.com", storev1.StoreService_Push_FullMethodName, false},
+		{"other.com", routingv1.RoutingService_Publish_FullMethodName, false},
+	}
+
+	for _, tt := range tests {
+		allowed, err := authz.Authorize(tt.trustDomain, tt.apiMethod)
+		if err != nil {
+			t.Errorf("Authorize() error: %v", err)
+		}
+
+		if allowed != tt.allow {
+			t.Errorf("Authorize(%q, %q) = %v, want %v", tt.trustDomain, tt.apiMethod, allowed, tt.allow)
+		}
+	}
+}
diff --git a/server/authz/config/config.go b/server/authz/config/config.go
index 1d9936d0e..a11303df7 100644
--- a/server/authz/config/config.go
+++ b/server/authz/config/config.go
@@ -1,30 +1,30 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package config
-
-import "errors"
-
-// Config contains configuration for authorization (AuthZ) services.
-// Authorization is separate from authentication (AuthN) - it receives
-// an authenticated SPIFFE ID from the context and makes policy decisions.
-type Config struct {
-	// Indicates if authorization is enabled
-	Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"`
-
-	// Trust domain for this Directory server
-	// Used to distinguish internal vs external requests
-	TrustDomain string `json:"trust_domain,omitempty" mapstructure:"trust_domain"`
-}
-
-func (c *Config) Validate() error {
-	if !c.Enabled {
-		return nil
-	}
-
-	if c.TrustDomain == "" {
-		return errors.New("trust domain is required for authorization")
-	}
-
-	return nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package config
+
+import "errors"
+
+// Config contains configuration for authorization (AuthZ) services.
+// Authorization is separate from authentication (AuthN) - it receives
+// an authenticated SPIFFE ID from the context and makes policy decisions.
+type Config struct {
+	// Indicates if authorization is enabled
+	Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"`
+
+	// Trust domain for this Directory server
+	// Used to distinguish internal vs external requests
+	TrustDomain string `json:"trust_domain,omitempty" mapstructure:"trust_domain"`
+}
+
+func (c *Config) Validate() error {
+	if !c.Enabled {
+		return nil
+	}
+
+	if c.TrustDomain == "" {
+		return errors.New("trust domain is required for authorization")
+	}
+
+	return nil
+}
diff --git a/server/authz/interceptor.go b/server/authz/interceptor.go
index 7e180ad5b..e99f647f9 100644
--- a/server/authz/interceptor.go
+++ b/server/authz/interceptor.go
@@ -1,96 +1,96 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package authz
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/agntcy/dir/server/authn"
-	"github.com/agntcy/dir/server/healthcheck"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-type InterceptorFn func(ctx context.Context, apiMethod string) error
-
-// NewInterceptor returns a gRPC interceptor that performs authorization checks.
-// It expects the SPIFFE ID to already be in the context (set by the authn interceptor).
-// -//nolint:wrapcheck -func NewInterceptor(authorizer *Authorizer) InterceptorFn { - return func(ctx context.Context, apiMethod string) error { - // Get SPIFFE ID from context (set by authentication interceptor) - sid, ok := authn.SpiffeIDFromContext(ctx) - if !ok { - logger.Error("Authorization failed: no SPIFFE ID in context", "method", apiMethod) - - return status.Error(codes.Unauthenticated, "not authenticated") - } - - trustDomain := sid.TrustDomain().String() - - // Perform authorization check - allowed, err := authorizer.Authorize(trustDomain, apiMethod) - if err != nil { - logger.Error("Authorization error", - "error", err, - "method", apiMethod, - "trust_domain", trustDomain, - "spiffe_id", sid.String(), - ) - - return status.Error(codes.Internal, fmt.Sprintf("something went wrong: %v", err)) - } - - if !allowed { - logger.Warn("Authorization denied", - "method", apiMethod, - "trust_domain", trustDomain, - "spiffe_id", sid.String(), - ) - - return status.Error(codes.PermissionDenied, "not allowed to access "+apiMethod) - } - - logger.Debug("Authorization successful", - "method", apiMethod, - "trust_domain", trustDomain, - "spiffe_id", sid.String(), - ) - - return nil - } -} - -func UnaryInterceptorFor(fn InterceptorFn) func(context.Context, any, *grpc.UnaryServerInfo, grpc.UnaryHandler) (any, error) { - return func(ctx context.Context, req any, sInfo *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { - // Skip authorization for health check endpoints - if healthcheck.IsHealthCheckEndpoint(sInfo.FullMethod) { - return handler(ctx, req) - } - - if err := fn(ctx, sInfo.FullMethod); err != nil { - return nil, err - } - - return handler(ctx, req) - } -} - -func StreamInterceptorFor(fn InterceptorFn) func(any, grpc.ServerStream, *grpc.StreamServerInfo, grpc.StreamHandler) error { - return func(srv any, ss grpc.ServerStream, sInfo *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - // Skip authorization for health check endpoints - if healthcheck.IsHealthCheckEndpoint(sInfo.FullMethod) { - return handler(srv, ss) - } - - if err := fn(ss.Context(), sInfo.FullMethod); err != nil { - return err - } - - return handler(srv, ss) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package authz + +import ( + "context" + "fmt" + + "github.com/agntcy/dir/server/authn" + "github.com/agntcy/dir/server/healthcheck" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type InterceptorFn func(ctx context.Context, apiMethod string) error + +// NewInterceptor returns a gRPC interceptor that performs authorization checks. +// It expects the SPIFFE ID to already be in the context (set by the authn interceptor). 
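+// The ID's trust domain becomes the authorization subject, so a client whose
+// SPIFFE ID is, for example, spiffe://dir.com/workload is checked against the
+// policies for trust domain "dir.com".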
+//
+//nolint:wrapcheck
+func NewInterceptor(authorizer *Authorizer) InterceptorFn {
+	return func(ctx context.Context, apiMethod string) error {
+		// Get SPIFFE ID from context (set by authentication interceptor)
+		sid, ok := authn.SpiffeIDFromContext(ctx)
+		if !ok {
+			logger.Error("Authorization failed: no SPIFFE ID in context", "method", apiMethod)
+
+			return status.Error(codes.Unauthenticated, "not authenticated")
+		}
+
+		trustDomain := sid.TrustDomain().String()
+
+		// Perform authorization check
+		allowed, err := authorizer.Authorize(trustDomain, apiMethod)
+		if err != nil {
+			logger.Error("Authorization error",
+				"error", err,
+				"method", apiMethod,
+				"trust_domain", trustDomain,
+				"spiffe_id", sid.String(),
+			)
+
+			return status.Error(codes.Internal, fmt.Sprintf("authorization check failed: %v", err))
+		}
+
+		if !allowed {
+			logger.Warn("Authorization denied",
+				"method", apiMethod,
+				"trust_domain", trustDomain,
+				"spiffe_id", sid.String(),
+			)
+
+			return status.Error(codes.PermissionDenied, "not allowed to access "+apiMethod)
+		}
+
+		logger.Debug("Authorization successful",
+			"method", apiMethod,
+			"trust_domain", trustDomain,
+			"spiffe_id", sid.String(),
+		)
+
+		return nil
+	}
+}
+
+func UnaryInterceptorFor(fn InterceptorFn) func(context.Context, any, *grpc.UnaryServerInfo, grpc.UnaryHandler) (any, error) {
+	return func(ctx context.Context, req any, sInfo *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+		// Skip authorization for health check endpoints
+		if healthcheck.IsHealthCheckEndpoint(sInfo.FullMethod) {
+			return handler(ctx, req)
+		}
+
+		if err := fn(ctx, sInfo.FullMethod); err != nil {
+			return nil, err
+		}
+
+		return handler(ctx, req)
+	}
+}
+
+func StreamInterceptorFor(fn InterceptorFn) func(any, grpc.ServerStream, *grpc.StreamServerInfo, grpc.StreamHandler) error {
+	return func(srv any, ss grpc.ServerStream, sInfo *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		// Skip authorization for health check endpoints
+		if healthcheck.IsHealthCheckEndpoint(sInfo.FullMethod) {
+			return handler(srv, ss)
+		}
+
+		if err := fn(ss.Context(), sInfo.FullMethod); err != nil {
+			return err
+		}
+
+		return handler(srv, ss)
+	}
+}
diff --git a/server/authz/model.conf b/server/authz/model.conf
index fb43c92d8..ae956d9ab 100644
--- a/server/authz/model.conf
+++ b/server/authz/model.conf
@@ -1,11 +1,11 @@
-[request_definition]
-r = trust_domain, api_method
-
-[policy_definition]
-p = trust_domain, api_method
-
-[policy_effect]
-e = some(where (p.eft == allow))
-
-[matchers]
-m = (keyMatch(r.trust_domain, p.trust_domain) || regexMatch(r.trust_domain, p.trust_domain)) && (keyMatch(r.api_method, p.api_method) || regexMatch(r.api_method, p.api_method))
+[request_definition]
+r = trust_domain, api_method
+
+[policy_definition]
+p = trust_domain, api_method
+
+[policy_effect]
+e = some(where (p.eft == allow))
+
+[matchers]
+m = (keyMatch(r.trust_domain, p.trust_domain) || regexMatch(r.trust_domain, p.trust_domain)) && (keyMatch(r.api_method, p.api_method) || regexMatch(r.api_method, p.api_method))
diff --git a/server/authz/service.go b/server/authz/service.go
index dee0e8ac2..1145d0e4b 100644
--- a/server/authz/service.go
+++ b/server/authz/service.go
@@ -1,56 +1,56 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package authz
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/agntcy/dir/server/authz/config"
-	"github.com/agntcy/dir/utils/logging"
-	"google.golang.org/grpc"
-)
-
-var logger = logging.Logger("authz")
- -// Service manages authorization policy enforcement. -// It expects authentication to be handled separately by the authn service, -// which will provide the SPIFFE ID in the context. -type Service struct { - authorizer *Authorizer -} - -// New creates a new authorization service. -func New(_ context.Context, cfg config.Config) (*Service, error) { - // Validate - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid authz config: %w", err) - } - - // Create authorizer - authorizer, err := NewAuthorizer(cfg) - if err != nil { - return nil, fmt.Errorf("failed to create authorizer: %w", err) - } - - logger.Info("Authorization service initialized", "trust_domain", cfg.TrustDomain) - - return &Service{ - authorizer: authorizer, - }, nil -} - -// GetServerOptions returns gRPC server options for authorization. -func (s *Service) GetServerOptions() []grpc.ServerOption { - return []grpc.ServerOption{ - grpc.ChainUnaryInterceptor(UnaryInterceptorFor(NewInterceptor(s.authorizer))), - grpc.ChainStreamInterceptor(StreamInterceptorFor(NewInterceptor(s.authorizer))), - } -} - -// Stop closes any resources used by the authorization service. -func (s *Service) Stop() error { - // No resources to clean up in the current implementation - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package authz + +import ( + "context" + "fmt" + + "github.com/agntcy/dir/server/authz/config" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc" +) + +var logger = logging.Logger("authz") + +// Service manages authorization policy enforcement. +// It expects authentication to be handled separately by the authn service, +// which will provide the SPIFFE ID in the context. +type Service struct { + authorizer *Authorizer +} + +// New creates a new authorization service. +func New(_ context.Context, cfg config.Config) (*Service, error) { + // Validate + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid authz config: %w", err) + } + + // Create authorizer + authorizer, err := NewAuthorizer(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create authorizer: %w", err) + } + + logger.Info("Authorization service initialized", "trust_domain", cfg.TrustDomain) + + return &Service{ + authorizer: authorizer, + }, nil +} + +// GetServerOptions returns gRPC server options for authorization. +func (s *Service) GetServerOptions() []grpc.ServerOption { + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor(UnaryInterceptorFor(NewInterceptor(s.authorizer))), + grpc.ChainStreamInterceptor(StreamInterceptorFor(NewInterceptor(s.authorizer))), + } +} + +// Stop closes any resources used by the authorization service. 
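+// It currently always returns nil and exists so the authz service mirrors the
+// lifecycle of the authn service.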
+func (s *Service) Stop() error { + // No resources to clean up in the current implementation + return nil +} diff --git a/server/cmd/main.go b/server/cmd/main.go index 1a8352b67..82a8ad0e3 100644 --- a/server/cmd/main.go +++ b/server/cmd/main.go @@ -1,30 +1,30 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package main - -import ( - "fmt" - - "github.com/agntcy/dir/server" - "github.com/agntcy/dir/server/config" - "github.com/spf13/cobra" -) - -var rootCmd = &cobra.Command{ - Use: "server", - Short: "Run a server for the Directory services.", - Long: "Run a server for the Directory services.", - RunE: func(cmd *cobra.Command, _ []string) error { - cfg, err := config.LoadConfig() - if err != nil { - return fmt.Errorf("failed to load config: %w", err) - } - - return server.Run(cmd.Context(), cfg) - }, -} - -func main() { - cobra.CheckErr(rootCmd.Execute()) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "fmt" + + "github.com/agntcy/dir/server" + "github.com/agntcy/dir/server/config" + "github.com/spf13/cobra" +) + +var rootCmd = &cobra.Command{ + Use: "server", + Short: "Run a server for the Directory services.", + Long: "Run a server for the Directory services.", + RunE: func(cmd *cobra.Command, _ []string) error { + cfg, err := config.LoadConfig() + if err != nil { + return fmt.Errorf("failed to load config: %w", err) + } + + return server.Run(cmd.Context(), cfg) + }, +} + +func main() { + cobra.CheckErr(rootCmd.Execute()) +} diff --git a/server/config/config.go b/server/config/config.go index 5f62c076d..791a7849c 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -1,552 +1,552 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "errors" - "fmt" - "strings" - "time" - - authn "github.com/agntcy/dir/server/authn/config" - authz "github.com/agntcy/dir/server/authz/config" - database "github.com/agntcy/dir/server/database/config" - sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" - events "github.com/agntcy/dir/server/events/config" - ratelimitconfig "github.com/agntcy/dir/server/middleware/ratelimit/config" - publication "github.com/agntcy/dir/server/publication/config" - routing "github.com/agntcy/dir/server/routing/config" - store "github.com/agntcy/dir/server/store/config" - oci "github.com/agntcy/dir/server/store/oci/config" - sync "github.com/agntcy/dir/server/sync/config" - syncmonitor "github.com/agntcy/dir/server/sync/monitor/config" - "github.com/agntcy/dir/utils/logging" - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" -) - -const ( - // Config params. - - DefaultEnvPrefix = "DIRECTORY_SERVER" - DefaultConfigName = "server.config" - DefaultConfigType = "yml" - DefaultConfigPath = "/etc/agntcy/dir" - - // API configuration. - - DefaultListenAddress = "0.0.0.0:8888" - - // OASF Validation configuration. - - // DefaultSchemaURL is the default OASF schema URL for API-based validation. - // When set, records will be validated using the OASF API validator instead of embedded schemas. - DefaultSchemaURL = "https://schema.oasf.outshift.com" - - // Connection management configuration. - // These defaults are based on production gRPC best practices and provide - // a balance between resource efficiency and connection stability. - - // DefaultMaxConcurrentStreams limits concurrent RPC streams per connection. 
- // This prevents a single connection from monopolizing server resources. - // Value: 1000 is industry standard, sufficient for most clients. - DefaultMaxConcurrentStreams = 1000 - - // DefaultMaxRecvMsgSize limits maximum received message size (4 MB). - // Protects against memory exhaustion from large messages. - // Value: 4 MB covers 99% of OCI artifacts and metadata. - DefaultMaxRecvMsgSize = 4 * 1024 * 1024 - - // DefaultMaxSendMsgSize limits maximum sent message size (4 MB). - // Value: 4 MB matches receive limit for consistency. - DefaultMaxSendMsgSize = 4 * 1024 * 1024 - - // DefaultConnectionTimeout limits time for connection establishment (120 seconds). - // Prevents hanging connection attempts from slow clients. - // Value: 2 minutes allows for slow networks without wasting resources. - DefaultConnectionTimeout = 120 * time.Second - - // DefaultMaxConnectionIdle closes idle connections after this duration (15 minutes). - // An idle connection has no active RPC streams. - // Value: 15 minutes balances resource cleanup vs connection churn. - DefaultMaxConnectionIdle = 15 * time.Minute - - // DefaultMaxConnectionAge forces connection rotation after this duration (30 minutes). - // Prevents long-lived connections from accumulating issues. - // Value: 30 minutes ensures regular TLS session rotation for security. - DefaultMaxConnectionAge = 30 * time.Minute - - // DefaultMaxConnectionAgeGrace is grace period after MaxConnectionAge (5 minutes). - // Allows inflight RPCs to complete before force-closing connection. - // Value: 5 minutes provides sufficient time for most operations. - DefaultMaxConnectionAgeGrace = 5 * time.Minute - - // DefaultKeepaliveTime is interval for sending keepalive pings (5 minutes). - // Detects dead connections when client crashes or network partitions. - // Value: 5 minutes detects issues fast without excessive traffic. - DefaultKeepaliveTime = 5 * time.Minute - - // DefaultKeepaliveTimeout is wait time for keepalive ping response (1 minute). - // Connection is closed if no pong received within this timeout. - // Value: 1 minute allows for network delays without long waits. - DefaultKeepaliveTimeout = 1 * time.Minute - - // DefaultMinTime is minimum time between client keepalive pings (1 minute). - // Prevents clients from abusing keepalive by sending excessive pings. - // Value: 1 minute prevents abuse while allowing reasonable client detection. - DefaultMinTime = 1 * time.Minute - - // DefaultPermitWithoutStream allows keepalive pings without active streams. - // Enables clients to detect dead connections even when idle. - // Value: true provides better connection health detection. - DefaultPermitWithoutStream = true - - // Metrics configuration. - - // DefaultMetricsEnabled enables Prometheus metrics collection. - DefaultMetricsEnabled = true - - // DefaultMetricsAddress is the default listen address for the metrics HTTP server. 
- DefaultMetricsAddress = ":9090" -) - -var logger = logging.Logger("config") - -type Config struct { - // API configuration - ListenAddress string `json:"listen_address,omitempty" mapstructure:"listen_address"` - - // OASF Validation configuration - OASFAPIValidation OASFAPIValidationConfig `json:"oasf_api_validation,omitempty" mapstructure:"oasf_api_validation"` - - // Logging configuration - Logging LoggingConfig `json:"logging,omitempty" mapstructure:"logging"` - - // Connection management configuration - Connection ConnectionConfig `json:"connection,omitempty" mapstructure:"connection"` - - // Rate limiting configuration - RateLimit ratelimitconfig.Config `json:"ratelimit,omitempty" mapstructure:"ratelimit"` - - // Authn configuration (JWT or X.509 authentication) - Authn authn.Config `json:"authn,omitempty" mapstructure:"authn"` - - // Authz configuration - Authz authz.Config `json:"authz,omitempty" mapstructure:"authz"` - - // Store configuration - Store store.Config `json:"store,omitempty" mapstructure:"store"` - - // Routing configuration - Routing routing.Config `json:"routing,omitempty" mapstructure:"routing"` - - // Database configuration - Database database.Config `json:"database,omitempty" mapstructure:"database"` - - // Sync configuration - Sync sync.Config `json:"sync,omitempty" mapstructure:"sync"` - - // Publication configuration - Publication publication.Config `json:"publication,omitempty" mapstructure:"publication"` - - // Events configuration - Events events.Config `json:"events,omitempty" mapstructure:"events"` - - // Metrics configuration - Metrics MetricsConfig `json:"metrics,omitempty" mapstructure:"metrics"` -} - -// OASFAPIValidationConfig defines OASF API validation configuration. -type OASFAPIValidationConfig struct { - // SchemaURL is the OASF schema URL for API-based validation. - // When set, records will be validated using the OASF API validator instead of embedded schemas. - // Default: https://schema.oasf.outshift.com - SchemaURL string `json:"schema_url,omitempty" mapstructure:"schema_url"` - - // Disable disables API validation and uses embedded schema validation instead. - // Default: false (uses API validation) - Disable bool `json:"disable,omitempty" mapstructure:"disable"` - - // StrictMode enables strict validation mode (fails on warnings). - // When false, uses lax validation mode (allows warnings, only fails on errors). - // Default: true (strict mode) - // Only applies when Disable is false - StrictMode bool `json:"strict_mode,omitempty" mapstructure:"strict_mode"` -} - -// LoggingConfig defines gRPC request/response logging configuration. -type LoggingConfig struct { - // Verbose enables verbose logging mode (includes request/response payloads). - // Default: false (production mode - logs only start/finish with metadata). - Verbose bool `json:"verbose,omitempty" mapstructure:"verbose"` -} - -// ConnectionConfig defines gRPC connection management configuration. -// These settings control connection lifecycle, resource limits, and keepalive behavior -// to prevent resource exhaustion and detect dead connections. -type ConnectionConfig struct { - // MaxConcurrentStreams limits concurrent RPCs per connection. - // Prevents a single connection from monopolizing server resources. - // Default: 1000 - MaxConcurrentStreams uint32 `json:"max_concurrent_streams,omitempty" mapstructure:"max_concurrent_streams"` - - // MaxRecvMsgSize limits the maximum message size the server can receive (in bytes). 
- // Protects against memory exhaustion from large messages. - // Default: 4194304 (4 MB) - MaxRecvMsgSize int `json:"max_recv_msg_size,omitempty" mapstructure:"max_recv_msg_size"` - - // MaxSendMsgSize limits the maximum message size the server can send (in bytes). - // Default: 4194304 (4 MB) - MaxSendMsgSize int `json:"max_send_msg_size,omitempty" mapstructure:"max_send_msg_size"` - - // ConnectionTimeout limits the time for connection establishment. - // Prevents hanging connection attempts from slow clients. - // Default: 120s (2 minutes) - ConnectionTimeout time.Duration `json:"connection_timeout,omitempty" mapstructure:"connection_timeout"` - - // Keepalive configuration for connection health management. - Keepalive KeepaliveConfig `json:"keepalive,omitempty" mapstructure:"keepalive"` -} - -// KeepaliveConfig defines keepalive parameters for connection health. -// Keepalive pings detect dead connections (client crash, network partition) -// and automatically close idle or aged connections to free resources. -type KeepaliveConfig struct { - // MaxConnectionIdle is the duration after which idle connections are closed. - // An idle connection has no active RPC streams. - // Default: 15m (15 minutes) - MaxConnectionIdle time.Duration `json:"max_connection_idle,omitempty" mapstructure:"max_connection_idle"` - - // MaxConnectionAge is the maximum duration a connection may exist. - // Forces connection rotation to prevent resource leaks and ensure TLS session rotation. - // Default: 30m (30 minutes) - MaxConnectionAge time.Duration `json:"max_connection_age,omitempty" mapstructure:"max_connection_age"` - - // MaxConnectionAgeGrace is the grace period after MaxConnectionAge - // to allow inflight RPCs to complete before force-closing the connection. - // Default: 5m (5 minutes) - MaxConnectionAgeGrace time.Duration `json:"max_connection_age_grace,omitempty" mapstructure:"max_connection_age_grace"` - - // Time is the duration after which a keepalive ping is sent - // on idle connections to check if the connection is still alive. - // Default: 5m (5 minutes) - Time time.Duration `json:"time,omitempty" mapstructure:"time"` - - // Timeout is the duration the server waits for a keepalive ping response. - // If no response is received, the connection is closed. - // Default: 1m (1 minute) - Timeout time.Duration `json:"timeout,omitempty" mapstructure:"timeout"` - - // MinTime is the minimum duration clients should wait between keepalive pings. - // Prevents clients from abusing keepalive by sending excessive pings. - // Default: 1m (1 minute) - MinTime time.Duration `json:"min_time,omitempty" mapstructure:"min_time"` - - // PermitWithoutStream allows clients to send keepalive pings - // even when there are no active RPC streams. - // Enables clients to detect dead connections proactively. - // Default: true - PermitWithoutStream bool `json:"permit_without_stream,omitempty" mapstructure:"permit_without_stream"` -} - -// MetricsConfig holds Prometheus metrics configuration. -type MetricsConfig struct { - // Enabled enables Prometheus metrics collection. - // Default: true - Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` - - // Address is the HTTP listen address for the metrics endpoint. - // The metrics server runs on a separate port from the gRPC server. - // Default: ":9090" - Address string `json:"address,omitempty" mapstructure:"address"` -} - -// DefaultConnectionConfig returns connection configuration with production-safe defaults. 
-// These defaults are based on industry best practices for production gRPC deployments -// and provide a balance between resource efficiency, connection stability, and security. -func DefaultConnectionConfig() ConnectionConfig { - return ConnectionConfig{ - MaxConcurrentStreams: DefaultMaxConcurrentStreams, - MaxRecvMsgSize: DefaultMaxRecvMsgSize, - MaxSendMsgSize: DefaultMaxSendMsgSize, - ConnectionTimeout: DefaultConnectionTimeout, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: DefaultMaxConnectionIdle, - MaxConnectionAge: DefaultMaxConnectionAge, - MaxConnectionAgeGrace: DefaultMaxConnectionAgeGrace, - Time: DefaultKeepaliveTime, - Timeout: DefaultKeepaliveTimeout, - MinTime: DefaultMinTime, - PermitWithoutStream: DefaultPermitWithoutStream, - }, - } -} - -// WithDefaults returns the connection configuration with defaults applied -// if the configuration is not set or has zero values. -// This method checks if MaxConcurrentStreams is 0 (indicating unset configuration) -// and returns the default configuration in that case. -func (c ConnectionConfig) WithDefaults() ConnectionConfig { - // If MaxConcurrentStreams is 0, the config was not set - use defaults - if c.MaxConcurrentStreams == 0 { - return DefaultConnectionConfig() - } - - return c -} - -func LoadConfig() (*Config, error) { - v := viper.NewWithOptions( - viper.KeyDelimiter("."), - viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), - ) - - v.SetConfigName(DefaultConfigName) - v.SetConfigType(DefaultConfigType) - v.AddConfigPath(DefaultConfigPath) - - v.SetEnvPrefix(DefaultEnvPrefix) - v.AllowEmptyEnv(true) - v.AutomaticEnv() - - // Read the config file - if err := v.ReadInConfig(); err != nil { - fileNotFoundError := viper.ConfigFileNotFoundError{} - if errors.As(err, &fileNotFoundError) { - logger.Info("Config file not found, use defaults.") - } else { - return nil, fmt.Errorf("failed to read configuration file: %w", err) - } - } - - // - // API configuration - // - _ = v.BindEnv("listen_address") - v.SetDefault("listen_address", DefaultListenAddress) - - // - // OASF Validation configuration - // - _ = v.BindEnv("oasf_api_validation.schema_url") - v.SetDefault("oasf_api_validation.schema_url", DefaultSchemaURL) - - _ = v.BindEnv("oasf_api_validation.disable") - v.SetDefault("oasf_api_validation.disable", false) - - _ = v.BindEnv("oasf_api_validation.strict_mode") - v.SetDefault("oasf_api_validation.strict_mode", true) - - // - // Logging configuration (gRPC request/response logging) - // - _ = v.BindEnv("logging.verbose") - v.SetDefault("logging.verbose", false) - - // - // Rate limiting configuration - // - _ = v.BindEnv("ratelimit.enabled") - v.SetDefault("ratelimit.enabled", false) - - _ = v.BindEnv("ratelimit.global_rps") - v.SetDefault("ratelimit.global_rps", 0.0) - - _ = v.BindEnv("ratelimit.global_burst") - v.SetDefault("ratelimit.global_burst", 0) - - _ = v.BindEnv("ratelimit.per_client_rps") - v.SetDefault("ratelimit.per_client_rps", 0.0) - - _ = v.BindEnv("ratelimit.per_client_burst") - v.SetDefault("ratelimit.per_client_burst", 0) - - // Note: method_limits (per-method rate limit overrides) can only be configured - // via YAML/JSON config file due to its complex nested map structure. - // Environment variable configuration for method limits is not supported. 
- // Example config: - // ratelimit: - // method_limits: - // "/agntcy.dir.store.v1.StoreService/CreateRecord": - // rps: 50 - // burst: 100 - - // - // Authn configuration (authentication: JWT or X.509) - // - _ = v.BindEnv("authn.enabled") - v.SetDefault("authn.enabled", "false") - - _ = v.BindEnv("authn.mode") - v.SetDefault("authn.mode", "x509") - - _ = v.BindEnv("authn.socket_path") - v.SetDefault("authn.socket_path", "") - - _ = v.BindEnv("authn.audiences") - v.SetDefault("authn.audiences", "") - - // - // Authz configuration (authorization policies) - // - _ = v.BindEnv("authz.enabled") - v.SetDefault("authz.enabled", "false") - - _ = v.BindEnv("authz.trust_domain") - v.SetDefault("authz.trust_domain", "") - - // - // Store configuration - // - _ = v.BindEnv("store.provider") - v.SetDefault("store.provider", store.DefaultProvider) - - _ = v.BindEnv("store.oci.local_dir") - v.SetDefault("store.oci.local_dir", "") - - _ = v.BindEnv("store.oci.cache_dir") - v.SetDefault("store.oci.cache_dir", "") - - _ = v.BindEnv("store.oci.registry_address") - v.SetDefault("store.oci.registry_address", oci.DefaultRegistryAddress) - - _ = v.BindEnv("store.oci.repository_name") - v.SetDefault("store.oci.repository_name", oci.DefaultRepositoryName) - - _ = v.BindEnv("store.oci.auth_config.insecure") - v.SetDefault("store.oci.auth_config.insecure", oci.DefaultAuthConfigInsecure) - - _ = v.BindEnv("store.oci.auth_config.username") - _ = v.BindEnv("store.oci.auth_config.password") - _ = v.BindEnv("store.oci.auth_config.access_token") - _ = v.BindEnv("store.oci.auth_config.refresh_token") - - // - // Routing configuration - // - _ = v.BindEnv("routing.listen_address") - v.SetDefault("routing.listen_address", routing.DefaultListenAddress) - - _ = v.BindEnv("routing.directory_api_address") - v.SetDefault("routing.directory_api_address", "") - - _ = v.BindEnv("routing.bootstrap_peers") - v.SetDefault("routing.bootstrap_peers", strings.Join(routing.DefaultBootstrapPeers, ",")) - - _ = v.BindEnv("routing.key_path") - v.SetDefault("routing.key_path", "") - - _ = v.BindEnv("routing.datastore_dir") - v.SetDefault("routing.datastore_dir", "") - - // - // Routing GossipSub configuration - // Note: Only enable/disable is configurable. Protocol parameters (topic, message size) - // are hardcoded in server/routing/pubsub/constants.go for network compatibility. 
- // - _ = v.BindEnv("routing.gossipsub.enabled") - v.SetDefault("routing.gossipsub.enabled", routing.DefaultGossipSubEnabled) - - // - // Database configuration - // - _ = v.BindEnv("database.db_type") - v.SetDefault("database.db_type", database.DefaultDBType) - - _ = v.BindEnv("database.sqlite.db_path") - v.SetDefault("database.sqlite.db_path", sqliteconfig.DefaultSQLiteDBPath) - - // - // Sync configuration - // - - _ = v.BindEnv("sync.scheduler_interval") - v.SetDefault("sync.scheduler_interval", sync.DefaultSyncSchedulerInterval) - - _ = v.BindEnv("sync.worker_count") - v.SetDefault("sync.worker_count", sync.DefaultSyncWorkerCount) - - _ = v.BindEnv("sync.worker_timeout") - v.SetDefault("sync.worker_timeout", sync.DefaultSyncWorkerTimeout) - - _ = v.BindEnv("sync.registry_monitor.check_interval") - v.SetDefault("sync.registry_monitor.check_interval", syncmonitor.DefaultCheckInterval) - - _ = v.BindEnv("sync.auth_config.username") - _ = v.BindEnv("sync.auth_config.password") - - // - // Publication configuration - // - - _ = v.BindEnv("publication.scheduler_interval") - v.SetDefault("publication.scheduler_interval", publication.DefaultPublicationSchedulerInterval) - - _ = v.BindEnv("publication.worker_count") - v.SetDefault("publication.worker_count", publication.DefaultPublicationWorkerCount) - - _ = v.BindEnv("publication.worker_timeout") - v.SetDefault("publication.worker_timeout", publication.DefaultPublicationWorkerTimeout) - - // - // Events configuration - // - - _ = v.BindEnv("events.subscriber_buffer_size") - v.SetDefault("events.subscriber_buffer_size", events.DefaultSubscriberBufferSize) - - _ = v.BindEnv("events.log_slow_consumers") - v.SetDefault("events.log_slow_consumers", events.DefaultLogSlowConsumers) - - _ = v.BindEnv("events.log_published_events") - v.SetDefault("events.log_published_events", events.DefaultLogPublishedEvents) - - // - // Metrics configuration - // - _ = v.BindEnv("metrics.enabled") - v.SetDefault("metrics.enabled", DefaultMetricsEnabled) - - _ = v.BindEnv("metrics.address") - v.SetDefault("metrics.address", DefaultMetricsAddress) - - // - // Connection management configuration - // - // Design Decision: No environment variables for connection management. - // Rationale: - // - 11 env vars would be too many and too technical for most users - // - Production-safe defaults work for 90% of deployments - // - Advanced users can use YAML config file for fine-grained control - // - Follows industry best practices (Kubernetes, Prometheus, etc.) - // - // For advanced configuration, use YAML config file: - // connection: - // max_concurrent_streams: 2000 - // max_recv_msg_size: 8388608 # 8 MB - // keepalive: - // max_connection_idle: 10m - // # ... other settings - // - // No viper defaults needed - defaults are applied via ConnectionConfig.WithDefaults() - // after loading to ensure clean separation between loading and defaulting logic. 
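To make the binding scheme above concrete, a small sketch of overriding values through the environment; the override values are illustrative and the import path follows this repository's layout:

// Sketch only: env overrides resolved by LoadConfig via the DIRECTORY_SERVER prefix.
package main

import (
	"fmt"
	"os"

	"github.com/agntcy/dir/server/config"
)

func main() {
	// Key dots and dashes map to underscores (see the EnvKeyReplacer above).
	os.Setenv("DIRECTORY_SERVER_LISTEN_ADDRESS", "0.0.0.0:9999")
	os.Setenv("DIRECTORY_SERVER_STORE_OCI_REGISTRY_ADDRESS", "registry.local:5000")

	cfg, err := config.LoadConfig()
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.ListenAddress) // 0.0.0.0:9999
}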
- - // Load configuration into struct - decodeHooks := mapstructure.ComposeDecodeHookFunc( - mapstructure.TextUnmarshallerHookFunc(), - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ) - - config := &Config{} - if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - // Apply connection management defaults if not configured - // This happens after unmarshal so YAML config takes precedence over defaults - config.Connection = config.Connection.WithDefaults() - - return config, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" + "strings" + "time" + + authn "github.com/agntcy/dir/server/authn/config" + authz "github.com/agntcy/dir/server/authz/config" + database "github.com/agntcy/dir/server/database/config" + sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" + events "github.com/agntcy/dir/server/events/config" + ratelimitconfig "github.com/agntcy/dir/server/middleware/ratelimit/config" + publication "github.com/agntcy/dir/server/publication/config" + routing "github.com/agntcy/dir/server/routing/config" + store "github.com/agntcy/dir/server/store/config" + oci "github.com/agntcy/dir/server/store/oci/config" + sync "github.com/agntcy/dir/server/sync/config" + syncmonitor "github.com/agntcy/dir/server/sync/monitor/config" + "github.com/agntcy/dir/utils/logging" + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" +) + +const ( + // Config params. + + DefaultEnvPrefix = "DIRECTORY_SERVER" + DefaultConfigName = "server.config" + DefaultConfigType = "yml" + DefaultConfigPath = "/etc/agntcy/dir" + + // API configuration. + + DefaultListenAddress = "0.0.0.0:8888" + + // OASF Validation configuration. + + // DefaultSchemaURL is the default OASF schema URL for API-based validation. + // When set, records will be validated using the OASF API validator instead of embedded schemas. + DefaultSchemaURL = "https://schema.oasf.outshift.com" + + // Connection management configuration. + // These defaults are based on production gRPC best practices and provide + // a balance between resource efficiency and connection stability. + + // DefaultMaxConcurrentStreams limits concurrent RPC streams per connection. + // This prevents a single connection from monopolizing server resources. + // Value: 1000 is industry standard, sufficient for most clients. + DefaultMaxConcurrentStreams = 1000 + + // DefaultMaxRecvMsgSize limits maximum received message size (4 MB). + // Protects against memory exhaustion from large messages. + // Value: 4 MB covers 99% of OCI artifacts and metadata. + DefaultMaxRecvMsgSize = 4 * 1024 * 1024 + + // DefaultMaxSendMsgSize limits maximum sent message size (4 MB). + // Value: 4 MB matches receive limit for consistency. + DefaultMaxSendMsgSize = 4 * 1024 * 1024 + + // DefaultConnectionTimeout limits time for connection establishment (120 seconds). + // Prevents hanging connection attempts from slow clients. + // Value: 2 minutes allows for slow networks without wasting resources. + DefaultConnectionTimeout = 120 * time.Second + + // DefaultMaxConnectionIdle closes idle connections after this duration (15 minutes). + // An idle connection has no active RPC streams. + // Value: 15 minutes balances resource cleanup vs connection churn. 
+	DefaultMaxConnectionIdle = 15 * time.Minute
+
+	// DefaultMaxConnectionAge forces connection rotation after this duration (30 minutes).
+	// Prevents long-lived connections from accumulating issues.
+	// Value: 30 minutes ensures regular TLS session rotation for security.
+	DefaultMaxConnectionAge = 30 * time.Minute
+
+	// DefaultMaxConnectionAgeGrace is the grace period after MaxConnectionAge (5 minutes).
+	// Allows inflight RPCs to complete before force-closing the connection.
+	// Value: 5 minutes provides sufficient time for most operations.
+	DefaultMaxConnectionAgeGrace = 5 * time.Minute
+
+	// DefaultKeepaliveTime is the interval for sending keepalive pings (5 minutes).
+	// Detects dead connections when a client crashes or the network partitions.
+	// Value: 5 minutes detects issues quickly without excessive traffic.
+	DefaultKeepaliveTime = 5 * time.Minute
+
+	// DefaultKeepaliveTimeout is the wait time for a keepalive ping response (1 minute).
+	// The connection is closed if no response is received within this timeout.
+	// Value: 1 minute allows for network delays without long waits.
+	DefaultKeepaliveTimeout = 1 * time.Minute
+
+	// DefaultMinTime is the minimum time between client keepalive pings (1 minute).
+	// Prevents clients from abusing keepalive by sending excessive pings.
+	// Value: 1 minute prevents abuse while allowing reasonable client detection.
+	DefaultMinTime = 1 * time.Minute
+
+	// DefaultPermitWithoutStream allows keepalive pings without active streams.
+	// Enables clients to detect dead connections even when idle.
+	// Value: true provides better connection health detection.
+	DefaultPermitWithoutStream = true
+
+	// Metrics configuration.
+
+	// DefaultMetricsEnabled enables Prometheus metrics collection.
+	DefaultMetricsEnabled = true
+
+	// DefaultMetricsAddress is the default listen address for the metrics HTTP server.
+ DefaultMetricsAddress = ":9090" +) + +var logger = logging.Logger("config") + +type Config struct { + // API configuration + ListenAddress string `json:"listen_address,omitempty" mapstructure:"listen_address"` + + // OASF Validation configuration + OASFAPIValidation OASFAPIValidationConfig `json:"oasf_api_validation,omitempty" mapstructure:"oasf_api_validation"` + + // Logging configuration + Logging LoggingConfig `json:"logging,omitempty" mapstructure:"logging"` + + // Connection management configuration + Connection ConnectionConfig `json:"connection,omitempty" mapstructure:"connection"` + + // Rate limiting configuration + RateLimit ratelimitconfig.Config `json:"ratelimit,omitempty" mapstructure:"ratelimit"` + + // Authn configuration (JWT or X.509 authentication) + Authn authn.Config `json:"authn,omitempty" mapstructure:"authn"` + + // Authz configuration + Authz authz.Config `json:"authz,omitempty" mapstructure:"authz"` + + // Store configuration + Store store.Config `json:"store,omitempty" mapstructure:"store"` + + // Routing configuration + Routing routing.Config `json:"routing,omitempty" mapstructure:"routing"` + + // Database configuration + Database database.Config `json:"database,omitempty" mapstructure:"database"` + + // Sync configuration + Sync sync.Config `json:"sync,omitempty" mapstructure:"sync"` + + // Publication configuration + Publication publication.Config `json:"publication,omitempty" mapstructure:"publication"` + + // Events configuration + Events events.Config `json:"events,omitempty" mapstructure:"events"` + + // Metrics configuration + Metrics MetricsConfig `json:"metrics,omitempty" mapstructure:"metrics"` +} + +// OASFAPIValidationConfig defines OASF API validation configuration. +type OASFAPIValidationConfig struct { + // SchemaURL is the OASF schema URL for API-based validation. + // When set, records will be validated using the OASF API validator instead of embedded schemas. + // Default: https://schema.oasf.outshift.com + SchemaURL string `json:"schema_url,omitempty" mapstructure:"schema_url"` + + // Disable disables API validation and uses embedded schema validation instead. + // Default: false (uses API validation) + Disable bool `json:"disable,omitempty" mapstructure:"disable"` + + // StrictMode enables strict validation mode (fails on warnings). + // When false, uses lax validation mode (allows warnings, only fails on errors). + // Default: true (strict mode) + // Only applies when Disable is false + StrictMode bool `json:"strict_mode,omitempty" mapstructure:"strict_mode"` +} + +// LoggingConfig defines gRPC request/response logging configuration. +type LoggingConfig struct { + // Verbose enables verbose logging mode (includes request/response payloads). + // Default: false (production mode - logs only start/finish with metadata). + Verbose bool `json:"verbose,omitempty" mapstructure:"verbose"` +} + +// ConnectionConfig defines gRPC connection management configuration. +// These settings control connection lifecycle, resource limits, and keepalive behavior +// to prevent resource exhaustion and detect dead connections. +type ConnectionConfig struct { + // MaxConcurrentStreams limits concurrent RPCs per connection. + // Prevents a single connection from monopolizing server resources. + // Default: 1000 + MaxConcurrentStreams uint32 `json:"max_concurrent_streams,omitempty" mapstructure:"max_concurrent_streams"` + + // MaxRecvMsgSize limits the maximum message size the server can receive (in bytes). 
+ // Protects against memory exhaustion from large messages. + // Default: 4194304 (4 MB) + MaxRecvMsgSize int `json:"max_recv_msg_size,omitempty" mapstructure:"max_recv_msg_size"` + + // MaxSendMsgSize limits the maximum message size the server can send (in bytes). + // Default: 4194304 (4 MB) + MaxSendMsgSize int `json:"max_send_msg_size,omitempty" mapstructure:"max_send_msg_size"` + + // ConnectionTimeout limits the time for connection establishment. + // Prevents hanging connection attempts from slow clients. + // Default: 120s (2 minutes) + ConnectionTimeout time.Duration `json:"connection_timeout,omitempty" mapstructure:"connection_timeout"` + + // Keepalive configuration for connection health management. + Keepalive KeepaliveConfig `json:"keepalive,omitempty" mapstructure:"keepalive"` +} + +// KeepaliveConfig defines keepalive parameters for connection health. +// Keepalive pings detect dead connections (client crash, network partition) +// and automatically close idle or aged connections to free resources. +type KeepaliveConfig struct { + // MaxConnectionIdle is the duration after which idle connections are closed. + // An idle connection has no active RPC streams. + // Default: 15m (15 minutes) + MaxConnectionIdle time.Duration `json:"max_connection_idle,omitempty" mapstructure:"max_connection_idle"` + + // MaxConnectionAge is the maximum duration a connection may exist. + // Forces connection rotation to prevent resource leaks and ensure TLS session rotation. + // Default: 30m (30 minutes) + MaxConnectionAge time.Duration `json:"max_connection_age,omitempty" mapstructure:"max_connection_age"` + + // MaxConnectionAgeGrace is the grace period after MaxConnectionAge + // to allow inflight RPCs to complete before force-closing the connection. + // Default: 5m (5 minutes) + MaxConnectionAgeGrace time.Duration `json:"max_connection_age_grace,omitempty" mapstructure:"max_connection_age_grace"` + + // Time is the duration after which a keepalive ping is sent + // on idle connections to check if the connection is still alive. + // Default: 5m (5 minutes) + Time time.Duration `json:"time,omitempty" mapstructure:"time"` + + // Timeout is the duration the server waits for a keepalive ping response. + // If no response is received, the connection is closed. + // Default: 1m (1 minute) + Timeout time.Duration `json:"timeout,omitempty" mapstructure:"timeout"` + + // MinTime is the minimum duration clients should wait between keepalive pings. + // Prevents clients from abusing keepalive by sending excessive pings. + // Default: 1m (1 minute) + MinTime time.Duration `json:"min_time,omitempty" mapstructure:"min_time"` + + // PermitWithoutStream allows clients to send keepalive pings + // even when there are no active RPC streams. + // Enables clients to detect dead connections proactively. + // Default: true + PermitWithoutStream bool `json:"permit_without_stream,omitempty" mapstructure:"permit_without_stream"` +} + +// MetricsConfig holds Prometheus metrics configuration. +type MetricsConfig struct { + // Enabled enables Prometheus metrics collection. + // Default: true + Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` + + // Address is the HTTP listen address for the metrics endpoint. + // The metrics server runs on a separate port from the gRPC server. + // Default: ":9090" + Address string `json:"address,omitempty" mapstructure:"address"` +} + +// DefaultConnectionConfig returns connection configuration with production-safe defaults. 
+// These defaults are based on industry best practices for production gRPC deployments
+// and provide a balance between resource efficiency, connection stability, and security.
+func DefaultConnectionConfig() ConnectionConfig {
+	return ConnectionConfig{
+		MaxConcurrentStreams: DefaultMaxConcurrentStreams,
+		MaxRecvMsgSize:       DefaultMaxRecvMsgSize,
+		MaxSendMsgSize:       DefaultMaxSendMsgSize,
+		ConnectionTimeout:    DefaultConnectionTimeout,
+		Keepalive: KeepaliveConfig{
+			MaxConnectionIdle:     DefaultMaxConnectionIdle,
+			MaxConnectionAge:      DefaultMaxConnectionAge,
+			MaxConnectionAgeGrace: DefaultMaxConnectionAgeGrace,
+			Time:                  DefaultKeepaliveTime,
+			Timeout:               DefaultKeepaliveTimeout,
+			MinTime:               DefaultMinTime,
+			PermitWithoutStream:   DefaultPermitWithoutStream,
+		},
+	}
+}
+
+// WithDefaults returns the connection configuration with defaults applied
+// if the configuration is not set or has zero values.
+// This method checks if MaxConcurrentStreams is 0 (indicating unset configuration)
+// and returns the default configuration in that case.
+func (c ConnectionConfig) WithDefaults() ConnectionConfig {
+	// If MaxConcurrentStreams is 0, the config was not set - use defaults
+	if c.MaxConcurrentStreams == 0 {
+		return DefaultConnectionConfig()
+	}
+
+	return c
+}
+
+func LoadConfig() (*Config, error) {
+	v := viper.NewWithOptions(
+		viper.KeyDelimiter("."),
+		viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")),
+	)
+
+	v.SetConfigName(DefaultConfigName)
+	v.SetConfigType(DefaultConfigType)
+	v.AddConfigPath(DefaultConfigPath)
+
+	v.SetEnvPrefix(DefaultEnvPrefix)
+	v.AllowEmptyEnv(true)
+	v.AutomaticEnv()
+
+	// Read the config file
+	if err := v.ReadInConfig(); err != nil {
+		fileNotFoundError := viper.ConfigFileNotFoundError{}
+		if errors.As(err, &fileNotFoundError) {
+			logger.Info("Config file not found, using defaults.")
+		} else {
+			return nil, fmt.Errorf("failed to read configuration file: %w", err)
+		}
+	}
+
+	//
+	// API configuration
+	//
+	_ = v.BindEnv("listen_address")
+	v.SetDefault("listen_address", DefaultListenAddress)
+
+	//
+	// OASF Validation configuration
+	//
+	_ = v.BindEnv("oasf_api_validation.schema_url")
+	v.SetDefault("oasf_api_validation.schema_url", DefaultSchemaURL)
+
+	_ = v.BindEnv("oasf_api_validation.disable")
+	v.SetDefault("oasf_api_validation.disable", false)
+
+	_ = v.BindEnv("oasf_api_validation.strict_mode")
+	v.SetDefault("oasf_api_validation.strict_mode", true)
+
+	//
+	// Logging configuration (gRPC request/response logging)
+	//
+	_ = v.BindEnv("logging.verbose")
+	v.SetDefault("logging.verbose", false)
+
+	//
+	// Rate limiting configuration
+	//
+	_ = v.BindEnv("ratelimit.enabled")
+	v.SetDefault("ratelimit.enabled", false)
+
+	_ = v.BindEnv("ratelimit.global_rps")
+	v.SetDefault("ratelimit.global_rps", 0.0)
+
+	_ = v.BindEnv("ratelimit.global_burst")
+	v.SetDefault("ratelimit.global_burst", 0)
+
+	_ = v.BindEnv("ratelimit.per_client_rps")
+	v.SetDefault("ratelimit.per_client_rps", 0.0)
+
+	_ = v.BindEnv("ratelimit.per_client_burst")
+	v.SetDefault("ratelimit.per_client_burst", 0)
+
+	// Note: method_limits (per-method rate limit overrides) can only be configured
+	// via YAML/JSON config file due to its complex nested map structure.
+	// Environment variable configuration for method limits is not supported.
+ // Example config: + // ratelimit: + // method_limits: + // "/agntcy.dir.store.v1.StoreService/CreateRecord": + // rps: 50 + // burst: 100 + + // + // Authn configuration (authentication: JWT or X.509) + // + _ = v.BindEnv("authn.enabled") + v.SetDefault("authn.enabled", "false") + + _ = v.BindEnv("authn.mode") + v.SetDefault("authn.mode", "x509") + + _ = v.BindEnv("authn.socket_path") + v.SetDefault("authn.socket_path", "") + + _ = v.BindEnv("authn.audiences") + v.SetDefault("authn.audiences", "") + + // + // Authz configuration (authorization policies) + // + _ = v.BindEnv("authz.enabled") + v.SetDefault("authz.enabled", "false") + + _ = v.BindEnv("authz.trust_domain") + v.SetDefault("authz.trust_domain", "") + + // + // Store configuration + // + _ = v.BindEnv("store.provider") + v.SetDefault("store.provider", store.DefaultProvider) + + _ = v.BindEnv("store.oci.local_dir") + v.SetDefault("store.oci.local_dir", "") + + _ = v.BindEnv("store.oci.cache_dir") + v.SetDefault("store.oci.cache_dir", "") + + _ = v.BindEnv("store.oci.registry_address") + v.SetDefault("store.oci.registry_address", oci.DefaultRegistryAddress) + + _ = v.BindEnv("store.oci.repository_name") + v.SetDefault("store.oci.repository_name", oci.DefaultRepositoryName) + + _ = v.BindEnv("store.oci.auth_config.insecure") + v.SetDefault("store.oci.auth_config.insecure", oci.DefaultAuthConfigInsecure) + + _ = v.BindEnv("store.oci.auth_config.username") + _ = v.BindEnv("store.oci.auth_config.password") + _ = v.BindEnv("store.oci.auth_config.access_token") + _ = v.BindEnv("store.oci.auth_config.refresh_token") + + // + // Routing configuration + // + _ = v.BindEnv("routing.listen_address") + v.SetDefault("routing.listen_address", routing.DefaultListenAddress) + + _ = v.BindEnv("routing.directory_api_address") + v.SetDefault("routing.directory_api_address", "") + + _ = v.BindEnv("routing.bootstrap_peers") + v.SetDefault("routing.bootstrap_peers", strings.Join(routing.DefaultBootstrapPeers, ",")) + + _ = v.BindEnv("routing.key_path") + v.SetDefault("routing.key_path", "") + + _ = v.BindEnv("routing.datastore_dir") + v.SetDefault("routing.datastore_dir", "") + + // + // Routing GossipSub configuration + // Note: Only enable/disable is configurable. Protocol parameters (topic, message size) + // are hardcoded in server/routing/pubsub/constants.go for network compatibility. 
+ // + _ = v.BindEnv("routing.gossipsub.enabled") + v.SetDefault("routing.gossipsub.enabled", routing.DefaultGossipSubEnabled) + + // + // Database configuration + // + _ = v.BindEnv("database.db_type") + v.SetDefault("database.db_type", database.DefaultDBType) + + _ = v.BindEnv("database.sqlite.db_path") + v.SetDefault("database.sqlite.db_path", sqliteconfig.DefaultSQLiteDBPath) + + // + // Sync configuration + // + + _ = v.BindEnv("sync.scheduler_interval") + v.SetDefault("sync.scheduler_interval", sync.DefaultSyncSchedulerInterval) + + _ = v.BindEnv("sync.worker_count") + v.SetDefault("sync.worker_count", sync.DefaultSyncWorkerCount) + + _ = v.BindEnv("sync.worker_timeout") + v.SetDefault("sync.worker_timeout", sync.DefaultSyncWorkerTimeout) + + _ = v.BindEnv("sync.registry_monitor.check_interval") + v.SetDefault("sync.registry_monitor.check_interval", syncmonitor.DefaultCheckInterval) + + _ = v.BindEnv("sync.auth_config.username") + _ = v.BindEnv("sync.auth_config.password") + + // + // Publication configuration + // + + _ = v.BindEnv("publication.scheduler_interval") + v.SetDefault("publication.scheduler_interval", publication.DefaultPublicationSchedulerInterval) + + _ = v.BindEnv("publication.worker_count") + v.SetDefault("publication.worker_count", publication.DefaultPublicationWorkerCount) + + _ = v.BindEnv("publication.worker_timeout") + v.SetDefault("publication.worker_timeout", publication.DefaultPublicationWorkerTimeout) + + // + // Events configuration + // + + _ = v.BindEnv("events.subscriber_buffer_size") + v.SetDefault("events.subscriber_buffer_size", events.DefaultSubscriberBufferSize) + + _ = v.BindEnv("events.log_slow_consumers") + v.SetDefault("events.log_slow_consumers", events.DefaultLogSlowConsumers) + + _ = v.BindEnv("events.log_published_events") + v.SetDefault("events.log_published_events", events.DefaultLogPublishedEvents) + + // + // Metrics configuration + // + _ = v.BindEnv("metrics.enabled") + v.SetDefault("metrics.enabled", DefaultMetricsEnabled) + + _ = v.BindEnv("metrics.address") + v.SetDefault("metrics.address", DefaultMetricsAddress) + + // + // Connection management configuration + // + // Design Decision: No environment variables for connection management. + // Rationale: + // - 11 env vars would be too many and too technical for most users + // - Production-safe defaults work for 90% of deployments + // - Advanced users can use YAML config file for fine-grained control + // - Follows industry best practices (Kubernetes, Prometheus, etc.) + // + // For advanced configuration, use YAML config file: + // connection: + // max_concurrent_streams: 2000 + // max_recv_msg_size: 8388608 # 8 MB + // keepalive: + // max_connection_idle: 10m + // # ... other settings + // + // No viper defaults needed - defaults are applied via ConnectionConfig.WithDefaults() + // after loading to ensure clean separation between loading and defaulting logic. 
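As a sketch of where these settings land, one plausible translation of ConnectionConfig into gRPC server options; the toServerOptions helper is an assumption for illustration, while the field mapping follows the google.golang.org/grpc/keepalive package:

// Sketch only: toServerOptions is hypothetical; it shows how the config fields
// could map onto grpc server options, including the WithDefaults fallback.
package config

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func toServerOptions(c ConnectionConfig) []grpc.ServerOption {
	c = c.WithDefaults() // a zero-value config falls back to DefaultConnectionConfig()

	return []grpc.ServerOption{
		grpc.MaxConcurrentStreams(c.MaxConcurrentStreams),
		grpc.MaxRecvMsgSize(c.MaxRecvMsgSize),
		grpc.MaxSendMsgSize(c.MaxSendMsgSize),
		grpc.ConnectionTimeout(c.ConnectionTimeout),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     c.Keepalive.MaxConnectionIdle,
			MaxConnectionAge:      c.Keepalive.MaxConnectionAge,
			MaxConnectionAgeGrace: c.Keepalive.MaxConnectionAgeGrace,
			Time:                  c.Keepalive.Time,
			Timeout:               c.Keepalive.Timeout,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             c.Keepalive.MinTime,
			PermitWithoutStream: c.Keepalive.PermitWithoutStream,
		}),
	}
}

Note that WithDefaults is all-or-nothing: a partially populated config (nonzero MaxConcurrentStreams, zero elsewhere) is kept as-is, so the remaining fields stay at their zero values.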
+ + // Load configuration into struct + decodeHooks := mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ) + + config := &Config{} + if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + + // Apply connection management defaults if not configured + // This happens after unmarshal so YAML config takes precedence over defaults + config.Connection = config.Connection.WithDefaults() + + return config, nil +} diff --git a/server/config/config_test.go b/server/config/config_test.go index cb6a44f3a..0d6800da3 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -1,679 +1,679 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:testifylint -package config - -import ( - "testing" - "time" - - authn "github.com/agntcy/dir/server/authn/config" - authz "github.com/agntcy/dir/server/authz/config" - database "github.com/agntcy/dir/server/database/config" - sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" - ratelimitconfig "github.com/agntcy/dir/server/middleware/ratelimit/config" - publication "github.com/agntcy/dir/server/publication/config" - routing "github.com/agntcy/dir/server/routing/config" - store "github.com/agntcy/dir/server/store/config" - oci "github.com/agntcy/dir/server/store/oci/config" - sync "github.com/agntcy/dir/server/sync/config" - monitor "github.com/agntcy/dir/server/sync/monitor/config" - "github.com/stretchr/testify/assert" -) - -func TestConfig(t *testing.T) { - tests := []struct { - Name string - EnvVars map[string]string - ExpectedConfig *Config - }{ - { - Name: "Custom config", - EnvVars: map[string]string{ - "DIRECTORY_SERVER_LISTEN_ADDRESS": "example.com:8889", - "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "https://custom.schema.url", - "DIRECTORY_SERVER_STORE_PROVIDER": "provider", - "DIRECTORY_SERVER_STORE_OCI_LOCAL_DIR": "local-dir", - "DIRECTORY_SERVER_STORE_OCI_REGISTRY_ADDRESS": "example.com:5001", - "DIRECTORY_SERVER_STORE_OCI_REPOSITORY_NAME": "test-dir", - "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_INSECURE": "true", - "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME": "username", - "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD": "password", - "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_ACCESS_TOKEN": "access-token", - "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_REFRESH_TOKEN": "refresh-token", - "DIRECTORY_SERVER_ROUTING_LISTEN_ADDRESS": "/ip4/1.1.1.1/tcp/1", - "DIRECTORY_SERVER_ROUTING_BOOTSTRAP_PEERS": "/ip4/1.1.1.1/tcp/1,/ip4/1.1.1.1/tcp/2", - "DIRECTORY_SERVER_ROUTING_KEY_PATH": "/path/to/key", - "DIRECTORY_SERVER_DATABASE_DB_TYPE": "sqlite", - "DIRECTORY_SERVER_DATABASE_SQLITE_DB_PATH": "sqlite.db", - "DIRECTORY_SERVER_SYNC_SCHEDULER_INTERVAL": "1s", - "DIRECTORY_SERVER_SYNC_WORKER_COUNT": "1", - "DIRECTORY_SERVER_SYNC_REGISTRY_MONITOR_CHECK_INTERVAL": "10s", - "DIRECTORY_SERVER_SYNC_WORKER_TIMEOUT": "10s", - "DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME": "sync-user", - "DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD": "sync-password", - "DIRECTORY_SERVER_AUTHZ_ENABLED": "true", - "DIRECTORY_SERVER_AUTHZ_SOCKET_PATH": "/test/agent.sock", - "DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN": "dir.com", - "DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL": "10s", - "DIRECTORY_SERVER_PUBLICATION_WORKER_COUNT": "1", - "DIRECTORY_SERVER_PUBLICATION_WORKER_TIMEOUT": 
"10s", - }, - ExpectedConfig: &Config{ - ListenAddress: "example.com:8889", - OASFAPIValidation: OASFAPIValidationConfig{ - SchemaURL: "https://custom.schema.url", - Disable: false, - StrictMode: true, // Default is true (set in config.go) - }, - Connection: DefaultConnectionConfig(), // Connection defaults applied - Authn: authn.Config{ - Enabled: false, - Mode: authn.AuthModeX509, // Default from config.go:109 - Audiences: []string{}, - }, - Store: store.Config{ - Provider: "provider", - OCI: oci.Config{ - LocalDir: "local-dir", - RegistryAddress: "example.com:5001", - RepositoryName: "test-dir", - AuthConfig: oci.AuthConfig{ - Insecure: true, - Username: "username", - Password: "password", - RefreshToken: "refresh-token", - AccessToken: "access-token", - }, - }, - }, - Routing: routing.Config{ - ListenAddress: "/ip4/1.1.1.1/tcp/1", - BootstrapPeers: []string{ - "/ip4/1.1.1.1/tcp/1", - "/ip4/1.1.1.1/tcp/2", - }, - KeyPath: "/path/to/key", - GossipSub: routing.GossipSubConfig{ - Enabled: true, // Default value - }, - }, - Database: database.Config{ - DBType: "sqlite", - SQLite: sqliteconfig.Config{ - DBPath: "sqlite.db", - }, - }, - Sync: sync.Config{ - SchedulerInterval: 1 * time.Second, - WorkerCount: 1, - WorkerTimeout: 10 * time.Second, - RegistryMonitor: monitor.Config{ - CheckInterval: 10 * time.Second, - }, - AuthConfig: sync.AuthConfig{ - Username: "sync-user", - Password: "sync-password", - }, - }, - Authz: authz.Config{ - Enabled: true, - TrustDomain: "dir.com", - }, - Publication: publication.Config{ - SchedulerInterval: 10 * time.Second, - WorkerCount: 1, - WorkerTimeout: 10 * time.Second, - }, - Metrics: MetricsConfig{ - Enabled: true, - Address: ":9090", - }, - }, - }, - { - Name: "Default config", - EnvVars: map[string]string{}, - ExpectedConfig: &Config{ - ListenAddress: DefaultListenAddress, - OASFAPIValidation: OASFAPIValidationConfig{ - SchemaURL: DefaultSchemaURL, // Default OASF schema URL - Disable: false, // Default is false (set in config.go) - StrictMode: true, // Default is true (set in config.go) - }, - Connection: DefaultConnectionConfig(), // Connection defaults applied - Authn: authn.Config{ - Enabled: false, - Mode: authn.AuthModeX509, // Default from config.go:109 - Audiences: []string{}, - }, - Store: store.Config{ - Provider: store.DefaultProvider, - OCI: oci.Config{ - RegistryAddress: oci.DefaultRegistryAddress, - RepositoryName: oci.DefaultRepositoryName, - AuthConfig: oci.AuthConfig{ - Insecure: oci.DefaultAuthConfigInsecure, - }, - }, - }, - Routing: routing.Config{ - ListenAddress: routing.DefaultListenAddress, - BootstrapPeers: routing.DefaultBootstrapPeers, - GossipSub: routing.GossipSubConfig{ - Enabled: routing.DefaultGossipSubEnabled, - }, - }, - Database: database.Config{ - DBType: database.DefaultDBType, - SQLite: sqliteconfig.Config{ - DBPath: sqliteconfig.DefaultSQLiteDBPath, - }, - }, - Sync: sync.Config{ - SchedulerInterval: sync.DefaultSyncSchedulerInterval, - WorkerCount: sync.DefaultSyncWorkerCount, - WorkerTimeout: sync.DefaultSyncWorkerTimeout, - RegistryMonitor: monitor.Config{ - CheckInterval: monitor.DefaultCheckInterval, - }, - }, - Authz: authz.Config{}, - Publication: publication.Config{ - SchedulerInterval: publication.DefaultPublicationSchedulerInterval, - WorkerCount: publication.DefaultPublicationWorkerCount, - WorkerTimeout: publication.DefaultPublicationWorkerTimeout, - }, - Metrics: MetricsConfig{ - Enabled: DefaultMetricsEnabled, - Address: DefaultMetricsAddress, - }, - }, - }, - } - - for _, test := range tests { - 
t.Run(test.Name, func(t *testing.T) { - for k, v := range test.EnvVars { - t.Setenv(k, v) - } - - config, err := LoadConfig() - assert.NoError(t, err) - assert.Equal(t, *config, *test.ExpectedConfig) - }) - } -} - -// TestConfig_SchemaURL tests that OASF schema URL configuration is correctly parsed. -func TestConfig_SchemaURL(t *testing.T) { - tests := []struct { - name string - envVars map[string]string - expectedSchemaURL string - }{ - { - name: "default schema URL", - envVars: map[string]string{}, - expectedSchemaURL: DefaultSchemaURL, - }, - { - name: "custom schema URL", - envVars: map[string]string{ - "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "https://custom.schema.url", - }, - expectedSchemaURL: "https://custom.schema.url", - }, - { - name: "empty schema URL (disable API validator)", - envVars: map[string]string{ - "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "", - }, - expectedSchemaURL: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set environment variables - for k, v := range tt.envVars { - t.Setenv(k, v) - } - - // Load config - cfg, err := LoadConfig() - assert.NoError(t, err) - - // Verify schema URL configuration - assert.Equal(t, tt.expectedSchemaURL, cfg.OASFAPIValidation.SchemaURL) - }) - } -} - -// TestConfig_RateLimiting tests that rate limiting configuration is correctly parsed. -func TestConfig_RateLimiting(t *testing.T) { - tests := []struct { - name string - envVars map[string]string - expectedConfig ratelimitconfig.Config - }{ - { - name: "rate limiting enabled with custom values", - envVars: map[string]string{ - "DIRECTORY_SERVER_RATELIMIT_ENABLED": "true", - "DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS": "50.0", - "DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST": "100", - "DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS": "500.0", - "DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST": "1000", - }, - expectedConfig: ratelimitconfig.Config{ - Enabled: true, - GlobalRPS: 50.0, - GlobalBurst: 100, - PerClientRPS: 500.0, - PerClientBurst: 1000, - MethodLimits: map[string]ratelimitconfig.MethodLimit{}, - }, - }, - { - name: "rate limiting disabled (default)", - envVars: map[string]string{ - "DIRECTORY_SERVER_RATELIMIT_ENABLED": "false", - }, - expectedConfig: ratelimitconfig.Config{ - Enabled: false, - GlobalRPS: 0, - GlobalBurst: 0, - PerClientRPS: 0, - PerClientBurst: 0, - MethodLimits: map[string]ratelimitconfig.MethodLimit{}, - }, - }, - { - name: "rate limiting with partial configuration", - envVars: map[string]string{ - "DIRECTORY_SERVER_RATELIMIT_ENABLED": "true", - "DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS": "200.0", - "DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST": "400", - }, - expectedConfig: ratelimitconfig.Config{ - Enabled: true, - GlobalRPS: 200.0, - GlobalBurst: 400, - PerClientRPS: 0, - PerClientBurst: 0, - MethodLimits: map[string]ratelimitconfig.MethodLimit{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set environment variables - for k, v := range tt.envVars { - t.Setenv(k, v) - } - - // Load config - cfg, err := LoadConfig() - assert.NoError(t, err) - - // Verify rate limiting configuration - assert.Equal(t, tt.expectedConfig.Enabled, cfg.RateLimit.Enabled) - assert.Equal(t, tt.expectedConfig.GlobalRPS, cfg.RateLimit.GlobalRPS) - assert.Equal(t, tt.expectedConfig.GlobalBurst, cfg.RateLimit.GlobalBurst) - assert.Equal(t, tt.expectedConfig.PerClientRPS, cfg.RateLimit.PerClientRPS) - assert.Equal(t, tt.expectedConfig.PerClientBurst, cfg.RateLimit.PerClientBurst) - }) - } -} - -// 
TestConfig_RateLimitingValidation tests that invalid rate limiting configuration -// is properly validated during server initialization (will be tested in server tests). -func TestConfig_RateLimitingValidation(t *testing.T) { - tests := []struct { - name string - config ratelimitconfig.Config - shouldError bool - }{ - { - name: "valid rate limiting configuration", - config: ratelimitconfig.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 2000, - }, - shouldError: false, - }, - { - name: "invalid rate limiting - negative RPS", - config: ratelimitconfig.Config{ - Enabled: true, - GlobalRPS: -10.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 2000, - }, - shouldError: true, - }, - { - name: "invalid rate limiting - negative burst", - config: ratelimitconfig.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: -200, - PerClientRPS: 1000.0, - PerClientBurst: 2000, - }, - shouldError: true, - }, - { - name: "disabled rate limiting - no validation", - config: ratelimitconfig.Config{ - Enabled: false, - GlobalRPS: -100.0, // Invalid but should be ignored - GlobalBurst: -200, - PerClientRPS: -1000.0, - PerClientBurst: -2000, - }, - shouldError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - if tt.shouldError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -// TestDefaultConnectionConfig verifies that DefaultConnectionConfig returns -// the correct production-safe default values for all connection parameters. -func TestDefaultConnectionConfig(t *testing.T) { - cfg := DefaultConnectionConfig() - - // Verify connection limits - assert.Equal(t, uint32(1000), cfg.MaxConcurrentStreams, "MaxConcurrentStreams should be 1000") - assert.Equal(t, 4*1024*1024, cfg.MaxRecvMsgSize, "MaxRecvMsgSize should be 4 MB") - assert.Equal(t, 4*1024*1024, cfg.MaxSendMsgSize, "MaxSendMsgSize should be 4 MB") - assert.Equal(t, 120*time.Second, cfg.ConnectionTimeout, "ConnectionTimeout should be 120 seconds") - - // Verify keepalive parameters - assert.Equal(t, 15*time.Minute, cfg.Keepalive.MaxConnectionIdle, "MaxConnectionIdle should be 15 minutes") - assert.Equal(t, 30*time.Minute, cfg.Keepalive.MaxConnectionAge, "MaxConnectionAge should be 30 minutes") - assert.Equal(t, 5*time.Minute, cfg.Keepalive.MaxConnectionAgeGrace, "MaxConnectionAgeGrace should be 5 minutes") - assert.Equal(t, 5*time.Minute, cfg.Keepalive.Time, "Keepalive Time should be 5 minutes") - assert.Equal(t, 1*time.Minute, cfg.Keepalive.Timeout, "Keepalive Timeout should be 1 minute") - assert.Equal(t, 1*time.Minute, cfg.Keepalive.MinTime, "Keepalive MinTime should be 1 minute") - assert.Equal(t, true, cfg.Keepalive.PermitWithoutStream, "PermitWithoutStream should be true") -} - -// TestConnectionConfig_DefaultValues verifies that LoadConfig returns default -// connection configuration when no environment variables are set. -func TestConnectionConfig_DefaultValues(t *testing.T) { - // No environment variables set - should use defaults - cfg, err := LoadConfig() - assert.NoError(t, err) - - // Verify connection configuration has default values - // Note: In Phase 3 we'll add default loading to LoadConfig() - // For now, we just verify the struct exists - assert.NotNil(t, cfg.Connection) -} - -// TestConnectionConfig_Constants verifies that all connection constants -// are defined with the expected values based on production best practices. 
-func TestConnectionConfig_Constants(t *testing.T) { - // Connection limits - use EqualValues for cross-type comparison - assert.EqualValues(t, 1000, DefaultMaxConcurrentStreams) - assert.EqualValues(t, 4*1024*1024, DefaultMaxRecvMsgSize) - assert.EqualValues(t, 4*1024*1024, DefaultMaxSendMsgSize) - assert.Equal(t, 120*time.Second, DefaultConnectionTimeout) - - // Keepalive parameters - assert.Equal(t, 15*time.Minute, DefaultMaxConnectionIdle) - assert.Equal(t, 30*time.Minute, DefaultMaxConnectionAge) - assert.Equal(t, 5*time.Minute, DefaultMaxConnectionAgeGrace) - assert.Equal(t, 5*time.Minute, DefaultKeepaliveTime) - assert.Equal(t, 1*time.Minute, DefaultKeepaliveTimeout) - assert.Equal(t, 1*time.Minute, DefaultMinTime) - assert.True(t, DefaultPermitWithoutStream) -} - -// TestConnectionConfig_StructTags verifies that struct tags are properly -// defined for JSON and mapstructure serialization. -func TestConnectionConfig_StructTags(t *testing.T) { - // This test ensures that configuration can be properly serialized - // and deserialized from YAML/JSON files - cfg := ConnectionConfig{ - MaxConcurrentStreams: 2000, - MaxRecvMsgSize: 8 * 1024 * 1024, - MaxSendMsgSize: 8 * 1024 * 1024, - ConnectionTimeout: 60 * time.Second, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: 10 * time.Minute, - MaxConnectionAge: 20 * time.Minute, - MaxConnectionAgeGrace: 3 * time.Minute, - Time: 3 * time.Minute, - Timeout: 30 * time.Second, - MinTime: 30 * time.Second, - PermitWithoutStream: false, - }, - } - - // Verify struct is not empty and can be created - assert.NotNil(t, cfg) - assert.Equal(t, uint32(2000), cfg.MaxConcurrentStreams) - assert.NotNil(t, cfg.Keepalive) - assert.Equal(t, 10*time.Minute, cfg.Keepalive.MaxConnectionIdle) -} - -// TestConnectionConfig_ProductionSafety verifies that default values -// are production-safe and follow gRPC best practices. 
-func TestConnectionConfig_ProductionSafety(t *testing.T) { - cfg := DefaultConnectionConfig() - - // Verify MaxConcurrentStreams is reasonable (not too low, not unlimited) - assert.Greater(t, cfg.MaxConcurrentStreams, uint32(100), "MaxConcurrentStreams should allow reasonable concurrency") - assert.Less(t, cfg.MaxConcurrentStreams, uint32(10000), "MaxConcurrentStreams should not be excessive") - - // Verify message sizes protect against memory exhaustion - assert.Greater(t, cfg.MaxRecvMsgSize, 1*1024*1024, "MaxRecvMsgSize should allow reasonable messages") - assert.Less(t, cfg.MaxRecvMsgSize, 100*1024*1024, "MaxRecvMsgSize should prevent memory exhaustion") - - // Verify keepalive times are reasonable - assert.Greater(t, cfg.Keepalive.Time, 1*time.Minute, "Keepalive Time should not be too aggressive") - assert.Less(t, cfg.Keepalive.Time, 30*time.Minute, "Keepalive Time should detect dead connections") - - // Verify idle timeout is reasonable - assert.Greater(t, cfg.Keepalive.MaxConnectionIdle, 5*time.Minute, "MaxConnectionIdle should not be too aggressive") - assert.Less(t, cfg.Keepalive.MaxConnectionIdle, 2*time.Hour, "MaxConnectionIdle should free resources") - - // Verify connection age rotation - assert.Greater(t, cfg.Keepalive.MaxConnectionAge, cfg.Keepalive.MaxConnectionIdle, "MaxConnectionAge should be greater than MaxConnectionIdle") - assert.Greater(t, cfg.Keepalive.MaxConnectionAgeGrace, 1*time.Minute, "MaxConnectionAgeGrace should allow inflight RPCs to complete") - - // Verify keepalive timeout is reasonable - assert.Greater(t, cfg.Keepalive.Timeout, 10*time.Second, "Keepalive Timeout should allow for network delays") - assert.Less(t, cfg.Keepalive.Timeout, 5*time.Minute, "Keepalive Timeout should not wait too long") - - // Verify MinTime prevents abuse - assert.Greater(t, cfg.Keepalive.MinTime, 10*time.Second, "MinTime should prevent excessive pings") - - // Verify PermitWithoutStream is enabled for better health detection - assert.True(t, cfg.Keepalive.PermitWithoutStream, "PermitWithoutStream should be enabled") -} - -// TestConnectionConfig_WithDefaults verifies that WithDefaults returns -// the correct configuration based on whether the config is set or not. 
-func TestConnectionConfig_WithDefaults(t *testing.T) { - tests := []struct { - name string - input ConnectionConfig - expected ConnectionConfig - }{ - { - name: "empty config returns defaults", - input: ConnectionConfig{}, - expected: DefaultConnectionConfig(), - }, - { - name: "zero MaxConcurrentStreams returns defaults", - input: ConnectionConfig{ - MaxConcurrentStreams: 0, - MaxRecvMsgSize: 8 * 1024 * 1024, - }, - expected: DefaultConnectionConfig(), - }, - { - name: "configured values are preserved", - input: ConnectionConfig{ - MaxConcurrentStreams: 2000, - MaxRecvMsgSize: 8 * 1024 * 1024, - MaxSendMsgSize: 8 * 1024 * 1024, - ConnectionTimeout: 60 * time.Second, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: 10 * time.Minute, - MaxConnectionAge: 20 * time.Minute, - MaxConnectionAgeGrace: 3 * time.Minute, - Time: 3 * time.Minute, - Timeout: 30 * time.Second, - MinTime: 30 * time.Second, - PermitWithoutStream: false, - }, - }, - expected: ConnectionConfig{ - MaxConcurrentStreams: 2000, - MaxRecvMsgSize: 8 * 1024 * 1024, - MaxSendMsgSize: 8 * 1024 * 1024, - ConnectionTimeout: 60 * time.Second, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: 10 * time.Minute, - MaxConnectionAge: 20 * time.Minute, - MaxConnectionAgeGrace: 3 * time.Minute, - Time: 3 * time.Minute, - Timeout: 30 * time.Second, - MinTime: 30 * time.Second, - PermitWithoutStream: false, - }, - }, - }, - { - name: "partial config with non-zero MaxConcurrentStreams is preserved", - input: ConnectionConfig{ - MaxConcurrentStreams: 500, - // Other fields zero/default - }, - expected: ConnectionConfig{ - MaxConcurrentStreams: 500, - // Other fields remain zero/default as set - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := tt.input.WithDefaults() - assert.Equal(t, tt.expected, result) - }) - } -} - -// TestLoadConfig_ConnectionDefaults verifies that LoadConfig applies -// connection management defaults when no config file is present. -func TestLoadConfig_ConnectionDefaults(t *testing.T) { - // LoadConfig should apply defaults automatically - cfg, err := LoadConfig() - assert.NoError(t, err) - assert.NotNil(t, cfg) - - // Verify connection config has defaults applied (use EqualValues for uint32) - assert.EqualValues(t, DefaultMaxConcurrentStreams, cfg.Connection.MaxConcurrentStreams) - assert.EqualValues(t, DefaultMaxRecvMsgSize, cfg.Connection.MaxRecvMsgSize) - assert.EqualValues(t, DefaultMaxSendMsgSize, cfg.Connection.MaxSendMsgSize) - assert.Equal(t, DefaultConnectionTimeout, cfg.Connection.ConnectionTimeout) - - // Verify keepalive defaults - assert.Equal(t, DefaultMaxConnectionIdle, cfg.Connection.Keepalive.MaxConnectionIdle) - assert.Equal(t, DefaultMaxConnectionAge, cfg.Connection.Keepalive.MaxConnectionAge) - assert.Equal(t, DefaultMaxConnectionAgeGrace, cfg.Connection.Keepalive.MaxConnectionAgeGrace) - assert.Equal(t, DefaultKeepaliveTime, cfg.Connection.Keepalive.Time) - assert.Equal(t, DefaultKeepaliveTimeout, cfg.Connection.Keepalive.Timeout) - assert.Equal(t, DefaultMinTime, cfg.Connection.Keepalive.MinTime) - assert.Equal(t, DefaultPermitWithoutStream, cfg.Connection.Keepalive.PermitWithoutStream) -} - -// TestConnectionConfig_YAMLSerialization verifies that connection configuration -// can be serialized to and from YAML format correctly. 
-func TestConnectionConfig_YAMLSerialization(t *testing.T) { - // Create a custom configuration - customConfig := ConnectionConfig{ - MaxConcurrentStreams: 2000, - MaxRecvMsgSize: 8388608, // 8 MB - MaxSendMsgSize: 8388608, // 8 MB - ConnectionTimeout: 60 * time.Second, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: 10 * time.Minute, - MaxConnectionAge: 20 * time.Minute, - MaxConnectionAgeGrace: 3 * time.Minute, - Time: 3 * time.Minute, - Timeout: 30 * time.Second, - MinTime: 30 * time.Second, - PermitWithoutStream: false, - }, - } - - // Verify all fields can be set with custom values - assert.Equal(t, uint32(2000), customConfig.MaxConcurrentStreams) - assert.Equal(t, 8388608, customConfig.MaxRecvMsgSize) - assert.Equal(t, 8388608, customConfig.MaxSendMsgSize) - assert.Equal(t, 60*time.Second, customConfig.ConnectionTimeout) - assert.Equal(t, 10*time.Minute, customConfig.Keepalive.MaxConnectionIdle) - assert.Equal(t, 20*time.Minute, customConfig.Keepalive.MaxConnectionAge) - assert.Equal(t, 3*time.Minute, customConfig.Keepalive.MaxConnectionAgeGrace) - assert.Equal(t, 3*time.Minute, customConfig.Keepalive.Time) - assert.Equal(t, 30*time.Second, customConfig.Keepalive.Timeout) - assert.Equal(t, 30*time.Second, customConfig.Keepalive.MinTime) - assert.False(t, customConfig.Keepalive.PermitWithoutStream) -} - -// TestConnectionConfig_MapstructureTags verifies that struct tags are properly -// defined for mapstructure to work with YAML/JSON loading. -func TestConnectionConfig_MapstructureTags(t *testing.T) { - // This test ensures mapstructure can parse the config - // The actual YAML loading is tested through LoadConfig in integration tests - - // Verify we have the correct field types for mapstructure - cfg := ConnectionConfig{ - MaxConcurrentStreams: 1000, - MaxRecvMsgSize: 4 * 1024 * 1024, - MaxSendMsgSize: 4 * 1024 * 1024, - ConnectionTimeout: 120 * time.Second, - Keepalive: KeepaliveConfig{ - MaxConnectionIdle: 15 * time.Minute, - MaxConnectionAge: 30 * time.Minute, - MaxConnectionAgeGrace: 5 * time.Minute, - Time: 5 * time.Minute, - Timeout: 1 * time.Minute, - MinTime: 1 * time.Minute, - PermitWithoutStream: true, - }, - } - - // Verify struct can be created and all fields are accessible - assert.NotZero(t, cfg.MaxConcurrentStreams) - assert.NotZero(t, cfg.MaxRecvMsgSize) - assert.NotZero(t, cfg.MaxSendMsgSize) - assert.NotZero(t, cfg.ConnectionTimeout) - assert.NotZero(t, cfg.Keepalive.MaxConnectionIdle) - assert.NotZero(t, cfg.Keepalive.MaxConnectionAge) - assert.NotZero(t, cfg.Keepalive.Time) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:testifylint +package config + +import ( + "testing" + "time" + + authn "github.com/agntcy/dir/server/authn/config" + authz "github.com/agntcy/dir/server/authz/config" + database "github.com/agntcy/dir/server/database/config" + sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" + ratelimitconfig "github.com/agntcy/dir/server/middleware/ratelimit/config" + publication "github.com/agntcy/dir/server/publication/config" + routing "github.com/agntcy/dir/server/routing/config" + store "github.com/agntcy/dir/server/store/config" + oci "github.com/agntcy/dir/server/store/oci/config" + sync "github.com/agntcy/dir/server/sync/config" + monitor "github.com/agntcy/dir/server/sync/monitor/config" + "github.com/stretchr/testify/assert" +) + +func TestConfig(t *testing.T) { + tests := []struct { + Name string + EnvVars map[string]string + ExpectedConfig *Config + }{ + { + 
Name: "Custom config", + EnvVars: map[string]string{ + "DIRECTORY_SERVER_LISTEN_ADDRESS": "example.com:8889", + "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "https://custom.schema.url", + "DIRECTORY_SERVER_STORE_PROVIDER": "provider", + "DIRECTORY_SERVER_STORE_OCI_LOCAL_DIR": "local-dir", + "DIRECTORY_SERVER_STORE_OCI_REGISTRY_ADDRESS": "example.com:5001", + "DIRECTORY_SERVER_STORE_OCI_REPOSITORY_NAME": "test-dir", + "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_INSECURE": "true", + "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_USERNAME": "username", + "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_PASSWORD": "password", + "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_ACCESS_TOKEN": "access-token", + "DIRECTORY_SERVER_STORE_OCI_AUTH_CONFIG_REFRESH_TOKEN": "refresh-token", + "DIRECTORY_SERVER_ROUTING_LISTEN_ADDRESS": "/ip4/1.1.1.1/tcp/1", + "DIRECTORY_SERVER_ROUTING_BOOTSTRAP_PEERS": "/ip4/1.1.1.1/tcp/1,/ip4/1.1.1.1/tcp/2", + "DIRECTORY_SERVER_ROUTING_KEY_PATH": "/path/to/key", + "DIRECTORY_SERVER_DATABASE_DB_TYPE": "sqlite", + "DIRECTORY_SERVER_DATABASE_SQLITE_DB_PATH": "sqlite.db", + "DIRECTORY_SERVER_SYNC_SCHEDULER_INTERVAL": "1s", + "DIRECTORY_SERVER_SYNC_WORKER_COUNT": "1", + "DIRECTORY_SERVER_SYNC_REGISTRY_MONITOR_CHECK_INTERVAL": "10s", + "DIRECTORY_SERVER_SYNC_WORKER_TIMEOUT": "10s", + "DIRECTORY_SERVER_SYNC_AUTH_CONFIG_USERNAME": "sync-user", + "DIRECTORY_SERVER_SYNC_AUTH_CONFIG_PASSWORD": "sync-password", + "DIRECTORY_SERVER_AUTHZ_ENABLED": "true", + "DIRECTORY_SERVER_AUTHZ_SOCKET_PATH": "/test/agent.sock", + "DIRECTORY_SERVER_AUTHZ_TRUST_DOMAIN": "dir.com", + "DIRECTORY_SERVER_PUBLICATION_SCHEDULER_INTERVAL": "10s", + "DIRECTORY_SERVER_PUBLICATION_WORKER_COUNT": "1", + "DIRECTORY_SERVER_PUBLICATION_WORKER_TIMEOUT": "10s", + }, + ExpectedConfig: &Config{ + ListenAddress: "example.com:8889", + OASFAPIValidation: OASFAPIValidationConfig{ + SchemaURL: "https://custom.schema.url", + Disable: false, + StrictMode: true, // Default is true (set in config.go) + }, + Connection: DefaultConnectionConfig(), // Connection defaults applied + Authn: authn.Config{ + Enabled: false, + Mode: authn.AuthModeX509, // Default from config.go:109 + Audiences: []string{}, + }, + Store: store.Config{ + Provider: "provider", + OCI: oci.Config{ + LocalDir: "local-dir", + RegistryAddress: "example.com:5001", + RepositoryName: "test-dir", + AuthConfig: oci.AuthConfig{ + Insecure: true, + Username: "username", + Password: "password", + RefreshToken: "refresh-token", + AccessToken: "access-token", + }, + }, + }, + Routing: routing.Config{ + ListenAddress: "/ip4/1.1.1.1/tcp/1", + BootstrapPeers: []string{ + "/ip4/1.1.1.1/tcp/1", + "/ip4/1.1.1.1/tcp/2", + }, + KeyPath: "/path/to/key", + GossipSub: routing.GossipSubConfig{ + Enabled: true, // Default value + }, + }, + Database: database.Config{ + DBType: "sqlite", + SQLite: sqliteconfig.Config{ + DBPath: "sqlite.db", + }, + }, + Sync: sync.Config{ + SchedulerInterval: 1 * time.Second, + WorkerCount: 1, + WorkerTimeout: 10 * time.Second, + RegistryMonitor: monitor.Config{ + CheckInterval: 10 * time.Second, + }, + AuthConfig: sync.AuthConfig{ + Username: "sync-user", + Password: "sync-password", + }, + }, + Authz: authz.Config{ + Enabled: true, + TrustDomain: "dir.com", + }, + Publication: publication.Config{ + SchedulerInterval: 10 * time.Second, + WorkerCount: 1, + WorkerTimeout: 10 * time.Second, + }, + Metrics: MetricsConfig{ + Enabled: true, + Address: ":9090", + }, + }, + }, + { + Name: "Default config", + EnvVars: map[string]string{}, + ExpectedConfig: &Config{ + ListenAddress: 
DefaultListenAddress, + OASFAPIValidation: OASFAPIValidationConfig{ + SchemaURL: DefaultSchemaURL, // Default OASF schema URL + Disable: false, // Default is false (set in config.go) + StrictMode: true, // Default is true (set in config.go) + }, + Connection: DefaultConnectionConfig(), // Connection defaults applied + Authn: authn.Config{ + Enabled: false, + Mode: authn.AuthModeX509, // Default from config.go:109 + Audiences: []string{}, + }, + Store: store.Config{ + Provider: store.DefaultProvider, + OCI: oci.Config{ + RegistryAddress: oci.DefaultRegistryAddress, + RepositoryName: oci.DefaultRepositoryName, + AuthConfig: oci.AuthConfig{ + Insecure: oci.DefaultAuthConfigInsecure, + }, + }, + }, + Routing: routing.Config{ + ListenAddress: routing.DefaultListenAddress, + BootstrapPeers: routing.DefaultBootstrapPeers, + GossipSub: routing.GossipSubConfig{ + Enabled: routing.DefaultGossipSubEnabled, + }, + }, + Database: database.Config{ + DBType: database.DefaultDBType, + SQLite: sqliteconfig.Config{ + DBPath: sqliteconfig.DefaultSQLiteDBPath, + }, + }, + Sync: sync.Config{ + SchedulerInterval: sync.DefaultSyncSchedulerInterval, + WorkerCount: sync.DefaultSyncWorkerCount, + WorkerTimeout: sync.DefaultSyncWorkerTimeout, + RegistryMonitor: monitor.Config{ + CheckInterval: monitor.DefaultCheckInterval, + }, + }, + Authz: authz.Config{}, + Publication: publication.Config{ + SchedulerInterval: publication.DefaultPublicationSchedulerInterval, + WorkerCount: publication.DefaultPublicationWorkerCount, + WorkerTimeout: publication.DefaultPublicationWorkerTimeout, + }, + Metrics: MetricsConfig{ + Enabled: DefaultMetricsEnabled, + Address: DefaultMetricsAddress, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + for k, v := range test.EnvVars { + t.Setenv(k, v) + } + + config, err := LoadConfig() + assert.NoError(t, err) + assert.Equal(t, *config, *test.ExpectedConfig) + }) + } +} + +// TestConfig_SchemaURL tests that OASF schema URL configuration is correctly parsed. +func TestConfig_SchemaURL(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + expectedSchemaURL string + }{ + { + name: "default schema URL", + envVars: map[string]string{}, + expectedSchemaURL: DefaultSchemaURL, + }, + { + name: "custom schema URL", + envVars: map[string]string{ + "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "https://custom.schema.url", + }, + expectedSchemaURL: "https://custom.schema.url", + }, + { + name: "empty schema URL (disable API validator)", + envVars: map[string]string{ + "DIRECTORY_SERVER_OASF_API_VALIDATION_SCHEMA_URL": "", + }, + expectedSchemaURL: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for k, v := range tt.envVars { + t.Setenv(k, v) + } + + // Load config + cfg, err := LoadConfig() + assert.NoError(t, err) + + // Verify schema URL configuration + assert.Equal(t, tt.expectedSchemaURL, cfg.OASFAPIValidation.SchemaURL) + }) + } +} + +// TestConfig_RateLimiting tests that rate limiting configuration is correctly parsed. 
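+//
+// For reference, the environment variables exercised here presumably mirror a
+// YAML block along these lines (the key names are an assumption, not verified here):
+//
+//	ratelimit:
+//	  enabled: true
+//	  global_rps: 50.0
+//	  global_burst: 100
+//	  per_client_rps: 500.0
+//	  per_client_burst: 1000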
+func TestConfig_RateLimiting(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + expectedConfig ratelimitconfig.Config + }{ + { + name: "rate limiting enabled with custom values", + envVars: map[string]string{ + "DIRECTORY_SERVER_RATELIMIT_ENABLED": "true", + "DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS": "50.0", + "DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST": "100", + "DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_RPS": "500.0", + "DIRECTORY_SERVER_RATELIMIT_PER_CLIENT_BURST": "1000", + }, + expectedConfig: ratelimitconfig.Config{ + Enabled: true, + GlobalRPS: 50.0, + GlobalBurst: 100, + PerClientRPS: 500.0, + PerClientBurst: 1000, + MethodLimits: map[string]ratelimitconfig.MethodLimit{}, + }, + }, + { + name: "rate limiting disabled (default)", + envVars: map[string]string{ + "DIRECTORY_SERVER_RATELIMIT_ENABLED": "false", + }, + expectedConfig: ratelimitconfig.Config{ + Enabled: false, + GlobalRPS: 0, + GlobalBurst: 0, + PerClientRPS: 0, + PerClientBurst: 0, + MethodLimits: map[string]ratelimitconfig.MethodLimit{}, + }, + }, + { + name: "rate limiting with partial configuration", + envVars: map[string]string{ + "DIRECTORY_SERVER_RATELIMIT_ENABLED": "true", + "DIRECTORY_SERVER_RATELIMIT_GLOBAL_RPS": "200.0", + "DIRECTORY_SERVER_RATELIMIT_GLOBAL_BURST": "400", + }, + expectedConfig: ratelimitconfig.Config{ + Enabled: true, + GlobalRPS: 200.0, + GlobalBurst: 400, + PerClientRPS: 0, + PerClientBurst: 0, + MethodLimits: map[string]ratelimitconfig.MethodLimit{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for k, v := range tt.envVars { + t.Setenv(k, v) + } + + // Load config + cfg, err := LoadConfig() + assert.NoError(t, err) + + // Verify rate limiting configuration + assert.Equal(t, tt.expectedConfig.Enabled, cfg.RateLimit.Enabled) + assert.Equal(t, tt.expectedConfig.GlobalRPS, cfg.RateLimit.GlobalRPS) + assert.Equal(t, tt.expectedConfig.GlobalBurst, cfg.RateLimit.GlobalBurst) + assert.Equal(t, tt.expectedConfig.PerClientRPS, cfg.RateLimit.PerClientRPS) + assert.Equal(t, tt.expectedConfig.PerClientBurst, cfg.RateLimit.PerClientBurst) + }) + } +} + +// TestConfig_RateLimitingValidation tests that invalid rate limiting configuration +// is properly validated during server initialization (will be tested in server tests). 
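+//
+// A minimal sketch of how startup code might gate on this validation
+// (illustrative only; the real wiring lives in the server package):
+//
+//	if cfg.RateLimit.Enabled {
+//		if err := cfg.RateLimit.Validate(); err != nil {
+//			return fmt.Errorf("invalid rate limit config: %w", err)
+//		}
+//	}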
+func TestConfig_RateLimitingValidation(t *testing.T) {
+	tests := []struct {
+		name        string
+		config      ratelimitconfig.Config
+		shouldError bool
+	}{
+		{
+			name: "valid rate limiting configuration",
+			config: ratelimitconfig.Config{
+				Enabled:        true,
+				GlobalRPS:      100.0,
+				GlobalBurst:    200,
+				PerClientRPS:   1000.0,
+				PerClientBurst: 2000,
+			},
+			shouldError: false,
+		},
+		{
+			name: "invalid rate limiting - negative RPS",
+			config: ratelimitconfig.Config{
+				Enabled:        true,
+				GlobalRPS:      -10.0,
+				GlobalBurst:    200,
+				PerClientRPS:   1000.0,
+				PerClientBurst: 2000,
+			},
+			shouldError: true,
+		},
+		{
+			name: "invalid rate limiting - negative burst",
+			config: ratelimitconfig.Config{
+				Enabled:        true,
+				GlobalRPS:      100.0,
+				GlobalBurst:    -200,
+				PerClientRPS:   1000.0,
+				PerClientBurst: 2000,
+			},
+			shouldError: true,
+		},
+		{
+			name: "disabled rate limiting - no validation",
+			config: ratelimitconfig.Config{
+				Enabled:        false,
+				GlobalRPS:      -100.0, // Invalid but should be ignored
+				GlobalBurst:    -200,
+				PerClientRPS:   -1000.0,
+				PerClientBurst: -2000,
+			},
+			shouldError: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := tt.config.Validate()
+			if tt.shouldError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+// TestDefaultConnectionConfig verifies that DefaultConnectionConfig returns
+// the correct production-safe default values for all connection parameters.
+func TestDefaultConnectionConfig(t *testing.T) {
+	cfg := DefaultConnectionConfig()
+
+	// Verify connection limits
+	assert.Equal(t, uint32(1000), cfg.MaxConcurrentStreams, "MaxConcurrentStreams should be 1000")
+	assert.Equal(t, 4*1024*1024, cfg.MaxRecvMsgSize, "MaxRecvMsgSize should be 4 MB")
+	assert.Equal(t, 4*1024*1024, cfg.MaxSendMsgSize, "MaxSendMsgSize should be 4 MB")
+	assert.Equal(t, 120*time.Second, cfg.ConnectionTimeout, "ConnectionTimeout should be 120 seconds")
+
+	// Verify keepalive parameters
+	assert.Equal(t, 15*time.Minute, cfg.Keepalive.MaxConnectionIdle, "MaxConnectionIdle should be 15 minutes")
+	assert.Equal(t, 30*time.Minute, cfg.Keepalive.MaxConnectionAge, "MaxConnectionAge should be 30 minutes")
+	assert.Equal(t, 5*time.Minute, cfg.Keepalive.MaxConnectionAgeGrace, "MaxConnectionAgeGrace should be 5 minutes")
+	assert.Equal(t, 5*time.Minute, cfg.Keepalive.Time, "Keepalive Time should be 5 minutes")
+	assert.Equal(t, 1*time.Minute, cfg.Keepalive.Timeout, "Keepalive Timeout should be 1 minute")
+	assert.Equal(t, 1*time.Minute, cfg.Keepalive.MinTime, "Keepalive MinTime should be 1 minute")
+	assert.Equal(t, true, cfg.Keepalive.PermitWithoutStream, "PermitWithoutStream should be true")
+}
+
+// TestConnectionConfig_DefaultValues verifies that LoadConfig returns default
+// connection configuration when no environment variables are set.
+func TestConnectionConfig_DefaultValues(t *testing.T) {
+	// No environment variables set - should use defaults
+	cfg, err := LoadConfig()
+	assert.NoError(t, err)
+
+	// Verify connection configuration is populated; LoadConfig applies
+	// connection defaults automatically, and the detailed per-field checks
+	// live in TestLoadConfig_ConnectionDefaults below.
+	assert.NotNil(t, cfg.Connection)
+}
+
+// TestConnectionConfig_Constants verifies that all connection constants
+// are defined with the expected values based on production best practices.
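+//
+// For context, these are the values that would feed grpc-go server options,
+// roughly as follows (a sketch assuming the standard google.golang.org/grpc
+// and google.golang.org/grpc/keepalive packages, not this repo's actual wiring):
+//
+//	opts := []grpc.ServerOption{
+//		grpc.MaxConcurrentStreams(cfg.MaxConcurrentStreams),
+//		grpc.MaxRecvMsgSize(cfg.MaxRecvMsgSize),
+//		grpc.MaxSendMsgSize(cfg.MaxSendMsgSize),
+//		grpc.ConnectionTimeout(cfg.ConnectionTimeout),
+//		grpc.KeepaliveParams(keepalive.ServerParameters{
+//			MaxConnectionIdle:     cfg.Keepalive.MaxConnectionIdle,
+//			MaxConnectionAge:      cfg.Keepalive.MaxConnectionAge,
+//			MaxConnectionAgeGrace: cfg.Keepalive.MaxConnectionAgeGrace,
+//			Time:                  cfg.Keepalive.Time,
+//			Timeout:               cfg.Keepalive.Timeout,
+//		}),
+//	}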
+func TestConnectionConfig_Constants(t *testing.T) { + // Connection limits - use EqualValues for cross-type comparison + assert.EqualValues(t, 1000, DefaultMaxConcurrentStreams) + assert.EqualValues(t, 4*1024*1024, DefaultMaxRecvMsgSize) + assert.EqualValues(t, 4*1024*1024, DefaultMaxSendMsgSize) + assert.Equal(t, 120*time.Second, DefaultConnectionTimeout) + + // Keepalive parameters + assert.Equal(t, 15*time.Minute, DefaultMaxConnectionIdle) + assert.Equal(t, 30*time.Minute, DefaultMaxConnectionAge) + assert.Equal(t, 5*time.Minute, DefaultMaxConnectionAgeGrace) + assert.Equal(t, 5*time.Minute, DefaultKeepaliveTime) + assert.Equal(t, 1*time.Minute, DefaultKeepaliveTimeout) + assert.Equal(t, 1*time.Minute, DefaultMinTime) + assert.True(t, DefaultPermitWithoutStream) +} + +// TestConnectionConfig_StructTags verifies that struct tags are properly +// defined for JSON and mapstructure serialization. +func TestConnectionConfig_StructTags(t *testing.T) { + // This test ensures that configuration can be properly serialized + // and deserialized from YAML/JSON files + cfg := ConnectionConfig{ + MaxConcurrentStreams: 2000, + MaxRecvMsgSize: 8 * 1024 * 1024, + MaxSendMsgSize: 8 * 1024 * 1024, + ConnectionTimeout: 60 * time.Second, + Keepalive: KeepaliveConfig{ + MaxConnectionIdle: 10 * time.Minute, + MaxConnectionAge: 20 * time.Minute, + MaxConnectionAgeGrace: 3 * time.Minute, + Time: 3 * time.Minute, + Timeout: 30 * time.Second, + MinTime: 30 * time.Second, + PermitWithoutStream: false, + }, + } + + // Verify struct is not empty and can be created + assert.NotNil(t, cfg) + assert.Equal(t, uint32(2000), cfg.MaxConcurrentStreams) + assert.NotNil(t, cfg.Keepalive) + assert.Equal(t, 10*time.Minute, cfg.Keepalive.MaxConnectionIdle) +} + +// TestConnectionConfig_ProductionSafety verifies that default values +// are production-safe and follow gRPC best practices. 
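+//
+// MinTime and PermitWithoutStream correspond to gRPC's keepalive enforcement
+// policy, roughly (a sketch, assuming grpc-go's keepalive package):
+//
+//	grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+//		MinTime:             cfg.Keepalive.MinTime,
+//		PermitWithoutStream: cfg.Keepalive.PermitWithoutStream,
+//	})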
+func TestConnectionConfig_ProductionSafety(t *testing.T) { + cfg := DefaultConnectionConfig() + + // Verify MaxConcurrentStreams is reasonable (not too low, not unlimited) + assert.Greater(t, cfg.MaxConcurrentStreams, uint32(100), "MaxConcurrentStreams should allow reasonable concurrency") + assert.Less(t, cfg.MaxConcurrentStreams, uint32(10000), "MaxConcurrentStreams should not be excessive") + + // Verify message sizes protect against memory exhaustion + assert.Greater(t, cfg.MaxRecvMsgSize, 1*1024*1024, "MaxRecvMsgSize should allow reasonable messages") + assert.Less(t, cfg.MaxRecvMsgSize, 100*1024*1024, "MaxRecvMsgSize should prevent memory exhaustion") + + // Verify keepalive times are reasonable + assert.Greater(t, cfg.Keepalive.Time, 1*time.Minute, "Keepalive Time should not be too aggressive") + assert.Less(t, cfg.Keepalive.Time, 30*time.Minute, "Keepalive Time should detect dead connections") + + // Verify idle timeout is reasonable + assert.Greater(t, cfg.Keepalive.MaxConnectionIdle, 5*time.Minute, "MaxConnectionIdle should not be too aggressive") + assert.Less(t, cfg.Keepalive.MaxConnectionIdle, 2*time.Hour, "MaxConnectionIdle should free resources") + + // Verify connection age rotation + assert.Greater(t, cfg.Keepalive.MaxConnectionAge, cfg.Keepalive.MaxConnectionIdle, "MaxConnectionAge should be greater than MaxConnectionIdle") + assert.Greater(t, cfg.Keepalive.MaxConnectionAgeGrace, 1*time.Minute, "MaxConnectionAgeGrace should allow inflight RPCs to complete") + + // Verify keepalive timeout is reasonable + assert.Greater(t, cfg.Keepalive.Timeout, 10*time.Second, "Keepalive Timeout should allow for network delays") + assert.Less(t, cfg.Keepalive.Timeout, 5*time.Minute, "Keepalive Timeout should not wait too long") + + // Verify MinTime prevents abuse + assert.Greater(t, cfg.Keepalive.MinTime, 10*time.Second, "MinTime should prevent excessive pings") + + // Verify PermitWithoutStream is enabled for better health detection + assert.True(t, cfg.Keepalive.PermitWithoutStream, "PermitWithoutStream should be enabled") +} + +// TestConnectionConfig_WithDefaults verifies that WithDefaults returns +// the correct configuration based on whether the config is set or not. 
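+//
+// The behavior asserted below is consistent with an all-or-nothing shape like
+// this (a sketch, not necessarily the actual implementation):
+//
+//	func (c ConnectionConfig) WithDefaults() ConnectionConfig {
+//		if c.MaxConcurrentStreams == 0 {
+//			return DefaultConnectionConfig()
+//		}
+//		return c
+//	}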
+func TestConnectionConfig_WithDefaults(t *testing.T) { + tests := []struct { + name string + input ConnectionConfig + expected ConnectionConfig + }{ + { + name: "empty config returns defaults", + input: ConnectionConfig{}, + expected: DefaultConnectionConfig(), + }, + { + name: "zero MaxConcurrentStreams returns defaults", + input: ConnectionConfig{ + MaxConcurrentStreams: 0, + MaxRecvMsgSize: 8 * 1024 * 1024, + }, + expected: DefaultConnectionConfig(), + }, + { + name: "configured values are preserved", + input: ConnectionConfig{ + MaxConcurrentStreams: 2000, + MaxRecvMsgSize: 8 * 1024 * 1024, + MaxSendMsgSize: 8 * 1024 * 1024, + ConnectionTimeout: 60 * time.Second, + Keepalive: KeepaliveConfig{ + MaxConnectionIdle: 10 * time.Minute, + MaxConnectionAge: 20 * time.Minute, + MaxConnectionAgeGrace: 3 * time.Minute, + Time: 3 * time.Minute, + Timeout: 30 * time.Second, + MinTime: 30 * time.Second, + PermitWithoutStream: false, + }, + }, + expected: ConnectionConfig{ + MaxConcurrentStreams: 2000, + MaxRecvMsgSize: 8 * 1024 * 1024, + MaxSendMsgSize: 8 * 1024 * 1024, + ConnectionTimeout: 60 * time.Second, + Keepalive: KeepaliveConfig{ + MaxConnectionIdle: 10 * time.Minute, + MaxConnectionAge: 20 * time.Minute, + MaxConnectionAgeGrace: 3 * time.Minute, + Time: 3 * time.Minute, + Timeout: 30 * time.Second, + MinTime: 30 * time.Second, + PermitWithoutStream: false, + }, + }, + }, + { + name: "partial config with non-zero MaxConcurrentStreams is preserved", + input: ConnectionConfig{ + MaxConcurrentStreams: 500, + // Other fields zero/default + }, + expected: ConnectionConfig{ + MaxConcurrentStreams: 500, + // Other fields remain zero/default as set + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.input.WithDefaults() + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestLoadConfig_ConnectionDefaults verifies that LoadConfig applies +// connection management defaults when no config file is present. +func TestLoadConfig_ConnectionDefaults(t *testing.T) { + // LoadConfig should apply defaults automatically + cfg, err := LoadConfig() + assert.NoError(t, err) + assert.NotNil(t, cfg) + + // Verify connection config has defaults applied (use EqualValues for uint32) + assert.EqualValues(t, DefaultMaxConcurrentStreams, cfg.Connection.MaxConcurrentStreams) + assert.EqualValues(t, DefaultMaxRecvMsgSize, cfg.Connection.MaxRecvMsgSize) + assert.EqualValues(t, DefaultMaxSendMsgSize, cfg.Connection.MaxSendMsgSize) + assert.Equal(t, DefaultConnectionTimeout, cfg.Connection.ConnectionTimeout) + + // Verify keepalive defaults + assert.Equal(t, DefaultMaxConnectionIdle, cfg.Connection.Keepalive.MaxConnectionIdle) + assert.Equal(t, DefaultMaxConnectionAge, cfg.Connection.Keepalive.MaxConnectionAge) + assert.Equal(t, DefaultMaxConnectionAgeGrace, cfg.Connection.Keepalive.MaxConnectionAgeGrace) + assert.Equal(t, DefaultKeepaliveTime, cfg.Connection.Keepalive.Time) + assert.Equal(t, DefaultKeepaliveTimeout, cfg.Connection.Keepalive.Timeout) + assert.Equal(t, DefaultMinTime, cfg.Connection.Keepalive.MinTime) + assert.Equal(t, DefaultPermitWithoutStream, cfg.Connection.Keepalive.PermitWithoutStream) +} + +// TestConnectionConfig_YAMLSerialization verifies that connection configuration +// can be serialized to and from YAML format correctly. 
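+//
+// The YAML form of the custom configuration below (mirrored in
+// testdata/connection_custom.yml) looks like:
+//
+//	connection:
+//	  max_concurrent_streams: 2000
+//	  max_recv_msg_size: 8388608 # 8 MB
+//	  max_send_msg_size: 8388608 # 8 MB
+//	  connection_timeout: 60s
+//	  keepalive:
+//	    max_connection_idle: 10m
+//	    max_connection_age: 20m
+//	    max_connection_age_grace: 3m
+//	    time: 3m
+//	    timeout: 30s
+//	    min_time: 30s
+//	    permit_without_stream: false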
+func TestConnectionConfig_YAMLSerialization(t *testing.T) { + // Create a custom configuration + customConfig := ConnectionConfig{ + MaxConcurrentStreams: 2000, + MaxRecvMsgSize: 8388608, // 8 MB + MaxSendMsgSize: 8388608, // 8 MB + ConnectionTimeout: 60 * time.Second, + Keepalive: KeepaliveConfig{ + MaxConnectionIdle: 10 * time.Minute, + MaxConnectionAge: 20 * time.Minute, + MaxConnectionAgeGrace: 3 * time.Minute, + Time: 3 * time.Minute, + Timeout: 30 * time.Second, + MinTime: 30 * time.Second, + PermitWithoutStream: false, + }, + } + + // Verify all fields can be set with custom values + assert.Equal(t, uint32(2000), customConfig.MaxConcurrentStreams) + assert.Equal(t, 8388608, customConfig.MaxRecvMsgSize) + assert.Equal(t, 8388608, customConfig.MaxSendMsgSize) + assert.Equal(t, 60*time.Second, customConfig.ConnectionTimeout) + assert.Equal(t, 10*time.Minute, customConfig.Keepalive.MaxConnectionIdle) + assert.Equal(t, 20*time.Minute, customConfig.Keepalive.MaxConnectionAge) + assert.Equal(t, 3*time.Minute, customConfig.Keepalive.MaxConnectionAgeGrace) + assert.Equal(t, 3*time.Minute, customConfig.Keepalive.Time) + assert.Equal(t, 30*time.Second, customConfig.Keepalive.Timeout) + assert.Equal(t, 30*time.Second, customConfig.Keepalive.MinTime) + assert.False(t, customConfig.Keepalive.PermitWithoutStream) +} + +// TestConnectionConfig_MapstructureTags verifies that struct tags are properly +// defined for mapstructure to work with YAML/JSON loading. +func TestConnectionConfig_MapstructureTags(t *testing.T) { + // This test ensures mapstructure can parse the config + // The actual YAML loading is tested through LoadConfig in integration tests + + // Verify we have the correct field types for mapstructure + cfg := ConnectionConfig{ + MaxConcurrentStreams: 1000, + MaxRecvMsgSize: 4 * 1024 * 1024, + MaxSendMsgSize: 4 * 1024 * 1024, + ConnectionTimeout: 120 * time.Second, + Keepalive: KeepaliveConfig{ + MaxConnectionIdle: 15 * time.Minute, + MaxConnectionAge: 30 * time.Minute, + MaxConnectionAgeGrace: 5 * time.Minute, + Time: 5 * time.Minute, + Timeout: 1 * time.Minute, + MinTime: 1 * time.Minute, + PermitWithoutStream: true, + }, + } + + // Verify struct can be created and all fields are accessible + assert.NotZero(t, cfg.MaxConcurrentStreams) + assert.NotZero(t, cfg.MaxRecvMsgSize) + assert.NotZero(t, cfg.MaxSendMsgSize) + assert.NotZero(t, cfg.ConnectionTimeout) + assert.NotZero(t, cfg.Keepalive.MaxConnectionIdle) + assert.NotZero(t, cfg.Keepalive.MaxConnectionAge) + assert.NotZero(t, cfg.Keepalive.Time) +} diff --git a/server/config/future/server.config.yml b/server/config/future/server.config.yml index 61546db46..4bcdef92c 100644 --- a/server/config/future/server.config.yml +++ b/server/config/future/server.config.yml @@ -1,124 +1,124 @@ -# Content-addressable storage settings. -storage: - # Storage driver: local, oci - driver: oci - - # OCI-backed store - oci: - # Cache dir for OCI data. - # Data is initially stored into cache directory. - # If disabled, data will be pushed directly to remote, - # Otherwise it will be pushed only once the buffer_size is reached. - # On application restart, cache is backed up/pushed again. - cache: - enabled: true - buffer_size: 100M - source: /tmp/agntcy-dir/store/oci - - # If set to false, use cache as main source. - # Do not push to remote store at all. - enabled: false - # Registry to use on remote. - registry: ghcr.io - # All data will be stored under this repo. - # Objects are pushed as tags, manifests, and blobs. 
- repo: agntcy-dir - # Auth credentials to use. - auth: - access_token: access-token - refresh_token: refresh-token - - # If you wish to use non-OCI backed store, use this. - # For example, you can mount an S3 as a volume. - local: - source: /tmp/agntcy-dir/store/local - -# Routing settings for the peer-to-peer network. -routing: - # Routing table filesystem backup. - # It is okay for the routing table to be deleted - # since items are rebroadcasted. - # This is just to speed up serving for future requests. - source: /tmp/agntcy-dir/routing - - # Path to private key file for peer ID. - # Ideally, this should be encrypted. - # TODO: add more auth providers for peer identification. - # TODO: allow passing ENV flags for decryption or setting auth data. - auth: - key: /tmp/agntcy-dir/node.privkey - - # Use in server mode, otherwise this will only be a client node - # that is used to send requests to DHT, but not respond to them. - # DHT is exposed to: ie. /dir/dht/v1.0.0 - server: true - - # Use in private mode, ie. /dir/dht/lan/v1.0.0 - private: true - - # Nodes to use for bootstrapping of the DHT. - # We read initial routing tables here and get introduced - # to the network. - bootstrap_peers: - - /ipv4/addr/port/p2p/node1 - - /ipv4/addr/port/p2p/node2 - - /ipv4/addr/port/p2p/node2 - - # Peers to add to local routing table on start. - # These form the initial overlay network for this peer. - peers: - - /ipv4/addr/port/p2p/hostA - - /ipv4/addr/port/p2p/hostB - - # Serve and respond to the following labels on the network. - # We will be able to query this data locally. - # We should pick labels with good aggregation. - # - # For Discover API, we can use bloom filters - # https://hur.st/bloomfilter/?n=100000000&p=1.0E-7 - # - # One PutValue request value to DHT can send 10KBs, ie - # 10240/256 (bytes per label) = 40 full-length labels - # - # We should limit to N different labels, for example - # 8 skills + 8 locators - # - # We can validate peer ID on receiving nodes to prevent bad actors from writing - # data on certain peers. - labels: | - # # By default, we participate in agent model content routing - # object/type/agent - - # Agents with specific media skills - agent/skill/text - agent/skill/video - agent/skill/audio - - # Agents for specific envs - agent/locator/binary - agent/locator/source-code - - # Other app-specific keys, e.g. publishers - publisher/agntcy - publisher/cisco - object/type/my-type - - # Republication process for data announced to the network. - # Newly published items are announced as part of the request. - republish_period: 4h - - # Sync certain labels into storage our own store. - # Once we pull the data, we will inform the network - # that we are providing it (ie. publish announce for the model - # plus the data on about the labels). - # This can be used as a backup node for tagged data. - # In addition to the routing labels we are storing for routing, - # we will also append these labels to the list so that we can - # listen for the network requests. - sync: - # Labels must fully match (AND clause against label matching) to allow syncing. - - labels: | - # Sync all Cisco text-based agents - agent/skill/text - publisher/cisco - object/type/agent +# Content-addressable storage settings. +storage: + # Storage driver: local, oci + driver: oci + + # OCI-backed store + oci: + # Cache dir for OCI data. + # Data is initially stored into cache directory. 
+    # If disabled, data will be pushed directly to remote.
+    # Otherwise, it will be pushed only once the buffer_size is reached.
+    # On application restart, cache is backed up/pushed again.
+    cache:
+      enabled: true
+      buffer_size: 100M
+      source: /tmp/agntcy-dir/store/oci
+
+    # If set to false, use cache as main source.
+    # Do not push to remote store at all.
+    enabled: false
+    # Registry to use on remote.
+    registry: ghcr.io
+    # All data will be stored under this repo.
+    # Objects are pushed as tags, manifests, and blobs.
+    repo: agntcy-dir
+    # Auth credentials to use.
+    auth:
+      access_token: access-token
+      refresh_token: refresh-token
+
+  # If you wish to use a non-OCI-backed store, use this.
+  # For example, you can mount an S3 bucket as a volume.
+  local:
+    source: /tmp/agntcy-dir/store/local
+
+# Routing settings for the peer-to-peer network.
+routing:
+  # Routing table filesystem backup.
+  # It is okay for the routing table to be deleted
+  # since items are rebroadcasted.
+  # This is just to speed up serving for future requests.
+  source: /tmp/agntcy-dir/routing
+
+  # Path to private key file for peer ID.
+  # Ideally, this should be encrypted.
+  # TODO: add more auth providers for peer identification.
+  # TODO: allow passing ENV flags for decryption or setting auth data.
+  auth:
+    key: /tmp/agntcy-dir/node.privkey
+
+  # Use in server mode; otherwise this will only be a client node
+  # that is used to send requests to the DHT, but not respond to them.
+  # The DHT is exposed at e.g. /dir/dht/v1.0.0
+  server: true
+
+  # Use in private mode, e.g. /dir/dht/lan/v1.0.0
+  private: true
+
+  # Nodes to use for bootstrapping of the DHT.
+  # We read initial routing tables here and get introduced
+  # to the network.
+  bootstrap_peers:
+    - /ip4/addr/port/p2p/node1
+    - /ip4/addr/port/p2p/node2
+    - /ip4/addr/port/p2p/node3
+
+  # Peers to add to local routing table on start.
+  # These form the initial overlay network for this peer.
+  peers:
+    - /ip4/addr/port/p2p/hostA
+    - /ip4/addr/port/p2p/hostB
+
+  # Serve and respond to the following labels on the network.
+  # We will be able to query this data locally.
+  # We should pick labels with good aggregation.
+  #
+  # For the Discover API, we can use bloom filters
+  # https://hur.st/bloomfilter/?n=100000000&p=1.0E-7
+  #
+  # A single DHT PutValue request can carry about 10 KB, i.e.
+  # 10240 / 256 bytes per label = 40 full-length labels.
+  #
+  # We should limit to N different labels, for example
+  # 8 skills + 8 locators (16 labels * 256 bytes = 4 KB, well within one request).
+  #
+  # We can validate the peer ID on receiving nodes to prevent bad actors
+  # from writing data for certain peers.
+  labels: |
+    # # By default, we participate in agent model content routing
+    # object/type/agent
+
+    # Agents with specific media skills
+    agent/skill/text
+    agent/skill/video
+    agent/skill/audio
+
+    # Agents for specific envs
+    agent/locator/binary
+    agent/locator/source-code
+
+    # Other app-specific keys, e.g. publishers
+    publisher/agntcy
+    publisher/cisco
+    object/type/my-type
+
+  # Republication process for data announced to the network.
+  # Newly published items are announced as part of the request.
+  republish_period: 4h
+
+  # Sync certain labels into our own store.
+  # Once we pull the data, we will inform the network
+  # that we are providing it (i.e. publish an announcement for the model
+  # plus the data about the labels).
+  # This can be used as a backup node for tagged data.
+  # In addition to the labels we already store for routing,
+  # we will also append these labels to the list so that we can
+  # listen for the network requests.
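+  # For example, with the single rule below, a peer only syncs records that
+  # carry all three labels: agent/skill/text, publisher/cisco, and
+  # object/type/agent.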
+  sync:
+    # Labels must fully match (AND semantics across all listed labels) to allow syncing.
+    - labels: |
+        # Sync all Cisco text-based agents
+        agent/skill/text
+        publisher/cisco
+        object/type/agent
diff --git a/server/config/testdata/connection_custom.yml b/server/config/testdata/connection_custom.yml
index 7c6963ac5..fb0e2c2b1 100644
--- a/server/config/testdata/connection_custom.yml
+++ b/server/config/testdata/connection_custom.yml
@@ -1,14 +1,14 @@
-connection:
-  max_concurrent_streams: 2000
-  max_recv_msg_size: 8388608 # 8 MB
-  max_send_msg_size: 8388608 # 8 MB
-  connection_timeout: 60s
-  keepalive:
-    max_connection_idle: 10m
-    max_connection_age: 20m
-    max_connection_age_grace: 3m
-    time: 3m
-    timeout: 30s
-    min_time: 30s
-    permit_without_stream: false
-
+connection:
+  max_concurrent_streams: 2000
+  max_recv_msg_size: 8388608 # 8 MB
+  max_send_msg_size: 8388608 # 8 MB
+  connection_timeout: 60s
+  keepalive:
+    max_connection_idle: 10m
+    max_connection_age: 20m
+    max_connection_age_grace: 3m
+    time: 3m
+    timeout: 30s
+    min_time: 30s
+    permit_without_stream: false
+
diff --git a/server/config/testdata/connection_defaults.yml b/server/config/testdata/connection_defaults.yml
index 8293b6a3f..fa41a6867 100644
--- a/server/config/testdata/connection_defaults.yml
+++ b/server/config/testdata/connection_defaults.yml
@@ -1,3 +1,3 @@
-# Empty config file - should use defaults for connection management
-listen_address: "0.0.0.0:9999"
-
+# Config file without a connection section - should use defaults for connection management
+listen_address: "0.0.0.0:9999"
+
diff --git a/server/controller/events.go b/server/controller/events.go
index 9a605dabb..23afedccd 100644
--- a/server/controller/events.go
+++ b/server/controller/events.go
@@ -1,80 +1,80 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package controller
-
-import (
-	eventsv1 "github.com/agntcy/dir/api/events/v1"
-	"github.com/agntcy/dir/server/events"
-	"github.com/agntcy/dir/utils/logging"
-)
-
-var eventsLogger = logging.Logger("controller/events")
-
-type eventsCtlr struct {
-	eventsv1.UnimplementedEventServiceServer
-	eventService *events.Service
-}
-
-// NewEventsController creates a new events controller.
-func NewEventsController(eventService *events.Service) eventsv1.EventServiceServer {
-	return &eventsCtlr{
-		eventService:                    eventService,
-		UnimplementedEventServiceServer: eventsv1.UnimplementedEventServiceServer{},
-	}
-}
-
-// Listen implements the event streaming RPC.
-// It creates a subscription on the event bus and streams matching events to the client.
-func (c *eventsCtlr) Listen(req *eventsv1.ListenRequest, stream eventsv1.EventService_ListenServer) error { - eventsLogger.Info("Client connected to event stream", - "event_types", req.GetEventTypes(), - "label_filters", req.GetLabelFilters(), - "cid_filters", req.GetCidFilters()) - - // Subscribe to event bus - subID, eventCh := c.eventService.Bus().Subscribe(req) - defer c.eventService.Bus().Unsubscribe(subID) - - eventsLogger.Debug("Subscription created", "subscription_id", subID) - - // Stream events to client - for { - select { - case <-stream.Context().Done(): - eventsLogger.Info("Client disconnected from event stream", - "subscription_id", subID, - "reason", stream.Context().Err()) - - return nil - - case event, ok := <-eventCh: - if !ok { - // Channel closed - eventsLogger.Info("Event channel closed", "subscription_id", subID) - - return nil - } - - // Convert event to proto and wrap in ListenResponse - response := &eventsv1.ListenResponse{ - Event: event.ToProto(), - } - - // Send to client - if err := stream.Send(response); err != nil { - eventsLogger.Error("Failed to send event to client", - "subscription_id", subID, - "event_id", event.ID, - "error", err) - - return err //nolint:wrapcheck // gRPC stream error - pass through unchanged - } - - eventsLogger.Debug("Event sent to client", - "subscription_id", subID, - "event_id", event.ID, - "event_type", event.Type) - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/utils/logging" +) + +var eventsLogger = logging.Logger("controller/events") + +type eventsCtlr struct { + eventsv1.UnimplementedEventServiceServer + eventService *events.Service +} + +// NewEventsController creates a new events controller. +func NewEventsController(eventService *events.Service) eventsv1.EventServiceServer { + return &eventsCtlr{ + eventService: eventService, + UnimplementedEventServiceServer: eventsv1.UnimplementedEventServiceServer{}, + } +} + +// Listen implements the event streaming RPC. +// It creates a subscription on the event bus and streams matching events to the client. 
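+//
+// A minimal client-side sketch (assuming the generated EventServiceClient; the
+// handle() helper is hypothetical):
+//
+//	stream, err := client.Listen(ctx, &eventsv1.ListenRequest{
+//		EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err != nil {
+//			return err
+//		}
+//		handle(resp.GetEvent())
+//	}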
+func (c *eventsCtlr) Listen(req *eventsv1.ListenRequest, stream eventsv1.EventService_ListenServer) error { + eventsLogger.Info("Client connected to event stream", + "event_types", req.GetEventTypes(), + "label_filters", req.GetLabelFilters(), + "cid_filters", req.GetCidFilters()) + + // Subscribe to event bus + subID, eventCh := c.eventService.Bus().Subscribe(req) + defer c.eventService.Bus().Unsubscribe(subID) + + eventsLogger.Debug("Subscription created", "subscription_id", subID) + + // Stream events to client + for { + select { + case <-stream.Context().Done(): + eventsLogger.Info("Client disconnected from event stream", + "subscription_id", subID, + "reason", stream.Context().Err()) + + return nil + + case event, ok := <-eventCh: + if !ok { + // Channel closed + eventsLogger.Info("Event channel closed", "subscription_id", subID) + + return nil + } + + // Convert event to proto and wrap in ListenResponse + response := &eventsv1.ListenResponse{ + Event: event.ToProto(), + } + + // Send to client + if err := stream.Send(response); err != nil { + eventsLogger.Error("Failed to send event to client", + "subscription_id", subID, + "event_id", event.ID, + "error", err) + + return err //nolint:wrapcheck // gRPC stream error - pass through unchanged + } + + eventsLogger.Debug("Event sent to client", + "subscription_id", subID, + "event_id", event.ID, + "event_type", event.Type) + } + } +} diff --git a/server/controller/events_test.go b/server/controller/events_test.go index 37bfd583a..b84e38c7a 100644 --- a/server/controller/events_test.go +++ b/server/controller/events_test.go @@ -1,179 +1,179 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "context" - "errors" - "io" - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events" -) - -// mockListenServer implements EventService_ListenServer for testing. 
-type mockListenServer struct { - eventsv1.EventService_ListenServer - ctx context.Context //nolint:containedctx // Needed for mock gRPC stream testing - sentMsgs []*eventsv1.ListenResponse -} - -func (m *mockListenServer) Context() context.Context { - return m.ctx -} - -func (m *mockListenServer) Send(resp *eventsv1.ListenResponse) error { - m.sentMsgs = append(m.sentMsgs, resp) - - return nil -} - -func TestEventsControllerListen(t *testing.T) { - // Create event service - eventService := events.New() - - defer func() { _ = eventService.Stop() }() - - // Create controller - controller := NewEventsController(eventService) - - // Create mock stream - ctx, cancel := context.WithCancel(t.Context()) - defer cancel() - - mockStream := &mockListenServer{ - ctx: ctx, - sentMsgs: make([]*eventsv1.ListenResponse, 0), - } - - // Start listening in background - errCh := make(chan error, 1) - - go func() { - req := &eventsv1.ListenRequest{} - errCh <- controller.Listen(req, mockStream) - }() - - // Give it time to subscribe - time.Sleep(50 * time.Millisecond) - - // Publish event - eventService.Bus().RecordPushed("bafytest123", []string{"/skills/AI"}) - - // Give it time to process - time.Sleep(50 * time.Millisecond) - - // Cancel context to stop listening - cancel() - - // Wait for Listen to return - select { - case err := <-errCh: - if err != nil { - t.Errorf("Listen returned error: %v", err) - } - case <-time.After(time.Second): - t.Error("Timeout waiting for Listen to return") - } - - // Verify event was sent - if len(mockStream.sentMsgs) != 1 { - t.Errorf("Expected 1 message sent, got %d", len(mockStream.sentMsgs)) - } - - if len(mockStream.sentMsgs) > 0 { - event := mockStream.sentMsgs[0].GetEvent() - if event.GetType() != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.GetType()) - } - - if event.GetResourceId() != "bafytest123" { - t.Errorf("Expected resource_id bafytest123, got %s", event.GetResourceId()) - } - } -} - -func TestEventsControllerListenWithFilters(t *testing.T) { - eventService := events.New() - - defer func() { _ = eventService.Stop() }() - - controller := NewEventsController(eventService) - - ctx, cancel := context.WithCancel(t.Context()) - defer cancel() - - mockStream := &mockListenServer{ - ctx: ctx, - sentMsgs: make([]*eventsv1.ListenResponse, 0), - } - - // Listen with filters - errCh := make(chan error, 1) - - go func() { - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - } - errCh <- controller.Listen(req, mockStream) - }() - - time.Sleep(50 * time.Millisecond) - - // Publish matching event - eventService.Bus().RecordPushed("bafytest123", nil) - - // Publish non-matching event - eventService.Bus().RecordPublished("bafytest456", nil) - - time.Sleep(50 * time.Millisecond) - cancel() - - // Wait for completion - <-errCh - - // Should have received only the matching event - if len(mockStream.sentMsgs) != 1 { - t.Errorf("Expected 1 message (filtered), got %d", len(mockStream.sentMsgs)) - } - - if len(mockStream.sentMsgs) > 0 { - event := mockStream.sentMsgs[0].GetEvent() - if event.GetType() != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.GetType()) - } - } -} - -func TestEventsControllerListenContextCancellation(t *testing.T) { - eventService := events.New() - - defer func() { _ = eventService.Stop() }() - - controller := NewEventsController(eventService) - - // Create context that's already cancelled - ctx, 
cancel := context.WithCancel(t.Context()) - cancel() // Cancel immediately - - mockStream := &mockListenServer{ - ctx: ctx, - sentMsgs: make([]*eventsv1.ListenResponse, 0), - } - - // Listen should return immediately due to cancelled context - req := &eventsv1.ListenRequest{} - err := controller.Listen(req, mockStream) - - if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { - t.Errorf("Unexpected error: %v", err) - } - - // Should not have sent any messages - if len(mockStream.sentMsgs) != 0 { - t.Errorf("Expected 0 messages with cancelled context, got %d", len(mockStream.sentMsgs)) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "errors" + "io" + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" +) + +// mockListenServer implements EventService_ListenServer for testing. +type mockListenServer struct { + eventsv1.EventService_ListenServer + ctx context.Context //nolint:containedctx // Needed for mock gRPC stream testing + sentMsgs []*eventsv1.ListenResponse +} + +func (m *mockListenServer) Context() context.Context { + return m.ctx +} + +func (m *mockListenServer) Send(resp *eventsv1.ListenResponse) error { + m.sentMsgs = append(m.sentMsgs, resp) + + return nil +} + +func TestEventsControllerListen(t *testing.T) { + // Create event service + eventService := events.New() + + defer func() { _ = eventService.Stop() }() + + // Create controller + controller := NewEventsController(eventService) + + // Create mock stream + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + mockStream := &mockListenServer{ + ctx: ctx, + sentMsgs: make([]*eventsv1.ListenResponse, 0), + } + + // Start listening in background + errCh := make(chan error, 1) + + go func() { + req := &eventsv1.ListenRequest{} + errCh <- controller.Listen(req, mockStream) + }() + + // Give it time to subscribe + time.Sleep(50 * time.Millisecond) + + // Publish event + eventService.Bus().RecordPushed("bafytest123", []string{"/skills/AI"}) + + // Give it time to process + time.Sleep(50 * time.Millisecond) + + // Cancel context to stop listening + cancel() + + // Wait for Listen to return + select { + case err := <-errCh: + if err != nil { + t.Errorf("Listen returned error: %v", err) + } + case <-time.After(time.Second): + t.Error("Timeout waiting for Listen to return") + } + + // Verify event was sent + if len(mockStream.sentMsgs) != 1 { + t.Errorf("Expected 1 message sent, got %d", len(mockStream.sentMsgs)) + } + + if len(mockStream.sentMsgs) > 0 { + event := mockStream.sentMsgs[0].GetEvent() + if event.GetType() != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.GetType()) + } + + if event.GetResourceId() != "bafytest123" { + t.Errorf("Expected resource_id bafytest123, got %s", event.GetResourceId()) + } + } +} + +func TestEventsControllerListenWithFilters(t *testing.T) { + eventService := events.New() + + defer func() { _ = eventService.Stop() }() + + controller := NewEventsController(eventService) + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + mockStream := &mockListenServer{ + ctx: ctx, + sentMsgs: make([]*eventsv1.ListenResponse, 0), + } + + // Listen with filters + errCh := make(chan error, 1) + + go func() { + req := &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + } + errCh <- 
controller.Listen(req, mockStream) + }() + + time.Sleep(50 * time.Millisecond) + + // Publish matching event + eventService.Bus().RecordPushed("bafytest123", nil) + + // Publish non-matching event + eventService.Bus().RecordPublished("bafytest456", nil) + + time.Sleep(50 * time.Millisecond) + cancel() + + // Wait for completion + <-errCh + + // Should have received only the matching event + if len(mockStream.sentMsgs) != 1 { + t.Errorf("Expected 1 message (filtered), got %d", len(mockStream.sentMsgs)) + } + + if len(mockStream.sentMsgs) > 0 { + event := mockStream.sentMsgs[0].GetEvent() + if event.GetType() != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.GetType()) + } + } +} + +func TestEventsControllerListenContextCancellation(t *testing.T) { + eventService := events.New() + + defer func() { _ = eventService.Stop() }() + + controller := NewEventsController(eventService) + + // Create context that's already cancelled + ctx, cancel := context.WithCancel(t.Context()) + cancel() // Cancel immediately + + mockStream := &mockListenServer{ + ctx: ctx, + sentMsgs: make([]*eventsv1.ListenResponse, 0), + } + + // Listen should return immediately due to cancelled context + req := &eventsv1.ListenRequest{} + err := controller.Listen(req, mockStream) + + if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { + t.Errorf("Unexpected error: %v", err) + } + + // Should not have sent any messages + if len(mockStream.sentMsgs) != 0 { + t.Errorf("Expected 0 messages with cancelled context, got %d", len(mockStream.sentMsgs)) + } +} diff --git a/server/controller/publication.go b/server/controller/publication.go index e3b14648f..bb193c919 100644 --- a/server/controller/publication.go +++ b/server/controller/publication.go @@ -1,115 +1,115 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "context" - "fmt" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var publicationLogger = logging.Logger("controller/publication") - -// publicationCtlr implements the PublicationService gRPC interface. -type publicationCtlr struct { - routingv1.UnimplementedPublicationServiceServer - db types.DatabaseAPI - opts types.APIOptions -} - -// NewPublicationController creates a new publication controller. 
-func NewPublicationController(db types.DatabaseAPI, opts types.APIOptions) routingv1.PublicationServiceServer { - return &publicationCtlr{ - db: db, - opts: opts, - } -} - -func (c *publicationCtlr) CreatePublication(_ context.Context, req *routingv1.PublishRequest) (*routingv1.CreatePublicationResponse, error) { - publicationLogger.Debug("Called publication controller's CreatePublication method") - - // Validate the publish request - if req == nil { - return nil, status.Errorf(codes.InvalidArgument, "publish request cannot be nil") - } - - // Validate that at least one request type is specified - switch req.GetRequest().(type) { - case *routingv1.PublishRequest_RecordRefs: - if req.GetRecordRefs() == nil || len(req.GetRecordRefs().GetRefs()) == 0 { - return nil, status.Errorf(codes.InvalidArgument, "record refs cannot be empty") - } - case *routingv1.PublishRequest_Queries: - if req.GetQueries() == nil || len(req.GetQueries().GetQueries()) == 0 { - return nil, status.Errorf(codes.InvalidArgument, "queries cannot be empty") - } - default: - return nil, status.Errorf(codes.InvalidArgument, "invalid publish request: must specify record_refs, queries, or all_records") - } - - id, err := c.db.CreatePublication(req) - if err != nil { - return nil, fmt.Errorf("failed to create publication: %w", err) - } - - publicationLogger.Debug("Publication created successfully", "publication_id", id) - - return &routingv1.CreatePublicationResponse{ - PublicationId: id, - }, nil -} - -func (c *publicationCtlr) ListPublications(req *routingv1.ListPublicationsRequest, srv routingv1.PublicationService_ListPublicationsServer) error { - publicationLogger.Debug("Called publication controller's ListPublications method", "req", req) - - offset := int(req.GetOffset()) - limit := int(req.GetLimit()) - - publications, err := c.db.GetPublications(offset, limit) - if err != nil { - return fmt.Errorf("failed to list publications: %w", err) - } - - for _, publication := range publications { - publicationLogger.Debug("Sending publication object", "publication_id", publication.GetID(), "status", publication.GetStatus()) - - if err := srv.Send(&routingv1.ListPublicationsItem{ - PublicationId: publication.GetID(), - Status: publication.GetStatus(), - CreatedTime: publication.GetCreatedTime(), - LastUpdateTime: publication.GetLastUpdateTime(), - }); err != nil { - return fmt.Errorf("failed to send publication object: %w", err) - } - } - - publicationLogger.Debug("Finished sending publication objects") - - return nil -} - -func (c *publicationCtlr) GetPublication(_ context.Context, req *routingv1.GetPublicationRequest) (*routingv1.GetPublicationResponse, error) { - publicationLogger.Debug("Called publication controller's GetPublication method", "req", req) - - if req.GetPublicationId() == "" { - return nil, status.Errorf(codes.InvalidArgument, "publication_id cannot be empty") - } - - publicationObj, err := c.db.GetPublicationByID(req.GetPublicationId()) - if err != nil { - return nil, fmt.Errorf("failed to get publication by ID: %w", err) - } - - return &routingv1.GetPublicationResponse{ - PublicationId: publicationObj.GetID(), - Status: publicationObj.GetStatus(), - CreatedTime: publicationObj.GetCreatedTime(), - LastUpdateTime: publicationObj.GetLastUpdateTime(), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "fmt" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + 
"github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var publicationLogger = logging.Logger("controller/publication") + +// publicationCtlr implements the PublicationService gRPC interface. +type publicationCtlr struct { + routingv1.UnimplementedPublicationServiceServer + db types.DatabaseAPI + opts types.APIOptions +} + +// NewPublicationController creates a new publication controller. +func NewPublicationController(db types.DatabaseAPI, opts types.APIOptions) routingv1.PublicationServiceServer { + return &publicationCtlr{ + db: db, + opts: opts, + } +} + +func (c *publicationCtlr) CreatePublication(_ context.Context, req *routingv1.PublishRequest) (*routingv1.CreatePublicationResponse, error) { + publicationLogger.Debug("Called publication controller's CreatePublication method") + + // Validate the publish request + if req == nil { + return nil, status.Errorf(codes.InvalidArgument, "publish request cannot be nil") + } + + // Validate that at least one request type is specified + switch req.GetRequest().(type) { + case *routingv1.PublishRequest_RecordRefs: + if req.GetRecordRefs() == nil || len(req.GetRecordRefs().GetRefs()) == 0 { + return nil, status.Errorf(codes.InvalidArgument, "record refs cannot be empty") + } + case *routingv1.PublishRequest_Queries: + if req.GetQueries() == nil || len(req.GetQueries().GetQueries()) == 0 { + return nil, status.Errorf(codes.InvalidArgument, "queries cannot be empty") + } + default: + return nil, status.Errorf(codes.InvalidArgument, "invalid publish request: must specify record_refs, queries, or all_records") + } + + id, err := c.db.CreatePublication(req) + if err != nil { + return nil, fmt.Errorf("failed to create publication: %w", err) + } + + publicationLogger.Debug("Publication created successfully", "publication_id", id) + + return &routingv1.CreatePublicationResponse{ + PublicationId: id, + }, nil +} + +func (c *publicationCtlr) ListPublications(req *routingv1.ListPublicationsRequest, srv routingv1.PublicationService_ListPublicationsServer) error { + publicationLogger.Debug("Called publication controller's ListPublications method", "req", req) + + offset := int(req.GetOffset()) + limit := int(req.GetLimit()) + + publications, err := c.db.GetPublications(offset, limit) + if err != nil { + return fmt.Errorf("failed to list publications: %w", err) + } + + for _, publication := range publications { + publicationLogger.Debug("Sending publication object", "publication_id", publication.GetID(), "status", publication.GetStatus()) + + if err := srv.Send(&routingv1.ListPublicationsItem{ + PublicationId: publication.GetID(), + Status: publication.GetStatus(), + CreatedTime: publication.GetCreatedTime(), + LastUpdateTime: publication.GetLastUpdateTime(), + }); err != nil { + return fmt.Errorf("failed to send publication object: %w", err) + } + } + + publicationLogger.Debug("Finished sending publication objects") + + return nil +} + +func (c *publicationCtlr) GetPublication(_ context.Context, req *routingv1.GetPublicationRequest) (*routingv1.GetPublicationResponse, error) { + publicationLogger.Debug("Called publication controller's GetPublication method", "req", req) + + if req.GetPublicationId() == "" { + return nil, status.Errorf(codes.InvalidArgument, "publication_id cannot be empty") + } + + publicationObj, err := c.db.GetPublicationByID(req.GetPublicationId()) + if err != nil { + return nil, fmt.Errorf("failed to get publication by ID: %w", err) + } + + return &routingv1.GetPublicationResponse{ 
+ PublicationId: publicationObj.GetID(), + Status: publicationObj.GetStatus(), + CreatedTime: publicationObj.GetCreatedTime(), + LastUpdateTime: publicationObj.GetLastUpdateTime(), + }, nil +} diff --git a/server/controller/routing.go b/server/controller/routing.go index 5ff5642d1..26d1100f6 100644 --- a/server/controller/routing.go +++ b/server/controller/routing.go @@ -1,149 +1,149 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "context" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" -) - -var routingLogger = logging.Logger("controller/routing") - -type routingCtlr struct { - routingv1.UnimplementedRoutingServiceServer - routing types.RoutingAPI - store types.StoreAPI - publication types.PublicationAPI -} - -func NewRoutingController(routing types.RoutingAPI, store types.StoreAPI, publication types.PublicationAPI) routingv1.RoutingServiceServer { - return &routingCtlr{ - routing: routing, - store: store, - publication: publication, - UnimplementedRoutingServiceServer: routingv1.UnimplementedRoutingServiceServer{}, - } -} - -func (c *routingCtlr) Publish(ctx context.Context, req *routingv1.PublishRequest) (*emptypb.Empty, error) { - routingLogger.Debug("Called routing controller's Publish method", "req", req) - - // Create publication to be handled by the publication service - publicationID, err := c.publication.CreatePublication(ctx, req) - if err != nil { - routingLogger.Error("Failed to create publication", "error", err) - - return nil, status.Errorf(codes.Internal, "failed to create publication: %v", err) - } - - routingLogger.Info("Publication created successfully", "publication_id", publicationID) - - return &emptypb.Empty{}, nil -} - -func (c *routingCtlr) List(req *routingv1.ListRequest, srv routingv1.RoutingService_ListServer) error { - routingLogger.Debug("Called routing controller's List method", "req", req) - - itemChan, err := c.routing.List(srv.Context(), req) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to list: %s", st.Message()) - } - - // Stream ListResponse items directly to the client - for item := range itemChan { - if err := srv.Send(item); err != nil { - return status.Errorf(codes.Internal, "failed to send list response: %v", err) - } - } - - return nil -} - -func (c *routingCtlr) Search(req *routingv1.SearchRequest, srv routingv1.RoutingService_SearchServer) error { - routingLogger.Debug("Called routing controller's Search method", "req", req) - - itemChan, err := c.routing.Search(srv.Context(), req) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to search: %s", st.Message()) - } - - // Stream SearchResponse items directly to the client - for item := range itemChan { - if err := srv.Send(item); err != nil { - return status.Errorf(codes.Internal, "failed to send search response: %v", err) - } - } - - return nil -} - -func (c *routingCtlr) Unpublish(ctx context.Context, req *routingv1.UnpublishRequest) (*emptypb.Empty, error) { - routingLogger.Debug("Called routing controller's Unpublish method", "req", req) - - // Only handle RecordRefs, not queries - recordRefs, ok := 
req.GetRequest().(*routingv1.UnpublishRequest_RecordRefs) - if !ok { - return nil, status.Error(codes.InvalidArgument, "unpublish request must specify record_refs") //nolint:wrapcheck // gRPC status errors should not be wrapped - } - - // Process each RecordRef - for _, ref := range recordRefs.RecordRefs.GetRefs() { - record, err := c.getRecord(ctx, ref) - if err != nil { - st := status.Convert(err) - - return nil, status.Errorf(st.Code(), "failed to get record: %s", st.Message()) - } - - // Wrap record with adapter for interface-based unpublishing - adapter := adapters.NewRecordAdapter(record) - - err = c.routing.Unpublish(ctx, adapter) - if err != nil { - st := status.Convert(err) - - return nil, status.Errorf(st.Code(), "failed to unpublish: %s", st.Message()) - } - - routingLogger.Info("Successfully unpublished record", "cid", ref.GetCid()) - } - - return &emptypb.Empty{}, nil -} - -func (c *routingCtlr) getRecord(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - routingLogger.Debug("Called routing controller's getRecord method", "ref", ref) - - if ref == nil || ref.GetCid() == "" { - return nil, status.Errorf(codes.InvalidArgument, "object reference is required and must have a CID") - } - - _, err := c.store.Lookup(ctx, ref) - if err != nil { - st := status.Convert(err) - - return nil, status.Errorf(st.Code(), "failed to lookup object: %s", st.Message()) - } - - record, err := c.store.Pull(ctx, ref) - if err != nil { - st := status.Convert(err) - - return nil, status.Errorf(st.Code(), "failed to pull object: %s", st.Message()) - } - - return record, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +var routingLogger = logging.Logger("controller/routing") + +type routingCtlr struct { + routingv1.UnimplementedRoutingServiceServer + routing types.RoutingAPI + store types.StoreAPI + publication types.PublicationAPI +} + +func NewRoutingController(routing types.RoutingAPI, store types.StoreAPI, publication types.PublicationAPI) routingv1.RoutingServiceServer { + return &routingCtlr{ + routing: routing, + store: store, + publication: publication, + UnimplementedRoutingServiceServer: routingv1.UnimplementedRoutingServiceServer{}, + } +} + +func (c *routingCtlr) Publish(ctx context.Context, req *routingv1.PublishRequest) (*emptypb.Empty, error) { + routingLogger.Debug("Called routing controller's Publish method", "req", req) + + // Create publication to be handled by the publication service + publicationID, err := c.publication.CreatePublication(ctx, req) + if err != nil { + routingLogger.Error("Failed to create publication", "error", err) + + return nil, status.Errorf(codes.Internal, "failed to create publication: %v", err) + } + + routingLogger.Info("Publication created successfully", "publication_id", publicationID) + + return &emptypb.Empty{}, nil +} + +func (c *routingCtlr) List(req *routingv1.ListRequest, srv routingv1.RoutingService_ListServer) error { + routingLogger.Debug("Called routing controller's List method", "req", req) + + itemChan, err := c.routing.List(srv.Context(), req) + if err != nil { + st := 
status.Convert(err) + + return status.Errorf(st.Code(), "failed to list: %s", st.Message()) + } + + // Stream ListResponse items directly to the client + for item := range itemChan { + if err := srv.Send(item); err != nil { + return status.Errorf(codes.Internal, "failed to send list response: %v", err) + } + } + + return nil +} + +func (c *routingCtlr) Search(req *routingv1.SearchRequest, srv routingv1.RoutingService_SearchServer) error { + routingLogger.Debug("Called routing controller's Search method", "req", req) + + itemChan, err := c.routing.Search(srv.Context(), req) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to search: %s", st.Message()) + } + + // Stream SearchResponse items directly to the client + for item := range itemChan { + if err := srv.Send(item); err != nil { + return status.Errorf(codes.Internal, "failed to send search response: %v", err) + } + } + + return nil +} + +func (c *routingCtlr) Unpublish(ctx context.Context, req *routingv1.UnpublishRequest) (*emptypb.Empty, error) { + routingLogger.Debug("Called routing controller's Unpublish method", "req", req) + + // Only handle RecordRefs, not queries + recordRefs, ok := req.GetRequest().(*routingv1.UnpublishRequest_RecordRefs) + if !ok { + return nil, status.Error(codes.InvalidArgument, "unpublish request must specify record_refs") //nolint:wrapcheck // gRPC status errors should not be wrapped + } + + // Process each RecordRef + for _, ref := range recordRefs.RecordRefs.GetRefs() { + record, err := c.getRecord(ctx, ref) + if err != nil { + st := status.Convert(err) + + return nil, status.Errorf(st.Code(), "failed to get record: %s", st.Message()) + } + + // Wrap record with adapter for interface-based unpublishing + adapter := adapters.NewRecordAdapter(record) + + err = c.routing.Unpublish(ctx, adapter) + if err != nil { + st := status.Convert(err) + + return nil, status.Errorf(st.Code(), "failed to unpublish: %s", st.Message()) + } + + routingLogger.Info("Successfully unpublished record", "cid", ref.GetCid()) + } + + return &emptypb.Empty{}, nil +} + +func (c *routingCtlr) getRecord(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + routingLogger.Debug("Called routing controller's getRecord method", "ref", ref) + + if ref == nil || ref.GetCid() == "" { + return nil, status.Errorf(codes.InvalidArgument, "object reference is required and must have a CID") + } + + _, err := c.store.Lookup(ctx, ref) + if err != nil { + st := status.Convert(err) + + return nil, status.Errorf(st.Code(), "failed to lookup object: %s", st.Message()) + } + + record, err := c.store.Pull(ctx, ref) + if err != nil { + st := status.Convert(err) + + return nil, status.Errorf(st.Code(), "failed to pull object: %s", st.Message()) + } + + return record, nil +} diff --git a/server/controller/search.go b/server/controller/search.go index a3bd52052..51ca4d811 100644 --- a/server/controller/search.go +++ b/server/controller/search.go @@ -1,95 +1,95 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - databaseutils "github.com/agntcy/dir/server/database/utils" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" -) - -var searchLogger = logging.Logger("controller/search") - -type searchCtlr struct { - searchv1.UnimplementedSearchServiceServer - db types.DatabaseAPI - store 
types.StoreAPI -} - -func NewSearchController(db types.DatabaseAPI, store types.StoreAPI) searchv1.SearchServiceServer { - return &searchCtlr{ - UnimplementedSearchServiceServer: searchv1.UnimplementedSearchServiceServer{}, - db: db, - store: store, - } -} - -func (c *searchCtlr) SearchCIDs(req *searchv1.SearchCIDsRequest, srv searchv1.SearchService_SearchCIDsServer) error { - searchLogger.Debug("Called search controller's SearchCIDs method", "req", req) - - filterOptions, err := databaseutils.QueryToFilters(req.GetQueries()) - if err != nil { - return fmt.Errorf("failed to create filter options: %w", err) - } - - filterOptions = append(filterOptions, - types.WithLimit(int(req.GetLimit())), - types.WithOffset(int(req.GetOffset())), - ) - - recordCIDs, err := c.db.GetRecordCIDs(filterOptions...) - if err != nil { - return fmt.Errorf("failed to get record CIDs: %w", err) - } - - for _, cid := range recordCIDs { - if err := srv.Send(&searchv1.SearchCIDsResponse{RecordCid: cid}); err != nil { - return fmt.Errorf("failed to send record CID: %w", err) - } - } - - return nil -} - -func (c *searchCtlr) SearchRecords(req *searchv1.SearchRecordsRequest, srv searchv1.SearchService_SearchRecordsServer) error { - searchLogger.Debug("Called search controller's SearchRecords method", "req", req) - - filterOptions, err := databaseutils.QueryToFilters(req.GetQueries()) - if err != nil { - return fmt.Errorf("failed to create filter options: %w", err) - } - - filterOptions = append(filterOptions, - types.WithLimit(int(req.GetLimit())), - types.WithOffset(int(req.GetOffset())), - ) - - recordCIDs, err := c.db.GetRecordCIDs(filterOptions...) - if err != nil { - return fmt.Errorf("failed to get record CIDs: %w", err) - } - - for _, cid := range recordCIDs { - if err := srv.Context().Err(); err != nil { - return fmt.Errorf("client disconnected: %w", err) - } - - record, err := c.store.Pull(srv.Context(), &corev1.RecordRef{Cid: cid}) - if err != nil { - searchLogger.Warn("Failed to pull record from store", "cid", cid, "error", err) - - continue - } - - if err := srv.Send(&searchv1.SearchRecordsResponse{Record: record}); err != nil { - return fmt.Errorf("failed to send record: %w", err) - } - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + databaseutils "github.com/agntcy/dir/server/database/utils" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" +) + +var searchLogger = logging.Logger("controller/search") + +type searchCtlr struct { + searchv1.UnimplementedSearchServiceServer + db types.DatabaseAPI + store types.StoreAPI +} + +func NewSearchController(db types.DatabaseAPI, store types.StoreAPI) searchv1.SearchServiceServer { + return &searchCtlr{ + UnimplementedSearchServiceServer: searchv1.UnimplementedSearchServiceServer{}, + db: db, + store: store, + } +} + +func (c *searchCtlr) SearchCIDs(req *searchv1.SearchCIDsRequest, srv searchv1.SearchService_SearchCIDsServer) error { + searchLogger.Debug("Called search controller's SearchCIDs method", "req", req) + + filterOptions, err := databaseutils.QueryToFilters(req.GetQueries()) + if err != nil { + return fmt.Errorf("failed to create filter options: %w", err) + } + + filterOptions = append(filterOptions, + types.WithLimit(int(req.GetLimit())), + types.WithOffset(int(req.GetOffset())), + ) + + recordCIDs, err := 
c.db.GetRecordCIDs(filterOptions...) + if err != nil { + return fmt.Errorf("failed to get record CIDs: %w", err) + } + + for _, cid := range recordCIDs { + if err := srv.Send(&searchv1.SearchCIDsResponse{RecordCid: cid}); err != nil { + return fmt.Errorf("failed to send record CID: %w", err) + } + } + + return nil +} + +func (c *searchCtlr) SearchRecords(req *searchv1.SearchRecordsRequest, srv searchv1.SearchService_SearchRecordsServer) error { + searchLogger.Debug("Called search controller's SearchRecords method", "req", req) + + filterOptions, err := databaseutils.QueryToFilters(req.GetQueries()) + if err != nil { + return fmt.Errorf("failed to create filter options: %w", err) + } + + filterOptions = append(filterOptions, + types.WithLimit(int(req.GetLimit())), + types.WithOffset(int(req.GetOffset())), + ) + + recordCIDs, err := c.db.GetRecordCIDs(filterOptions...) + if err != nil { + return fmt.Errorf("failed to get record CIDs: %w", err) + } + + for _, cid := range recordCIDs { + if err := srv.Context().Err(); err != nil { + return fmt.Errorf("client disconnected: %w", err) + } + + record, err := c.store.Pull(srv.Context(), &corev1.RecordRef{Cid: cid}) + if err != nil { + searchLogger.Warn("Failed to pull record from store", "cid", cid, "error", err) + + continue + } + + if err := srv.Send(&searchv1.SearchRecordsResponse{Record: record}); err != nil { + return fmt.Errorf("failed to send record: %w", err) + } + } + + return nil +} diff --git a/server/controller/sign.go b/server/controller/sign.go index 0450ac517..cf7fd2436 100644 --- a/server/controller/sign.go +++ b/server/controller/sign.go @@ -1,76 +1,76 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "context" - - signv1 "github.com/agntcy/dir/api/sign/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var signLogger = logging.Logger("controller/sign") - -type signCtrl struct { - signv1.UnimplementedSignServiceServer - store types.StoreAPI -} - -// NewSignController creates a new sign service controller. -func NewSignController(store types.StoreAPI) signv1.SignServiceServer { - return &signCtrl{ - store: store, - } -} - -//nolint:wrapcheck -func (s *signCtrl) Sign(_ context.Context, _ *signv1.SignRequest) (*signv1.SignResponse, error) { - signLogger.Debug("Sign request received") - - // Sign functionality is handled client-side - return nil, status.Error(codes.Unimplemented, "server-side signing not implemented") -} - -func (s *signCtrl) Verify(ctx context.Context, req *signv1.VerifyRequest) (*signv1.VerifyResponse, error) { - signLogger.Debug("Verify request received") - - // Validate request - if req.GetRecordRef() == nil || req.GetRecordRef().GetCid() == "" { - return nil, status.Error(codes.InvalidArgument, "record ref must be set") //nolint:wrapcheck - } - - // Server-side verification is enabled by zot verification. - return s.verify(ctx, req.GetRecordRef().GetCid()) -} - -// verify attempts zot verification if the store supports it. 
-func (s *signCtrl) verify(ctx context.Context, recordCID string) (*signv1.VerifyResponse, error) { - // Check if the store supports zot verification - zotStore, ok := s.store.(types.VerifierStore) - if !ok { - return nil, status.Error(codes.Unimplemented, "zot verification not available in this store configuration") //nolint:wrapcheck - } - - signLogger.Debug("Attempting zot verification", "recordCID", recordCID) - - verified, err := zotStore.VerifyWithZot(ctx, recordCID) - if err != nil { - return nil, status.Errorf(codes.Internal, "zot verification failed: %v", err) - } - - signLogger.Debug("Zot verification completed", "recordCID", recordCID, "verified", verified) - - var errMsg string - if !verified { - errMsg = "Signature verification failed" - } - - return &signv1.VerifyResponse{ - Success: verified, - ErrorMessage: &errMsg, - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + + signv1 "github.com/agntcy/dir/api/sign/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var signLogger = logging.Logger("controller/sign") + +type signCtrl struct { + signv1.UnimplementedSignServiceServer + store types.StoreAPI +} + +// NewSignController creates a new sign service controller. +func NewSignController(store types.StoreAPI) signv1.SignServiceServer { + return &signCtrl{ + store: store, + } +} + +//nolint:wrapcheck +func (s *signCtrl) Sign(_ context.Context, _ *signv1.SignRequest) (*signv1.SignResponse, error) { + signLogger.Debug("Sign request received") + + // Sign functionality is handled client-side + return nil, status.Error(codes.Unimplemented, "server-side signing not implemented") +} + +func (s *signCtrl) Verify(ctx context.Context, req *signv1.VerifyRequest) (*signv1.VerifyResponse, error) { + signLogger.Debug("Verify request received") + + // Validate request + if req.GetRecordRef() == nil || req.GetRecordRef().GetCid() == "" { + return nil, status.Error(codes.InvalidArgument, "record ref must be set") //nolint:wrapcheck + } + + // Server-side verification is enabled by zot verification. + return s.verify(ctx, req.GetRecordRef().GetCid()) +} + +// verify attempts zot verification if the store supports it. 
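+// The store must implement types.VerifierStore; otherwise a
+// codes.Unimplemented status is returned instead of a verification result.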
+func (s *signCtrl) verify(ctx context.Context, recordCID string) (*signv1.VerifyResponse, error) { + // Check if the store supports zot verification + zotStore, ok := s.store.(types.VerifierStore) + if !ok { + return nil, status.Error(codes.Unimplemented, "zot verification not available in this store configuration") //nolint:wrapcheck + } + + signLogger.Debug("Attempting zot verification", "recordCID", recordCID) + + verified, err := zotStore.VerifyWithZot(ctx, recordCID) + if err != nil { + return nil, status.Errorf(codes.Internal, "zot verification failed: %v", err) + } + + signLogger.Debug("Zot verification completed", "recordCID", recordCID, "verified", verified) + + var errMsg string + if !verified { + errMsg = "Signature verification failed" + } + + return &signv1.VerifyResponse{ + Success: verified, + ErrorMessage: &errMsg, + }, nil +} diff --git a/server/controller/sign_events_test.go b/server/controller/sign_events_test.go index 190a7c0ab..9057f463f 100644 --- a/server/controller/sign_events_test.go +++ b/server/controller/sign_events_test.go @@ -1,63 +1,63 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events" -) - -const ( - testCID = "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" - testSigner = "client" -) - -// TestSignEventsEmission is a simple test to verify that sign events are emitted. -// This test verifies that the event bus methods are called correctly, -// without testing the complex controller logic itself. -func TestSignEventsEmission(t *testing.T) { - // Create event bus and subscribe - bus := events.NewEventBus() - safeEventBus := events.NewSafeEventBus(bus) - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Test RECORD_SIGNED event - t.Run("RECORD_SIGNED", func(t *testing.T) { - safeEventBus.RecordSigned(testCID, testSigner) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED { - t.Errorf("Expected RECORD_SIGNED, got %v", event.Type) - } - - if event.ResourceID != testCID { - t.Errorf("Expected CID '%s', got %s", testCID, event.ResourceID) - } - - if event.Metadata["signer"] != testSigner { - t.Errorf("Expected signer=%s in metadata, got %v", testSigner, event.Metadata) - } - default: - t.Error("Expected to receive RECORD_SIGNED event") - } - }) -} - -// TestSignWithNilEventBus verifies that sign works even with nil event bus (shouldn't panic). -func TestSignWithNilEventBus(t *testing.T) { - safeEventBus := events.NewSafeEventBus(nil) - - // Should not panic - safeEventBus.RecordSigned(testCID, testSigner) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" +) + +const ( + testCID = "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" + testSigner = "client" +) + +// TestSignEventsEmission is a simple test to verify that sign events are emitted. +// This test verifies that the event bus methods are called correctly, +// without testing the complex controller logic itself. 
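+// It subscribes to the bus, emits RECORD_SIGNED via the SafeEventBus wrapper,
+// and asserts the delivered event's type, resource ID, and signer metadata.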
+func TestSignEventsEmission(t *testing.T) { + // Create event bus and subscribe + bus := events.NewEventBus() + safeEventBus := events.NewSafeEventBus(bus) + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Test RECORD_SIGNED event + t.Run("RECORD_SIGNED", func(t *testing.T) { + safeEventBus.RecordSigned(testCID, testSigner) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED { + t.Errorf("Expected RECORD_SIGNED, got %v", event.Type) + } + + if event.ResourceID != testCID { + t.Errorf("Expected CID '%s', got %s", testCID, event.ResourceID) + } + + if event.Metadata["signer"] != testSigner { + t.Errorf("Expected signer=%s in metadata, got %v", testSigner, event.Metadata) + } + default: + t.Error("Expected to receive RECORD_SIGNED event") + } + }) +} + +// TestSignWithNilEventBus verifies that sign works even with nil event bus (shouldn't panic). +func TestSignWithNilEventBus(t *testing.T) { + safeEventBus := events.NewSafeEventBus(nil) + + // Should not panic + safeEventBus.RecordSigned(testCID, testSigner) +} diff --git a/server/controller/store.go b/server/controller/store.go index 1cab499c0..db3f723b3 100644 --- a/server/controller/store.go +++ b/server/controller/store.go @@ -1,407 +1,407 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck,dupl -package controller - -import ( - "context" - "errors" - "fmt" - "io" - - corev1 "github.com/agntcy/dir/api/core/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/emptypb" -) - -var storeLogger = logging.Logger("controller/store") - -type storeCtrl struct { - storev1.UnimplementedStoreServiceServer - store types.StoreAPI - db types.DatabaseAPI - eventBus *events.SafeEventBus -} - -func NewStoreController(store types.StoreAPI, db types.DatabaseAPI, eventBus *events.SafeEventBus) storev1.StoreServiceServer { - return &storeCtrl{ - UnimplementedStoreServiceServer: storev1.UnimplementedStoreServiceServer{}, - store: store, - db: db, - eventBus: eventBus, - } -} - -func (s storeCtrl) Push(stream storev1.StoreService_PushServer) error { - storeLogger.Debug("Called store controller's Push method") - - ctx := stream.Context() - - for { - // Receive complete Record from stream - record, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("Push stream completed") - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive record: %v", err) - } - - isValid, validationErrors, err := record.Validate(ctx) - if err != nil { - return status.Errorf(codes.Internal, "failed to validate record: %v", err) - } - - if !isValid { - // Extract record name and version for better error reporting - recordName, recordVersion := extractRecordInfo(record) - - // Log validation error with record details - storeLogger.Warn("Record validation failed", - "name", recordName, - "version", recordVersion, - "errors", validationErrors) - - return status.Errorf(codes.InvalidArgument, "record validation failed: %v", validationErrors) - } - - pushedRef, err := s.pushRecordToStore(stream.Context(), 
record) - if err != nil { - return err - } - - // Send the RecordRef back via stream - if err := stream.Send(pushedRef); err != nil { - return status.Errorf(codes.Internal, "failed to send record reference: %v", err) - } - } -} - -func (s storeCtrl) Pull(stream storev1.StoreService_PullServer) error { - storeLogger.Debug("Called store controller's Pull method") - - for { - // Receive RecordRef from stream - recordRef, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("Pull stream completed") - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) - } - - storeLogger.Debug("Pull request received", "cid", recordRef.GetCid()) - - // Validate record reference - if err := s.validateRecordRef(recordRef); err != nil { - return err - } - - // Pull record from store - record, err := s.pullRecordFromStore(stream.Context(), recordRef) - if err != nil { - return err - } - - // Send Record back via stream - if err := stream.Send(record); err != nil { - return status.Errorf(codes.Internal, "failed to send record: %v", err) - } - } -} - -func (s storeCtrl) Lookup(stream storev1.StoreService_LookupServer) error { - storeLogger.Debug("Called store controller's Lookup method") - - for { - // Receive RecordRef from stream - recordRef, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("Lookup stream completed") - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) - } - - storeLogger.Debug("Lookup request received", "cid", recordRef.GetCid()) - - // Validate CID - if recordRef.GetCid() == "" { - return status.Error(codes.InvalidArgument, "record cid is required") - } - - // Lookup record metadata - recordMeta, err := s.store.Lookup(stream.Context(), recordRef) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to lookup record: %s", st.Message()) - } - - storeLogger.Debug("Record metadata retrieved successfully", "cid", recordRef.GetCid()) - - // Send RecordMeta back via stream - if err := stream.Send(recordMeta); err != nil { - return status.Errorf(codes.Internal, "failed to send record metadata: %v", err) - } - } -} - -func (s storeCtrl) Delete(stream storev1.StoreService_DeleteServer) error { - storeLogger.Debug("Called store controller's Delete method") - - for { - // Receive RecordRef from stream - recordRef, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("Delete stream completed") - - if err := stream.SendAndClose(&emptypb.Empty{}); err != nil { - return status.Errorf(codes.Internal, "failed to send response: %v", err) - } - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) - } - - storeLogger.Debug("Delete request received", "cid", recordRef.GetCid()) - - // Validate CID - if recordRef.GetCid() == "" { - return status.Error(codes.InvalidArgument, "record cid is required") - } - - // Delete record from store - err = s.store.Delete(stream.Context(), recordRef) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to delete record: %s", st.Message()) - } - - // Clean up search database (secondary operation - don't fail on errors) - if err := s.db.RemoveRecord(recordRef.GetCid()); err != nil { - // Log error but don't fail the delete - storage is source of truth - storeLogger.Error("Failed to remove record from search index", "error", err, "cid", 
recordRef.GetCid()) - } else { - storeLogger.Debug("Record removed from search index", "cid", recordRef.GetCid()) - } - - storeLogger.Info("Record deleted successfully", "cid", recordRef.GetCid()) - } -} - -func (s storeCtrl) PushReferrer(stream storev1.StoreService_PushReferrerServer) error { - storeLogger.Debug("Called store controller's PushReferrer method") - - for { - // Receive PushReferrerRequest from stream - request, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("PushReferrer stream completed") - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive push referrer request: %v", err) - } - - // Validate the record reference - if err := s.validateRecordRef(request.GetRecordRef()); err != nil { - return err - } - - // Handle the referrer directly since we only have one type now - response := s.pushReferrer(stream.Context(), request) - - if err := stream.Send(response); err != nil { - return status.Errorf(codes.Internal, "failed to send push referrer response: %v", err) - } - } -} - -func (s storeCtrl) pushReferrer(ctx context.Context, request *storev1.PushReferrerRequest) *storev1.PushReferrerResponse { - storeLogger.Debug("Pushing referrer", "cid", request.GetRecordRef().GetCid(), "type", request.GetReferrer().GetType()) - - // Try to use referrer storage if the store supports it - refStore, ok := s.store.(types.ReferrerStoreAPI) - if !ok { - errMsg := "referrer storage not supported by current store implementation" - - return &storev1.PushReferrerResponse{ - Success: false, - ErrorMessage: &errMsg, - } - } - - err := refStore.PushReferrer(ctx, request.GetRecordRef().GetCid(), request.GetReferrer()) - if err != nil { - errMsg := fmt.Sprintf("failed to store referrer for record %s: %v", request.GetRecordRef().GetCid(), err) - - return &storev1.PushReferrerResponse{ - Success: false, - ErrorMessage: &errMsg, - } - } - - storeLogger.Debug("Referrer stored successfully", "cid", request.GetRecordRef().GetCid(), "type", request.GetReferrer().GetType()) - - // Emit RECORD_SIGNED event if this is a signature referrer - if request.GetReferrer().GetType() == corev1.SignatureReferrerType { - s.eventBus.RecordSigned(request.GetRecordRef().GetCid(), "client") - } - - return &storev1.PushReferrerResponse{ - Success: true, - } -} - -// PullReferrer handles retrieving referrers (like signatures) for records. 
-func (s storeCtrl) PullReferrer(stream storev1.StoreService_PullReferrerServer) error { - storeLogger.Debug("Called store controller's PullReferrer method") - - for { - // Receive PullReferrerRequest from stream - request, err := stream.Recv() - if errors.Is(err, io.EOF) { - storeLogger.Debug("PullReferrer stream completed") - - return nil - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to receive pull referrer request: %v", err) - } - - // Validate the record reference - if err := s.validateRecordRef(request.GetRecordRef()); err != nil { - return err - } - - // Determine referrer type (empty string means all types) - referrerType := "" - if request.ReferrerType != nil { - referrerType = request.GetReferrerType() - } - - // Try to use referrer storage if the store supports it - refStore, ok := s.store.(types.ReferrerStoreAPI) - if !ok { - storeLogger.Error("Referrer storage not supported by current store implementation") - - return stream.Send(&storev1.PullReferrerResponse{}) - } - - // Use WalkReferrers with a callback that streams each referrer - walkFn := func(referrer *corev1.RecordReferrer) error { - response := &storev1.PullReferrerResponse{ - Referrer: referrer, - } - - if err := stream.Send(response); err != nil { - return status.Errorf(codes.Internal, "failed to send referrer response: %v", err) - } - - storeLogger.Debug("Referrer streamed successfully", "cid", request.GetRecordRef().GetCid(), "type", referrerType) - - return nil - } - - // Walk referrers of the specified type - err = refStore.WalkReferrers(stream.Context(), request.GetRecordRef().GetCid(), referrerType, walkFn) - if err != nil { - storeLogger.Error("Failed to walk referrers by type for record", "error", err, "cid", request.GetRecordRef().GetCid(), "type", referrerType) - - return stream.Send(&storev1.PullReferrerResponse{}) - } - } -} - -// pushRecordToStore pushes a record to the store and adds it to the search index. -func (s storeCtrl) pushRecordToStore(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - // Push the record to store - pushedRef, err := s.store.Push(ctx, record) - if err != nil { - storeLogger.Error("Failed to push record to store", "error", err) - - return nil, status.Errorf(codes.Internal, "failed to push record to store: %v", err) - } - - storeLogger.Info("Record pushed to store successfully", "cid", pushedRef.GetCid()) - - // Add record to search index for discoverability - // Use the adapter pattern to convert corev1.Record to types.Record - recordAdapter := adapters.NewRecordAdapter(record) - if err := s.db.AddRecord(recordAdapter); err != nil { - // Log error but don't fail the push operation - storeLogger.Error("Failed to add record to search index", "error", err, "cid", pushedRef.GetCid()) - } else { - storeLogger.Debug("Record added to search index successfully", "cid", pushedRef.GetCid()) - } - - return pushedRef, nil -} - -// validateRecordRef validates a record reference. -func (s storeCtrl) validateRecordRef(recordRef *corev1.RecordRef) error { - if recordRef.GetCid() == "" { - return status.Error(codes.InvalidArgument, "record cid is required") - } - - return nil -} - -// pullRecordFromStore pulls a record from the store with validation. 
-func (s storeCtrl) pullRecordFromStore(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.Record, error) { - // Pull record from store - record, err := s.store.Pull(ctx, recordRef) - if err != nil { - st := status.Convert(err) - - return nil, status.Errorf(st.Code(), "failed to pull record: %s", st.Message()) - } - - storeLogger.Debug("Record pulled successfully", "cid", recordRef.GetCid()) - - return record, nil -} - -// extractRecordInfo extracts name and version from a record for logging. -func extractRecordInfo(record *corev1.Record) (string, string) { - name := "unknown" - version := "unknown" - - adapter := adapters.NewRecordAdapter(record) - - recordData, err := adapter.GetRecordData() - if err != nil { - return name, version - } - - if recordData != nil { - name = recordData.GetName() - version = recordData.GetVersion() - } - - return name, version -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck,dupl +package controller + +import ( + "context" + "errors" + "fmt" + "io" + + corev1 "github.com/agntcy/dir/api/core/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +var storeLogger = logging.Logger("controller/store") + +type storeCtrl struct { + storev1.UnimplementedStoreServiceServer + store types.StoreAPI + db types.DatabaseAPI + eventBus *events.SafeEventBus +} + +func NewStoreController(store types.StoreAPI, db types.DatabaseAPI, eventBus *events.SafeEventBus) storev1.StoreServiceServer { + return &storeCtrl{ + UnimplementedStoreServiceServer: storev1.UnimplementedStoreServiceServer{}, + store: store, + db: db, + eventBus: eventBus, + } +} + +func (s storeCtrl) Push(stream storev1.StoreService_PushServer) error { + storeLogger.Debug("Called store controller's Push method") + + ctx := stream.Context() + + for { + // Receive complete Record from stream + record, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("Push stream completed") + + return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive record: %v", err) + } + + isValid, validationErrors, err := record.Validate(ctx) + if err != nil { + return status.Errorf(codes.Internal, "failed to validate record: %v", err) + } + + if !isValid { + // Extract record name and version for better error reporting + recordName, recordVersion := extractRecordInfo(record) + + // Log validation error with record details + storeLogger.Warn("Record validation failed", + "name", recordName, + "version", recordVersion, + "errors", validationErrors) + + return status.Errorf(codes.InvalidArgument, "record validation failed: %v", validationErrors) + } + + pushedRef, err := s.pushRecordToStore(stream.Context(), record) + if err != nil { + return err + } + + // Send the RecordRef back via stream + if err := stream.Send(pushedRef); err != nil { + return status.Errorf(codes.Internal, "failed to send record reference: %v", err) + } + } +} + +func (s storeCtrl) Pull(stream storev1.StoreService_PullServer) error { + storeLogger.Debug("Called store controller's Pull method") + + for { + // Receive RecordRef from stream + recordRef, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("Pull stream completed") + + 
return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) + } + + storeLogger.Debug("Pull request received", "cid", recordRef.GetCid()) + + // Validate record reference + if err := s.validateRecordRef(recordRef); err != nil { + return err + } + + // Pull record from store + record, err := s.pullRecordFromStore(stream.Context(), recordRef) + if err != nil { + return err + } + + // Send Record back via stream + if err := stream.Send(record); err != nil { + return status.Errorf(codes.Internal, "failed to send record: %v", err) + } + } +} + +func (s storeCtrl) Lookup(stream storev1.StoreService_LookupServer) error { + storeLogger.Debug("Called store controller's Lookup method") + + for { + // Receive RecordRef from stream + recordRef, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("Lookup stream completed") + + return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) + } + + storeLogger.Debug("Lookup request received", "cid", recordRef.GetCid()) + + // Validate CID + if recordRef.GetCid() == "" { + return status.Error(codes.InvalidArgument, "record cid is required") + } + + // Lookup record metadata + recordMeta, err := s.store.Lookup(stream.Context(), recordRef) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to lookup record: %s", st.Message()) + } + + storeLogger.Debug("Record metadata retrieved successfully", "cid", recordRef.GetCid()) + + // Send RecordMeta back via stream + if err := stream.Send(recordMeta); err != nil { + return status.Errorf(codes.Internal, "failed to send record metadata: %v", err) + } + } +} + +func (s storeCtrl) Delete(stream storev1.StoreService_DeleteServer) error { + storeLogger.Debug("Called store controller's Delete method") + + for { + // Receive RecordRef from stream + recordRef, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("Delete stream completed") + + if err := stream.SendAndClose(&emptypb.Empty{}); err != nil { + return status.Errorf(codes.Internal, "failed to send response: %v", err) + } + + return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive record reference: %v", err) + } + + storeLogger.Debug("Delete request received", "cid", recordRef.GetCid()) + + // Validate CID + if recordRef.GetCid() == "" { + return status.Error(codes.InvalidArgument, "record cid is required") + } + + // Delete record from store + err = s.store.Delete(stream.Context(), recordRef) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to delete record: %s", st.Message()) + } + + // Clean up search database (secondary operation - don't fail on errors) + if err := s.db.RemoveRecord(recordRef.GetCid()); err != nil { + // Log error but don't fail the delete - storage is source of truth + storeLogger.Error("Failed to remove record from search index", "error", err, "cid", recordRef.GetCid()) + } else { + storeLogger.Debug("Record removed from search index", "cid", recordRef.GetCid()) + } + + storeLogger.Info("Record deleted successfully", "cid", recordRef.GetCid()) + } +} + +func (s storeCtrl) PushReferrer(stream storev1.StoreService_PushReferrerServer) error { + storeLogger.Debug("Called store controller's PushReferrer method") + + for { + // Receive PushReferrerRequest from stream + request, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("PushReferrer stream completed") 
+ + return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive push referrer request: %v", err) + } + + // Validate the record reference + if err := s.validateRecordRef(request.GetRecordRef()); err != nil { + return err + } + + // Handle the referrer directly since we only have one type now + response := s.pushReferrer(stream.Context(), request) + + if err := stream.Send(response); err != nil { + return status.Errorf(codes.Internal, "failed to send push referrer response: %v", err) + } + } +} + +func (s storeCtrl) pushReferrer(ctx context.Context, request *storev1.PushReferrerRequest) *storev1.PushReferrerResponse { + storeLogger.Debug("Pushing referrer", "cid", request.GetRecordRef().GetCid(), "type", request.GetReferrer().GetType()) + + // Try to use referrer storage if the store supports it + refStore, ok := s.store.(types.ReferrerStoreAPI) + if !ok { + errMsg := "referrer storage not supported by current store implementation" + + return &storev1.PushReferrerResponse{ + Success: false, + ErrorMessage: &errMsg, + } + } + + err := refStore.PushReferrer(ctx, request.GetRecordRef().GetCid(), request.GetReferrer()) + if err != nil { + errMsg := fmt.Sprintf("failed to store referrer for record %s: %v", request.GetRecordRef().GetCid(), err) + + return &storev1.PushReferrerResponse{ + Success: false, + ErrorMessage: &errMsg, + } + } + + storeLogger.Debug("Referrer stored successfully", "cid", request.GetRecordRef().GetCid(), "type", request.GetReferrer().GetType()) + + // Emit RECORD_SIGNED event if this is a signature referrer + if request.GetReferrer().GetType() == corev1.SignatureReferrerType { + s.eventBus.RecordSigned(request.GetRecordRef().GetCid(), "client") + } + + return &storev1.PushReferrerResponse{ + Success: true, + } +} + +// PullReferrer handles retrieving referrers (like signatures) for records. 
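+// An empty referrer type in the request matches referrers of all types.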
+func (s storeCtrl) PullReferrer(stream storev1.StoreService_PullReferrerServer) error { + storeLogger.Debug("Called store controller's PullReferrer method") + + for { + // Receive PullReferrerRequest from stream + request, err := stream.Recv() + if errors.Is(err, io.EOF) { + storeLogger.Debug("PullReferrer stream completed") + + return nil + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to receive pull referrer request: %v", err) + } + + // Validate the record reference + if err := s.validateRecordRef(request.GetRecordRef()); err != nil { + return err + } + + // Determine referrer type (empty string means all types) + referrerType := "" + if request.ReferrerType != nil { + referrerType = request.GetReferrerType() + } + + // Try to use referrer storage if the store supports it + refStore, ok := s.store.(types.ReferrerStoreAPI) + if !ok { + storeLogger.Error("Referrer storage not supported by current store implementation") + + return stream.Send(&storev1.PullReferrerResponse{}) + } + + // Use WalkReferrers with a callback that streams each referrer + walkFn := func(referrer *corev1.RecordReferrer) error { + response := &storev1.PullReferrerResponse{ + Referrer: referrer, + } + + if err := stream.Send(response); err != nil { + return status.Errorf(codes.Internal, "failed to send referrer response: %v", err) + } + + storeLogger.Debug("Referrer streamed successfully", "cid", request.GetRecordRef().GetCid(), "type", referrerType) + + return nil + } + + // Walk referrers of the specified type + err = refStore.WalkReferrers(stream.Context(), request.GetRecordRef().GetCid(), referrerType, walkFn) + if err != nil { + storeLogger.Error("Failed to walk referrers by type for record", "error", err, "cid", request.GetRecordRef().GetCid(), "type", referrerType) + + return stream.Send(&storev1.PullReferrerResponse{}) + } + } +} + +// pushRecordToStore pushes a record to the store and adds it to the search index. +func (s storeCtrl) pushRecordToStore(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + // Push the record to store + pushedRef, err := s.store.Push(ctx, record) + if err != nil { + storeLogger.Error("Failed to push record to store", "error", err) + + return nil, status.Errorf(codes.Internal, "failed to push record to store: %v", err) + } + + storeLogger.Info("Record pushed to store successfully", "cid", pushedRef.GetCid()) + + // Add record to search index for discoverability + // Use the adapter pattern to convert corev1.Record to types.Record + recordAdapter := adapters.NewRecordAdapter(record) + if err := s.db.AddRecord(recordAdapter); err != nil { + // Log error but don't fail the push operation + storeLogger.Error("Failed to add record to search index", "error", err, "cid", pushedRef.GetCid()) + } else { + storeLogger.Debug("Record added to search index successfully", "cid", pushedRef.GetCid()) + } + + return pushedRef, nil +} + +// validateRecordRef validates a record reference. +func (s storeCtrl) validateRecordRef(recordRef *corev1.RecordRef) error { + if recordRef.GetCid() == "" { + return status.Error(codes.InvalidArgument, "record cid is required") + } + + return nil +} + +// pullRecordFromStore pulls a record from the store with validation. 
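+// Store errors are converted so the original gRPC status code is preserved.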
+func (s storeCtrl) pullRecordFromStore(ctx context.Context, recordRef *corev1.RecordRef) (*corev1.Record, error) { + // Pull record from store + record, err := s.store.Pull(ctx, recordRef) + if err != nil { + st := status.Convert(err) + + return nil, status.Errorf(st.Code(), "failed to pull record: %s", st.Message()) + } + + storeLogger.Debug("Record pulled successfully", "cid", recordRef.GetCid()) + + return record, nil +} + +// extractRecordInfo extracts name and version from a record for logging. +func extractRecordInfo(record *corev1.Record) (string, string) { + name := "unknown" + version := "unknown" + + adapter := adapters.NewRecordAdapter(record) + + recordData, err := adapter.GetRecordData() + if err != nil { + return name, version + } + + if recordData != nil { + name = recordData.GetName() + version = recordData.GetVersion() + } + + return name, version +} diff --git a/server/controller/sync.go b/server/controller/sync.go index a195b170d..b53567691 100644 --- a/server/controller/sync.go +++ b/server/controller/sync.go @@ -1,188 +1,188 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package controller - -import ( - "context" - "errors" - "fmt" - "net/url" - "strings" - - storev1 "github.com/agntcy/dir/api/store/v1" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var syncLogger = logging.Logger("controller/sync") - -// syncCtlr implements the SyncService gRPC interface. -type syncCtlr struct { - storev1.UnimplementedSyncServiceServer - db types.DatabaseAPI - opts types.APIOptions -} - -// NewSyncController creates a new sync controller. 
-func NewSyncController(db types.DatabaseAPI, opts types.APIOptions) storev1.SyncServiceServer { - return &syncCtlr{ - db: db, - opts: opts, - } -} - -func (c *syncCtlr) CreateSync(_ context.Context, req *storev1.CreateSyncRequest) (*storev1.CreateSyncResponse, error) { - syncLogger.Debug("Called sync controller's CreateSync method") - - // Validate the remote directory URL - if err := validateRemoteDirectoryURL(req.GetRemoteDirectoryUrl()); err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid remote directory URL: %v", err) - } - - id, err := c.db.CreateSync(req.GetRemoteDirectoryUrl(), req.GetCids()) - if err != nil { - return nil, fmt.Errorf("failed to create sync: %w", err) - } - - syncLogger.Debug("Sync created successfully") - - return &storev1.CreateSyncResponse{ - SyncId: id, - }, nil -} - -func (c *syncCtlr) ListSyncs(req *storev1.ListSyncsRequest, srv storev1.SyncService_ListSyncsServer) error { - syncLogger.Debug("Called sync controller's ListSyncs method", "req", req) - - syncs, err := c.db.GetSyncs(int(req.GetOffset()), int(req.GetLimit())) - if err != nil { - return fmt.Errorf("failed to list syncs: %w", err) - } - - for _, sync := range syncs { - syncLogger.Debug("Sending sync object", "sync_id", sync.GetID(), "status", sync.GetStatus()) - - if err := srv.Send(&storev1.ListSyncsItem{ - SyncId: sync.GetID(), - RemoteDirectoryUrl: sync.GetRemoteDirectoryURL(), - Status: sync.GetStatus(), - }); err != nil { - return fmt.Errorf("failed to send sync object: %w", err) - } - } - - syncLogger.Debug("Finished sending sync objects") - - return nil -} - -func (c *syncCtlr) GetSync(_ context.Context, req *storev1.GetSyncRequest) (*storev1.GetSyncResponse, error) { - syncLogger.Debug("Called sync controller's GetSync method", "req", req) - - syncObj, err := c.db.GetSyncByID(req.GetSyncId()) - if err != nil { - return nil, fmt.Errorf("failed to get sync by ID: %w", err) - } - - return &storev1.GetSyncResponse{ - SyncId: syncObj.GetID(), - RemoteDirectoryUrl: syncObj.GetRemoteDirectoryURL(), - Status: syncObj.GetStatus(), - }, nil -} - -func (c *syncCtlr) DeleteSync(_ context.Context, req *storev1.DeleteSyncRequest) (*storev1.DeleteSyncResponse, error) { - syncLogger.Debug("Called sync controller's DeleteSync method", "req", req) - - // Get the sync to check its current status - syncObj, err := c.db.GetSyncByID(req.GetSyncId()) - if err != nil { - return nil, fmt.Errorf("failed to get sync: %w", err) - } - - if syncObj.GetStatus() == storev1.SyncStatus_SYNC_STATUS_DELETED { - return nil, status.Errorf(codes.NotFound, "sync has already been deleted") - } - - // Mark sync for deletion - the scheduler will pick this up - if err := c.db.UpdateSyncStatus(req.GetSyncId(), storev1.SyncStatus_SYNC_STATUS_DELETE_PENDING); err != nil { - return nil, fmt.Errorf("failed to mark sync for deletion: %w", err) - } - - syncLogger.Debug("Sync marked for deletion", "sync_id", req.GetSyncId()) - - return &storev1.DeleteSyncResponse{}, nil -} - -// RequestRegistryCredentials handles requests for registry authentication credentials. 
-func (c *syncCtlr) RequestRegistryCredentials(_ context.Context, req *storev1.RequestRegistryCredentialsRequest) (*storev1.RequestRegistryCredentialsResponse, error) { - syncLogger.Debug("Called sync controller's RequestRegistryCredentials method", "req", req) - - // Validate requesting node ID - if req.GetRequestingNodeId() == "" { - return &storev1.RequestRegistryCredentialsResponse{ - Success: false, - ErrorMessage: "requesting node ID is required", - }, nil - } - - // Get OCI configuration to determine registry details - ociConfig := c.opts.Config().Store.OCI - syncConfig := c.opts.Config().Sync - - // Build registry URL based on configuration - registryURL := ociConfig.RegistryAddress - if registryURL == "" { - registryURL = ociconfig.DefaultRegistryAddress - } - - return &storev1.RequestRegistryCredentialsResponse{ - Success: true, - RemoteRegistryUrl: registryURL, - Credentials: &storev1.RequestRegistryCredentialsResponse_BasicAuth{ - BasicAuth: &storev1.BasicAuthCredentials{ - Username: syncConfig.Username, - Password: syncConfig.Password, - }, - }, - }, nil -} - -// validateRemoteDirectoryURL validates the format of a remote directory URL. -func validateRemoteDirectoryURL(rawURL string) error { - if rawURL == "" { - return errors.New("remote directory URL is required") - } - - // If the URL doesn't have a scheme, treat it as a raw host:port - if !strings.Contains(rawURL, "://") { - // Validate that it looks like host:port - if !strings.Contains(rawURL, ":") { - return errors.New("URL must include port (e.g., 'host:port' or 'http://host:port')") - } - - return nil - } - - // Parse as full URL - parsedURL, err := url.Parse(rawURL) - if err != nil { - return fmt.Errorf("failed to parse URL: %w", err) - } - - // Only allow http and https schemes - if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return fmt.Errorf("unsupported scheme '%s', only 'http' and 'https' are supported", parsedURL.Scheme) - } - - // Validate hostname - if parsedURL.Hostname() == "" { - return errors.New("URL must include a hostname") - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + "errors" + "fmt" + "net/url" + "strings" + + storev1 "github.com/agntcy/dir/api/store/v1" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var syncLogger = logging.Logger("controller/sync") + +// syncCtlr implements the SyncService gRPC interface. +type syncCtlr struct { + storev1.UnimplementedSyncServiceServer + db types.DatabaseAPI + opts types.APIOptions +} + +// NewSyncController creates a new sync controller. 
+func NewSyncController(db types.DatabaseAPI, opts types.APIOptions) storev1.SyncServiceServer { + return &syncCtlr{ + db: db, + opts: opts, + } +} + +func (c *syncCtlr) CreateSync(_ context.Context, req *storev1.CreateSyncRequest) (*storev1.CreateSyncResponse, error) { + syncLogger.Debug("Called sync controller's CreateSync method") + + // Validate the remote directory URL + if err := validateRemoteDirectoryURL(req.GetRemoteDirectoryUrl()); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid remote directory URL: %v", err) + } + + id, err := c.db.CreateSync(req.GetRemoteDirectoryUrl(), req.GetCids()) + if err != nil { + return nil, fmt.Errorf("failed to create sync: %w", err) + } + + syncLogger.Debug("Sync created successfully") + + return &storev1.CreateSyncResponse{ + SyncId: id, + }, nil +} + +func (c *syncCtlr) ListSyncs(req *storev1.ListSyncsRequest, srv storev1.SyncService_ListSyncsServer) error { + syncLogger.Debug("Called sync controller's ListSyncs method", "req", req) + + syncs, err := c.db.GetSyncs(int(req.GetOffset()), int(req.GetLimit())) + if err != nil { + return fmt.Errorf("failed to list syncs: %w", err) + } + + for _, sync := range syncs { + syncLogger.Debug("Sending sync object", "sync_id", sync.GetID(), "status", sync.GetStatus()) + + if err := srv.Send(&storev1.ListSyncsItem{ + SyncId: sync.GetID(), + RemoteDirectoryUrl: sync.GetRemoteDirectoryURL(), + Status: sync.GetStatus(), + }); err != nil { + return fmt.Errorf("failed to send sync object: %w", err) + } + } + + syncLogger.Debug("Finished sending sync objects") + + return nil +} + +func (c *syncCtlr) GetSync(_ context.Context, req *storev1.GetSyncRequest) (*storev1.GetSyncResponse, error) { + syncLogger.Debug("Called sync controller's GetSync method", "req", req) + + syncObj, err := c.db.GetSyncByID(req.GetSyncId()) + if err != nil { + return nil, fmt.Errorf("failed to get sync by ID: %w", err) + } + + return &storev1.GetSyncResponse{ + SyncId: syncObj.GetID(), + RemoteDirectoryUrl: syncObj.GetRemoteDirectoryURL(), + Status: syncObj.GetStatus(), + }, nil +} + +func (c *syncCtlr) DeleteSync(_ context.Context, req *storev1.DeleteSyncRequest) (*storev1.DeleteSyncResponse, error) { + syncLogger.Debug("Called sync controller's DeleteSync method", "req", req) + + // Get the sync to check its current status + syncObj, err := c.db.GetSyncByID(req.GetSyncId()) + if err != nil { + return nil, fmt.Errorf("failed to get sync: %w", err) + } + + if syncObj.GetStatus() == storev1.SyncStatus_SYNC_STATUS_DELETED { + return nil, status.Errorf(codes.NotFound, "sync has already been deleted") + } + + // Mark sync for deletion - the scheduler will pick this up + if err := c.db.UpdateSyncStatus(req.GetSyncId(), storev1.SyncStatus_SYNC_STATUS_DELETE_PENDING); err != nil { + return nil, fmt.Errorf("failed to mark sync for deletion: %w", err) + } + + syncLogger.Debug("Sync marked for deletion", "sync_id", req.GetSyncId()) + + return &storev1.DeleteSyncResponse{}, nil +} + +// RequestRegistryCredentials handles requests for registry authentication credentials. 
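// Editor's aside: a hedged, test-style sketch of what validateRemoteDirectoryURL
// (defined at the bottom of this file) accepts and rejects, inferred solely from
// the implementation in this patch. These cases are illustrative and are not
// taken from an existing test file.
package controller

import "testing"

func TestValidateRemoteDirectoryURL_Sketch(t *testing.T) {
	cases := []struct {
		url     string
		wantErr bool
	}{
		{"", true},                            // empty URL is rejected
		{"dir.example.com", true},             // no scheme and no port
		{"dir.example.com:8888", false},       // raw host:port is accepted as-is
		{"http://dir.example.com:8888", false},
		{"https://dir.example.com", false},
		{"ftp://dir.example.com", true},       // only http/https schemes allowed
		{"http://", true},                     // scheme present but no hostname
	}
	for _, c := range cases {
		err := validateRemoteDirectoryURL(c.url)
		if (err != nil) != c.wantErr {
			t.Errorf("validateRemoteDirectoryURL(%q) err=%v, wantErr=%v", c.url, err, c.wantErr)
		}
	}
}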
+func (c *syncCtlr) RequestRegistryCredentials(_ context.Context, req *storev1.RequestRegistryCredentialsRequest) (*storev1.RequestRegistryCredentialsResponse, error) { + syncLogger.Debug("Called sync controller's RequestRegistryCredentials method", "req", req) + + // Validate requesting node ID + if req.GetRequestingNodeId() == "" { + return &storev1.RequestRegistryCredentialsResponse{ + Success: false, + ErrorMessage: "requesting node ID is required", + }, nil + } + + // Get OCI configuration to determine registry details + ociConfig := c.opts.Config().Store.OCI + syncConfig := c.opts.Config().Sync + + // Build registry URL based on configuration + registryURL := ociConfig.RegistryAddress + if registryURL == "" { + registryURL = ociconfig.DefaultRegistryAddress + } + + return &storev1.RequestRegistryCredentialsResponse{ + Success: true, + RemoteRegistryUrl: registryURL, + Credentials: &storev1.RequestRegistryCredentialsResponse_BasicAuth{ + BasicAuth: &storev1.BasicAuthCredentials{ + Username: syncConfig.Username, + Password: syncConfig.Password, + }, + }, + }, nil +} + +// validateRemoteDirectoryURL validates the format of a remote directory URL. +func validateRemoteDirectoryURL(rawURL string) error { + if rawURL == "" { + return errors.New("remote directory URL is required") + } + + // If the URL doesn't have a scheme, treat it as a raw host:port + if !strings.Contains(rawURL, "://") { + // Validate that it looks like host:port + if !strings.Contains(rawURL, ":") { + return errors.New("URL must include port (e.g., 'host:port' or 'http://host:port')") + } + + return nil + } + + // Parse as full URL + parsedURL, err := url.Parse(rawURL) + if err != nil { + return fmt.Errorf("failed to parse URL: %w", err) + } + + // Only allow http and https schemes + if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { + return fmt.Errorf("unsupported scheme '%s', only 'http' and 'https' are supported", parsedURL.Scheme) + } + + // Validate hostname + if parsedURL.Hostname() == "" { + return errors.New("URL must include a hostname") + } + + return nil +} diff --git a/server/database/config/config.go b/server/database/config/config.go index 79b088640..9836003fd 100644 --- a/server/database/config/config.go +++ b/server/database/config/config.go @@ -1,20 +1,20 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" -) - -const ( - DefaultDBType = "sqlite" -) - -type Config struct { - // DBType is the type of the database. - DBType string `json:"db_type,omitempty" mapstructure:"db_type"` - - // Config for SQLite database. - SQLite sqliteconfig.Config `json:"sqlite,omitempty" mapstructure:"sqlite"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config" +) + +const ( + DefaultDBType = "sqlite" +) + +type Config struct { + // DBType is the type of the database. + DBType string `json:"db_type,omitempty" mapstructure:"db_type"` + + // Config for SQLite database. 
+ SQLite sqliteconfig.Config `json:"sqlite,omitempty" mapstructure:"sqlite"` +} diff --git a/server/database/database.go b/server/database/database.go index cd53c2684..e038e387e 100644 --- a/server/database/database.go +++ b/server/database/database.go @@ -1,31 +1,31 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package database - -import ( - "fmt" - - "github.com/agntcy/dir/server/database/sqlite" - "github.com/agntcy/dir/server/types" -) - -type DB string - -const ( - SQLite DB = "sqlite" -) - -func New(opts types.APIOptions) (types.DatabaseAPI, error) { - switch db := DB(opts.Config().Database.DBType); db { - case SQLite: - sqliteDB, err := sqlite.New(opts.Config().Database.SQLite.DBPath) - if err != nil { - return nil, fmt.Errorf("failed to create SQLite database: %w", err) - } - - return sqliteDB, nil - default: - return nil, fmt.Errorf("unsupported database=%s", db) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package database + +import ( + "fmt" + + "github.com/agntcy/dir/server/database/sqlite" + "github.com/agntcy/dir/server/types" +) + +type DB string + +const ( + SQLite DB = "sqlite" +) + +func New(opts types.APIOptions) (types.DatabaseAPI, error) { + switch db := DB(opts.Config().Database.DBType); db { + case SQLite: + sqliteDB, err := sqlite.New(opts.Config().Database.SQLite.DBPath) + if err != nil { + return nil, fmt.Errorf("failed to create SQLite database: %w", err) + } + + return sqliteDB, nil + default: + return nil, fmt.Errorf("unsupported database=%s", db) + } +} diff --git a/server/database/sqlite/config/config.go b/server/database/sqlite/config/config.go index 492f8c8c0..49fd89548 100644 --- a/server/database/sqlite/config/config.go +++ b/server/database/sqlite/config/config.go @@ -1,13 +1,13 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -const ( - DefaultSQLiteDBPath = "/tmp/dir.db" -) - -type Config struct { - // DBPath is the path to the SQLite database file. - DBPath string `json:"db_path,omitempty" mapstructure:"db_path"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +const ( + DefaultSQLiteDBPath = "/tmp/dir.db" +) + +type Config struct { + // DBPath is the path to the SQLite database file. + DBPath string `json:"db_path,omitempty" mapstructure:"db_path"` +} diff --git a/server/database/sqlite/domain.go b/server/database/sqlite/domain.go index 1f4e05eeb..23adae992 100644 --- a/server/database/sqlite/domain.go +++ b/server/database/sqlite/domain.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "time" - - "github.com/agntcy/dir/server/types" -) - -type Domain struct { - ID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - RecordCID string `gorm:"column:record_cid;not null;index"` - DomainID uint64 `gorm:"not null"` - Name string `gorm:"not null"` -} - -func (domain *Domain) GetAnnotations() map[string]string { - // SQLite domains don't store annotations, return empty map - return make(map[string]string) -} - -func (domain *Domain) GetName() string { - return domain.Name -} - -func (domain *Domain) GetID() uint64 { - return domain.DomainID -} - -// convertDomains converts domain interfaces to SQLite Domain structs. 
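// Editor's aside: a minimal sketch tying together the two database config
// structs from the preceding hunks, using only the fields and defaults shown
// there (DBType, SQLite.DBPath, DefaultDBType, DefaultSQLiteDBPath). How this
// config reaches database.New is via types.APIOptions, which is out of scope
// for this sketch.
package main

import (
	"fmt"

	dbconfig "github.com/agntcy/dir/server/database/config"
	sqliteconfig "github.com/agntcy/dir/server/database/sqlite/config"
)

func main() {
	cfg := dbconfig.Config{
		DBType: dbconfig.DefaultDBType, // "sqlite" is the only value database.New accepts
		SQLite: sqliteconfig.Config{
			DBPath: sqliteconfig.DefaultSQLiteDBPath, // "/tmp/dir.db"
		},
	}
	fmt.Printf("db_type=%s db_path=%s\n", cfg.DBType, cfg.SQLite.DBPath)
}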
-func convertDomains(domains []types.Domain, recordCID string) []Domain { - result := make([]Domain, len(domains)) - for i, domain := range domains { - result[i] = Domain{ - RecordCID: recordCID, - DomainID: domain.GetID(), - Name: domain.GetName(), - } - } - - return result -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "time" + + "github.com/agntcy/dir/server/types" +) + +type Domain struct { + ID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + RecordCID string `gorm:"column:record_cid;not null;index"` + DomainID uint64 `gorm:"not null"` + Name string `gorm:"not null"` +} + +func (domain *Domain) GetAnnotations() map[string]string { + // SQLite domains don't store annotations, return empty map + return make(map[string]string) +} + +func (domain *Domain) GetName() string { + return domain.Name +} + +func (domain *Domain) GetID() uint64 { + return domain.DomainID +} + +// convertDomains converts domain interfaces to SQLite Domain structs. +func convertDomains(domains []types.Domain, recordCID string) []Domain { + result := make([]Domain, len(domains)) + for i, domain := range domains { + result[i] = Domain{ + RecordCID: recordCID, + DomainID: domain.GetID(), + Name: domain.GetName(), + } + } + + return result +} diff --git a/server/database/sqlite/locator.go b/server/database/sqlite/locator.go index 2b721c4b4..e838bdf6b 100644 --- a/server/database/sqlite/locator.go +++ b/server/database/sqlite/locator.go @@ -1,56 +1,56 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "time" - - "github.com/agntcy/dir/server/types" -) - -type Locator struct { - ID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - RecordCID string `gorm:"column:record_cid;not null;index"` - Type string `gorm:"not null"` - URL string `gorm:"not null"` -} - -func (locator *Locator) GetAnnotations() map[string]string { - // SQLite locators don't store annotations, return empty map - return make(map[string]string) -} - -func (locator *Locator) GetType() string { - return locator.Type -} - -func (locator *Locator) GetURL() string { - return locator.URL -} - -func (locator *Locator) GetSize() uint64 { - // SQLite locators don't store size information - return 0 -} - -func (locator *Locator) GetDigest() string { - // SQLite locators don't store digest information - return "" -} - -// convertLocators transforms interface types to SQLite structs. 
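// Editor's aside: a small sketch showing how a caller-side types.Domain
// implementation flows through the convertDomains helper above. fakeDomain is
// hypothetical; its three methods mirror exactly what the SQLite Domain struct
// itself implements.
package sqlite

import (
	"fmt"

	"github.com/agntcy/dir/server/types"
)

type fakeDomain struct {
	id   uint64
	name string
}

func (f fakeDomain) GetID() uint64                     { return f.id }
func (f fakeDomain) GetName() string                   { return f.name }
func (f fakeDomain) GetAnnotations() map[string]string { return nil }

func ExampleConvertDomains() {
	rows := convertDomains([]types.Domain{
		fakeDomain{id: 901, name: "healthcare/medical_technology"},
	}, "bafy-example-cid")
	fmt.Println(rows[0].RecordCID, rows[0].DomainID, rows[0].Name)
	// Output: bafy-example-cid 901 healthcare/medical_technology
}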
-func convertLocators(locators []types.Locator, recordCID string) []Locator { - result := make([]Locator, len(locators)) - for i, locator := range locators { - result[i] = Locator{ - RecordCID: recordCID, - Type: locator.GetType(), - URL: locator.GetURL(), - } - } - - return result -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "time" + + "github.com/agntcy/dir/server/types" +) + +type Locator struct { + ID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + RecordCID string `gorm:"column:record_cid;not null;index"` + Type string `gorm:"not null"` + URL string `gorm:"not null"` +} + +func (locator *Locator) GetAnnotations() map[string]string { + // SQLite locators don't store annotations, return empty map + return make(map[string]string) +} + +func (locator *Locator) GetType() string { + return locator.Type +} + +func (locator *Locator) GetURL() string { + return locator.URL +} + +func (locator *Locator) GetSize() uint64 { + // SQLite locators don't store size information + return 0 +} + +func (locator *Locator) GetDigest() string { + // SQLite locators don't store digest information + return "" +} + +// convertLocators transforms interface types to SQLite structs. +func convertLocators(locators []types.Locator, recordCID string) []Locator { + result := make([]Locator, len(locators)) + for i, locator := range locators { + result[i] = Locator{ + RecordCID: recordCID, + Type: locator.GetType(), + URL: locator.GetURL(), + } + } + + return result +} diff --git a/server/database/sqlite/module.go b/server/database/sqlite/module.go index 4b074017c..414e9d9a2 100644 --- a/server/database/sqlite/module.go +++ b/server/database/sqlite/module.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "time" - - "github.com/agntcy/dir/server/types" -) - -type Module struct { - ID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - RecordCID string `gorm:"column:record_cid;not null;index"` - Name string `gorm:"not null"` - ModuleID uint64 `gorm:"column:module_id"` -} - -func (module *Module) GetName() string { - return module.Name -} - -func (module *Module) GetID() uint64 { - return module.ModuleID -} - -func (module *Module) GetData() map[string]any { - // SQLite modules don't store data, return empty map - return make(map[string]any) -} - -// convertModules transforms interface types to SQLite structs. 
-func convertModules(modules []types.Module, recordCID string) []Module { - result := make([]Module, len(modules)) - for i, module := range modules { - result[i] = Module{ - RecordCID: recordCID, - Name: module.GetName(), - ModuleID: module.GetID(), - } - } - - return result -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "time" + + "github.com/agntcy/dir/server/types" +) + +type Module struct { + ID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + RecordCID string `gorm:"column:record_cid;not null;index"` + Name string `gorm:"not null"` + ModuleID uint64 `gorm:"column:module_id"` +} + +func (module *Module) GetName() string { + return module.Name +} + +func (module *Module) GetID() uint64 { + return module.ModuleID +} + +func (module *Module) GetData() map[string]any { + // SQLite modules don't store data, return empty map + return make(map[string]any) +} + +// convertModules transforms interface types to SQLite structs. +func convertModules(modules []types.Module, recordCID string) []Module { + result := make([]Module, len(modules)) + for i, module := range modules { + result[i] = Module{ + RecordCID: recordCID, + Name: module.GetName(), + ModuleID: module.GetID(), + } + } + + return result +} diff --git a/server/database/sqlite/publication.go b/server/database/sqlite/publication.go index 0b554f2e0..1a0867945 100644 --- a/server/database/sqlite/publication.go +++ b/server/database/sqlite/publication.go @@ -1,157 +1,157 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "fmt" - "time" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/google/uuid" - "google.golang.org/protobuf/encoding/protojson" - "gorm.io/gorm" -) - -type Publication struct { - GormID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - ID string `gorm:"not null;index"` - RequestJSON string `gorm:"not null"` // JSON-encoded PublishRequest - Status routingv1.PublicationStatus `gorm:"not null"` - CreatedTime string `gorm:"not null"` - LastUpdateTime string `gorm:"not null"` -} - -func (pub *Publication) GetID() string { - return pub.ID -} - -func (pub *Publication) GetRequest() *routingv1.PublishRequest { - var request routingv1.PublishRequest - if err := protojson.Unmarshal([]byte(pub.RequestJSON), &request); err != nil { - logger.Error("Failed to unmarshal publish request", "error", err) - - return nil - } - - return &request -} - -func (pub *Publication) GetStatus() routingv1.PublicationStatus { - return pub.Status -} - -func (pub *Publication) GetCreatedTime() string { - return pub.CreatedTime -} - -func (pub *Publication) GetLastUpdateTime() string { - return pub.LastUpdateTime -} - -func (d *DB) CreatePublication(request *routingv1.PublishRequest) (string, error) { - requestJSON, err := protojson.Marshal(request) - if err != nil { - return "", fmt.Errorf("failed to marshal publish request: %w", err) - } - - now := time.Now().Format(time.RFC3339) - publication := &Publication{ - ID: uuid.NewString(), - RequestJSON: string(requestJSON), - Status: routingv1.PublicationStatus_PUBLICATION_STATUS_PENDING, - CreatedTime: now, - LastUpdateTime: now, - } - - if err := d.gormDB.Create(publication).Error; err != nil { - return "", fmt.Errorf("failed to create publication: %w", err) - } - - logger.Debug("Added publication to SQLite database", "publication_id", 
publication.ID) - - return publication.ID, nil -} - -func (d *DB) GetPublicationByID(publicationID string) (types.PublicationObject, error) { - var publication Publication - if err := d.gormDB.Where("id = ?", publicationID).First(&publication).Error; err != nil { - return nil, err - } - - return &publication, nil -} - -func (d *DB) GetPublications(offset, limit int) ([]types.PublicationObject, error) { - var publications []Publication - - query := d.gormDB.Offset(offset) - - // Only apply limit if it's greater than 0 - if limit > 0 { - query = query.Limit(limit) - } - - if err := query.Find(&publications).Error; err != nil { - return nil, err - } - - // convert to types.PublicationObject - publicationObjects := make([]types.PublicationObject, len(publications)) - for i, publication := range publications { - publicationObjects[i] = &publication - } - - return publicationObjects, nil -} - -func (d *DB) GetPublicationsByStatus(status routingv1.PublicationStatus) ([]types.PublicationObject, error) { - var publications []Publication - if err := d.gormDB.Where("status = ?", status).Find(&publications).Error; err != nil { - return nil, err - } - - // convert to types.PublicationObject - publicationObjects := make([]types.PublicationObject, len(publications)) - for i, publication := range publications { - publicationObjects[i] = &publication - } - - return publicationObjects, nil -} - -func (d *DB) UpdatePublicationStatus(publicationID string, status routingv1.PublicationStatus) error { - publicationObj, err := d.GetPublicationByID(publicationID) - if err != nil { - return err - } - - publication, ok := publicationObj.(*Publication) - if !ok { - return gorm.ErrInvalidData - } - - publication.Status = status - publication.LastUpdateTime = time.Now().Format(time.RFC3339) - - if err := d.gormDB.Save(publication).Error; err != nil { - return err - } - - logger.Debug("Updated publication in SQLite database", "publication_id", publication.GetID(), "status", publication.GetStatus()) - - return nil -} - -func (d *DB) DeletePublication(publicationID string) error { - if err := d.gormDB.Where("id = ?", publicationID).Delete(&Publication{}).Error; err != nil { - return err - } - - logger.Debug("Deleted publication from SQLite database", "publication_id", publicationID) - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "fmt" + "time" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + "github.com/google/uuid" + "google.golang.org/protobuf/encoding/protojson" + "gorm.io/gorm" +) + +type Publication struct { + GormID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + ID string `gorm:"not null;index"` + RequestJSON string `gorm:"not null"` // JSON-encoded PublishRequest + Status routingv1.PublicationStatus `gorm:"not null"` + CreatedTime string `gorm:"not null"` + LastUpdateTime string `gorm:"not null"` +} + +func (pub *Publication) GetID() string { + return pub.ID +} + +func (pub *Publication) GetRequest() *routingv1.PublishRequest { + var request routingv1.PublishRequest + if err := protojson.Unmarshal([]byte(pub.RequestJSON), &request); err != nil { + logger.Error("Failed to unmarshal publish request", "error", err) + + return nil + } + + return &request +} + +func (pub *Publication) GetStatus() routingv1.PublicationStatus { + return pub.Status +} + +func (pub *Publication) GetCreatedTime() string { + return pub.CreatedTime +} + +func (pub 
*Publication) GetLastUpdateTime() string { + return pub.LastUpdateTime +} + +func (d *DB) CreatePublication(request *routingv1.PublishRequest) (string, error) { + requestJSON, err := protojson.Marshal(request) + if err != nil { + return "", fmt.Errorf("failed to marshal publish request: %w", err) + } + + now := time.Now().Format(time.RFC3339) + publication := &Publication{ + ID: uuid.NewString(), + RequestJSON: string(requestJSON), + Status: routingv1.PublicationStatus_PUBLICATION_STATUS_PENDING, + CreatedTime: now, + LastUpdateTime: now, + } + + if err := d.gormDB.Create(publication).Error; err != nil { + return "", fmt.Errorf("failed to create publication: %w", err) + } + + logger.Debug("Added publication to SQLite database", "publication_id", publication.ID) + + return publication.ID, nil +} + +func (d *DB) GetPublicationByID(publicationID string) (types.PublicationObject, error) { + var publication Publication + if err := d.gormDB.Where("id = ?", publicationID).First(&publication).Error; err != nil { + return nil, err + } + + return &publication, nil +} + +func (d *DB) GetPublications(offset, limit int) ([]types.PublicationObject, error) { + var publications []Publication + + query := d.gormDB.Offset(offset) + + // Only apply limit if it's greater than 0 + if limit > 0 { + query = query.Limit(limit) + } + + if err := query.Find(&publications).Error; err != nil { + return nil, err + } + + // convert to types.PublicationObject + publicationObjects := make([]types.PublicationObject, len(publications)) + for i, publication := range publications { + publicationObjects[i] = &publication + } + + return publicationObjects, nil +} + +func (d *DB) GetPublicationsByStatus(status routingv1.PublicationStatus) ([]types.PublicationObject, error) { + var publications []Publication + if err := d.gormDB.Where("status = ?", status).Find(&publications).Error; err != nil { + return nil, err + } + + // convert to types.PublicationObject + publicationObjects := make([]types.PublicationObject, len(publications)) + for i, publication := range publications { + publicationObjects[i] = &publication + } + + return publicationObjects, nil +} + +func (d *DB) UpdatePublicationStatus(publicationID string, status routingv1.PublicationStatus) error { + publicationObj, err := d.GetPublicationByID(publicationID) + if err != nil { + return err + } + + publication, ok := publicationObj.(*Publication) + if !ok { + return gorm.ErrInvalidData + } + + publication.Status = status + publication.LastUpdateTime = time.Now().Format(time.RFC3339) + + if err := d.gormDB.Save(publication).Error; err != nil { + return err + } + + logger.Debug("Updated publication in SQLite database", "publication_id", publication.GetID(), "status", publication.GetStatus()) + + return nil +} + +func (d *DB) DeletePublication(publicationID string) error { + if err := d.gormDB.Where("id = ?", publicationID).Delete(&Publication{}).Error; err != nil { + return err + } + + logger.Debug("Deleted publication from SQLite database", "publication_id", publicationID) + + return nil +} diff --git a/server/database/sqlite/record.go b/server/database/sqlite/record.go index 80dce5059..b39921c6a 100644 --- a/server/database/sqlite/record.go +++ b/server/database/sqlite/record.go @@ -1,371 +1,371 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/agntcy/dir/server/database/utils" - "github.com/agntcy/dir/server/types" - 
"gorm.io/gorm" -) - -type Record struct { - CreatedAt time.Time - UpdatedAt time.Time - RecordCID string `gorm:"column:record_cid;primarykey;not null"` - Name string `gorm:"not null"` - Version string `gorm:"not null"` - SchemaVersion string `gorm:"column:schema_version"` - OASFCreatedAt string `gorm:"column:oasf_created_at"` - Authors []string `gorm:"column:authors;serializer:json"` // Stored as JSON array - - Skills []Skill `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` - Locators []Locator `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` - Modules []Module `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` - Domains []Domain `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` -} - -// Implement central Record interface. -func (r *Record) GetCid() string { - return r.RecordCID -} - -func (r *Record) GetRecordData() (types.RecordData, error) { - return &RecordDataAdapter{record: r}, nil -} - -// RecordDataAdapter adapts SQLite Record to central RecordData interface. -type RecordDataAdapter struct { - record *Record -} - -func (r *RecordDataAdapter) GetAnnotations() map[string]string { - // SQLite records don't store annotations, return empty map - return make(map[string]string) -} - -func (r *RecordDataAdapter) GetDomains() []types.Domain { - domains := make([]types.Domain, len(r.record.Domains)) - for i, domain := range r.record.Domains { - domains[i] = &domain - } - - return domains -} - -func (r *RecordDataAdapter) GetSchemaVersion() string { - if r.record.SchemaVersion != "" { - return r.record.SchemaVersion - } - // Default schema version for search records - return "v1" -} - -func (r *RecordDataAdapter) GetName() string { - return r.record.Name -} - -func (r *RecordDataAdapter) GetVersion() string { - return r.record.Version -} - -func (r *RecordDataAdapter) GetDescription() string { - // SQLite records don't store description - return "" -} - -func (r *RecordDataAdapter) GetAuthors() []string { - return r.record.Authors -} - -func (r *RecordDataAdapter) GetCreatedAt() string { - if r.record.OASFCreatedAt != "" { - return r.record.OASFCreatedAt - } - - return r.record.CreatedAt.Format("2006-01-02T15:04:05Z") -} - -func (r *RecordDataAdapter) GetSkills() []types.Skill { - skills := make([]types.Skill, len(r.record.Skills)) - for i, skill := range r.record.Skills { - skills[i] = &skill - } - - return skills -} - -func (r *RecordDataAdapter) GetLocators() []types.Locator { - locators := make([]types.Locator, len(r.record.Locators)) - for i, locator := range r.record.Locators { - locators[i] = &locator - } - - return locators -} - -func (r *RecordDataAdapter) GetModules() []types.Module { - modules := make([]types.Module, len(r.record.Modules)) - for i, module := range r.record.Modules { - modules[i] = &module - } - - return modules -} - -func (r *RecordDataAdapter) GetSignature() types.Signature { - // SQLite records don't store signature information - return nil -} - -func (r *RecordDataAdapter) GetPreviousRecordCid() string { - // SQLite records don't store previous record CID - return "" -} - -func (d *DB) AddRecord(record types.Record) error { - // Extract record data - recordData, err := record.GetRecordData() - if err != nil { - return fmt.Errorf("failed to get record data: %w", err) - } - - // Get CID - cid := record.GetCid() - - // Check if record already exists - var existingRecord Record - - err = d.gormDB.Where("record_cid = ?", 
cid).First(&existingRecord).Error - if err == nil { - // Record exists, skip insert - logger.Debug("Record already exists in search database, skipping insert", "record_cid", existingRecord.RecordCID, "cid", cid) - - return nil - } - - // If error is not "record not found", return the error - if !errors.Is(err, gorm.ErrRecordNotFound) { - return fmt.Errorf("failed to check existing record: %w", err) - } - - // Build complete Record with all associations - sqliteRecord := &Record{ - RecordCID: cid, - Name: recordData.GetName(), - Version: recordData.GetVersion(), - SchemaVersion: recordData.GetSchemaVersion(), - OASFCreatedAt: recordData.GetCreatedAt(), - Authors: recordData.GetAuthors(), - Skills: convertSkills(recordData.GetSkills(), cid), - Locators: convertLocators(recordData.GetLocators(), cid), - Modules: convertModules(recordData.GetModules(), cid), - Domains: convertDomains(recordData.GetDomains(), cid), - } - - // Let GORM handle the entire creation with associations - if err := d.gormDB.Create(sqliteRecord).Error; err != nil { - return fmt.Errorf("failed to add record to SQLite database: %w", err) - } - - logger.Debug("Added new record with associations to SQLite database", "record_cid", sqliteRecord.RecordCID, "cid", cid, - "skills", len(sqliteRecord.Skills), "locators", len(sqliteRecord.Locators), "modules", len(sqliteRecord.Modules), - "domains", len(sqliteRecord.Domains)) - - return nil -} - -// GetRecordCIDs retrieves only record CIDs based on the provided options. -// This is optimized for cases where only CIDs are needed, avoiding expensive joins and preloads. -func (d *DB) GetRecordCIDs(opts ...types.FilterOption) ([]string, error) { - // Create default configuration. - cfg := &types.RecordFilters{} - - // Apply all options. - for _, opt := range opts { - if opt == nil { - return nil, errors.New("nil option provided") - } - - opt(cfg) - } - - // Start with the base query for records - only select CID for efficiency. - query := d.gormDB.Model(&Record{}).Select("records.record_cid").Distinct() - - // Apply pagination. - if cfg.Limit > 0 { - query = query.Limit(cfg.Limit) - } - - if cfg.Offset > 0 { - query = query.Offset(cfg.Offset) - } - - // Apply all filters. - query = d.handleFilterOptions(query, cfg) - - // Execute the query to get only CIDs (no preloading needed). - var cids []string - if err := query.Pluck("record_cid", &cids).Error; err != nil { - return nil, fmt.Errorf("failed to query record CIDs: %w", err) - } - - // Return CIDs directly - no need for wrapper objects. - return cids, nil -} - -// RemoveRecord removes a record from the search database by CID. -// Uses CASCADE DELETE to automatically remove related Skills, Locators, and Modules. -func (d *DB) RemoveRecord(cid string) error { - result := d.gormDB.Where("record_cid = ?", cid).Delete(&Record{}) - - if result.Error != nil { - return fmt.Errorf("failed to remove record from search database: %w", result.Error) - } - - if result.RowsAffected == 0 { - // Record not found in search database (might not have been indexed) - logger.Debug("No record found in search database", "cid", cid) - - return nil // Not an error - might be a storage-only record - } - - logger.Debug("Removed record from search database", "cid", cid, "rows_affected", result.RowsAffected) - - return nil -} - -// handleFilterOptions applies the provided filters to the query. 
-// -//nolint:gocognit,cyclop,nestif,gocyclo -func (d *DB) handleFilterOptions(query *gorm.DB, cfg *types.RecordFilters) *gorm.DB { - // Apply record-level filters with wildcard support. - if len(cfg.Names) > 0 { - condition, args := utils.BuildWildcardCondition("records.name", cfg.Names) - if condition != "" { - query = query.Where(condition, args...) - } - } - - if len(cfg.Versions) > 0 { - condition, args := utils.BuildComparisonConditions("records.version", cfg.Versions) - if condition != "" { - query = query.Where(condition, args...) - } - } - - // Handle skill filters with wildcard support. - if len(cfg.SkillIDs) > 0 || len(cfg.SkillNames) > 0 { - query = query.Joins("JOIN skills ON skills.record_cid = records.record_cid") - - if len(cfg.SkillIDs) > 0 { - query = query.Where("skills.skill_id IN ?", cfg.SkillIDs) - } - - if len(cfg.SkillNames) > 0 { - condition, args := utils.BuildWildcardCondition("skills.name", cfg.SkillNames) - if condition != "" { - query = query.Where(condition, args...) - } - } - } - - // Handle locator filters with wildcard support. - if len(cfg.LocatorTypes) > 0 || len(cfg.LocatorURLs) > 0 { - query = query.Joins("JOIN locators ON locators.record_cid = records.record_cid") - - if len(cfg.LocatorTypes) > 0 { - condition, args := utils.BuildWildcardCondition("locators.type", cfg.LocatorTypes) - if condition != "" { - query = query.Where(condition, args...) - } - } - - if len(cfg.LocatorURLs) > 0 { - condition, args := utils.BuildWildcardCondition("locators.url", cfg.LocatorURLs) - if condition != "" { - query = query.Where(condition, args...) - } - } - } - - // Handle module filters with wildcard support. - if len(cfg.ModuleNames) > 0 { - query = query.Joins("JOIN modules ON modules.record_cid = records.record_cid") - - if len(cfg.ModuleNames) > 0 { - condition, args := utils.BuildWildcardCondition("modules.name", cfg.ModuleNames) - if condition != "" { - query = query.Where(condition, args...) - } - } - } - - // Handle domain filters with wildcard support. - if len(cfg.DomainIDs) > 0 || len(cfg.DomainNames) > 0 { - query = query.Joins("JOIN domains ON domains.record_cid = records.record_cid") - - if len(cfg.DomainIDs) > 0 { - query = query.Where("domains.domain_id IN ?", cfg.DomainIDs) - } - - if len(cfg.DomainNames) > 0 { - condition, args := utils.BuildWildcardCondition("domains.name", cfg.DomainNames) - if condition != "" { - query = query.Where(condition, args...) - } - } - } - - // Handle created_at filter with comparison operator support. - if len(cfg.CreatedAts) > 0 { - condition, args := utils.BuildComparisonConditions("records.oasf_created_at", cfg.CreatedAts) - if condition != "" { - query = query.Where(condition, args...) - } - } - - // Handle author filters with wildcard support (searching in JSON array). - if len(cfg.Authors) > 0 { - // Build OR conditions for each author pattern against the JSON string - var authorConditions []string - - var authorArgs []interface{} - - for _, author := range cfg.Authors { - condition, arg := utils.BuildSingleWildcardCondition("records.authors", "*"+author+"*") - authorConditions = append(authorConditions, condition) - authorArgs = append(authorArgs, arg) - } - - if len(authorConditions) > 0 { - query = query.Where(strings.Join(authorConditions, " OR "), authorArgs...) - } - } - - // Handle schema version filter with comparison operator support. 
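// Editor's aside: the utils.BuildWildcardCondition helper used throughout
// handleFilterOptions is defined elsewhere in the repo. A plausible minimal
// reading of its contract, assuming the user-facing "*" wildcard maps to SQL
// LIKE's "%", is sketched below. This illustrates only the call shape the hunk
// relies on (a condition string plus args for gorm's Where), not the actual
// implementation.
package main

import (
	"fmt"
	"strings"
)

// buildWildcardCondition ORs one LIKE clause per pattern, translating "*" to "%".
func buildWildcardCondition(column string, patterns []string) (string, []any) {
	clauses := make([]string, 0, len(patterns))
	args := make([]any, 0, len(patterns))
	for _, p := range patterns {
		clauses = append(clauses, column+" LIKE ?")
		args = append(args, strings.ReplaceAll(p, "*", "%"))
	}
	return strings.Join(clauses, " OR "), args
}

func main() {
	cond, args := buildWildcardCondition("records.name", []string{"*cisco*", "directory.agntcy.org/*"})
	fmt.Println(cond) // records.name LIKE ? OR records.name LIKE ?
	fmt.Println(args) // [%cisco% directory.agntcy.org/%]
}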
- if len(cfg.SchemaVersions) > 0 { - condition, args := utils.BuildComparisonConditions("records.schema_version", cfg.SchemaVersions) - if condition != "" { - query = query.Where(condition, args...) - } - } - - // Handle module ID filters. - if len(cfg.ModuleIDs) > 0 { - // Check if modules join already exists - if len(cfg.ModuleNames) == 0 { - query = query.Joins("JOIN modules ON modules.record_cid = records.record_cid") - } - - query = query.Where("modules.module_id IN ?", cfg.ModuleIDs) - } - - return query -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/agntcy/dir/server/database/utils" + "github.com/agntcy/dir/server/types" + "gorm.io/gorm" +) + +type Record struct { + CreatedAt time.Time + UpdatedAt time.Time + RecordCID string `gorm:"column:record_cid;primarykey;not null"` + Name string `gorm:"not null"` + Version string `gorm:"not null"` + SchemaVersion string `gorm:"column:schema_version"` + OASFCreatedAt string `gorm:"column:oasf_created_at"` + Authors []string `gorm:"column:authors;serializer:json"` // Stored as JSON array + + Skills []Skill `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` + Locators []Locator `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` + Modules []Module `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` + Domains []Domain `gorm:"foreignKey:RecordCID;references:RecordCID;constraint:OnDelete:CASCADE"` +} + +// Implement central Record interface. +func (r *Record) GetCid() string { + return r.RecordCID +} + +func (r *Record) GetRecordData() (types.RecordData, error) { + return &RecordDataAdapter{record: r}, nil +} + +// RecordDataAdapter adapts SQLite Record to central RecordData interface. 
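// Editor's note: the slice-building loops in the adapter getters below (and in
// GetPublications in publication.go) take the address of the range variable,
// e.g. "domains[i] = &domain". Under Go 1.22+ each iteration gets a fresh
// variable, so this is correct; on older toolchains every element would alias
// the same variable. The module's Go version is not visible in this hunk, so
// as a hedged, version-independent alternative the loop can index the backing
// slice instead:
package sqlite

import "github.com/agntcy/dir/server/types"

func domainsFromRecord(recordDomains []Domain) []types.Domain {
	domains := make([]types.Domain, len(recordDomains))
	for i := range recordDomains {
		domains[i] = &recordDomains[i] // address of the slice element, safe on any Go version
	}
	return domains
}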
+type RecordDataAdapter struct { + record *Record +} + +func (r *RecordDataAdapter) GetAnnotations() map[string]string { + // SQLite records don't store annotations, return empty map + return make(map[string]string) +} + +func (r *RecordDataAdapter) GetDomains() []types.Domain { + domains := make([]types.Domain, len(r.record.Domains)) + for i, domain := range r.record.Domains { + domains[i] = &domain + } + + return domains +} + +func (r *RecordDataAdapter) GetSchemaVersion() string { + if r.record.SchemaVersion != "" { + return r.record.SchemaVersion + } + // Default schema version for search records + return "v1" +} + +func (r *RecordDataAdapter) GetName() string { + return r.record.Name +} + +func (r *RecordDataAdapter) GetVersion() string { + return r.record.Version +} + +func (r *RecordDataAdapter) GetDescription() string { + // SQLite records don't store description + return "" +} + +func (r *RecordDataAdapter) GetAuthors() []string { + return r.record.Authors +} + +func (r *RecordDataAdapter) GetCreatedAt() string { + if r.record.OASFCreatedAt != "" { + return r.record.OASFCreatedAt + } + + return r.record.CreatedAt.Format("2006-01-02T15:04:05Z") +} + +func (r *RecordDataAdapter) GetSkills() []types.Skill { + skills := make([]types.Skill, len(r.record.Skills)) + for i, skill := range r.record.Skills { + skills[i] = &skill + } + + return skills +} + +func (r *RecordDataAdapter) GetLocators() []types.Locator { + locators := make([]types.Locator, len(r.record.Locators)) + for i, locator := range r.record.Locators { + locators[i] = &locator + } + + return locators +} + +func (r *RecordDataAdapter) GetModules() []types.Module { + modules := make([]types.Module, len(r.record.Modules)) + for i, module := range r.record.Modules { + modules[i] = &module + } + + return modules +} + +func (r *RecordDataAdapter) GetSignature() types.Signature { + // SQLite records don't store signature information + return nil +} + +func (r *RecordDataAdapter) GetPreviousRecordCid() string { + // SQLite records don't store previous record CID + return "" +} + +func (d *DB) AddRecord(record types.Record) error { + // Extract record data + recordData, err := record.GetRecordData() + if err != nil { + return fmt.Errorf("failed to get record data: %w", err) + } + + // Get CID + cid := record.GetCid() + + // Check if record already exists + var existingRecord Record + + err = d.gormDB.Where("record_cid = ?", cid).First(&existingRecord).Error + if err == nil { + // Record exists, skip insert + logger.Debug("Record already exists in search database, skipping insert", "record_cid", existingRecord.RecordCID, "cid", cid) + + return nil + } + + // If error is not "record not found", return the error + if !errors.Is(err, gorm.ErrRecordNotFound) { + return fmt.Errorf("failed to check existing record: %w", err) + } + + // Build complete Record with all associations + sqliteRecord := &Record{ + RecordCID: cid, + Name: recordData.GetName(), + Version: recordData.GetVersion(), + SchemaVersion: recordData.GetSchemaVersion(), + OASFCreatedAt: recordData.GetCreatedAt(), + Authors: recordData.GetAuthors(), + Skills: convertSkills(recordData.GetSkills(), cid), + Locators: convertLocators(recordData.GetLocators(), cid), + Modules: convertModules(recordData.GetModules(), cid), + Domains: convertDomains(recordData.GetDomains(), cid), + } + + // Let GORM handle the entire creation with associations + if err := d.gormDB.Create(sqliteRecord).Error; err != nil { + return fmt.Errorf("failed to add record to SQLite database: %w", err) + } + + 
logger.Debug("Added new record with associations to SQLite database", "record_cid", sqliteRecord.RecordCID, "cid", cid, + "skills", len(sqliteRecord.Skills), "locators", len(sqliteRecord.Locators), "modules", len(sqliteRecord.Modules), + "domains", len(sqliteRecord.Domains)) + + return nil +} + +// GetRecordCIDs retrieves only record CIDs based on the provided options. +// This is optimized for cases where only CIDs are needed, avoiding expensive joins and preloads. +func (d *DB) GetRecordCIDs(opts ...types.FilterOption) ([]string, error) { + // Create default configuration. + cfg := &types.RecordFilters{} + + // Apply all options. + for _, opt := range opts { + if opt == nil { + return nil, errors.New("nil option provided") + } + + opt(cfg) + } + + // Start with the base query for records - only select CID for efficiency. + query := d.gormDB.Model(&Record{}).Select("records.record_cid").Distinct() + + // Apply pagination. + if cfg.Limit > 0 { + query = query.Limit(cfg.Limit) + } + + if cfg.Offset > 0 { + query = query.Offset(cfg.Offset) + } + + // Apply all filters. + query = d.handleFilterOptions(query, cfg) + + // Execute the query to get only CIDs (no preloading needed). + var cids []string + if err := query.Pluck("record_cid", &cids).Error; err != nil { + return nil, fmt.Errorf("failed to query record CIDs: %w", err) + } + + // Return CIDs directly - no need for wrapper objects. + return cids, nil +} + +// RemoveRecord removes a record from the search database by CID. +// Uses CASCADE DELETE to automatically remove related Skills, Locators, and Modules. +func (d *DB) RemoveRecord(cid string) error { + result := d.gormDB.Where("record_cid = ?", cid).Delete(&Record{}) + + if result.Error != nil { + return fmt.Errorf("failed to remove record from search database: %w", result.Error) + } + + if result.RowsAffected == 0 { + // Record not found in search database (might not have been indexed) + logger.Debug("No record found in search database", "cid", cid) + + return nil // Not an error - might be a storage-only record + } + + logger.Debug("Removed record from search database", "cid", cid, "rows_affected", result.RowsAffected) + + return nil +} + +// handleFilterOptions applies the provided filters to the query. +// +//nolint:gocognit,cyclop,nestif,gocyclo +func (d *DB) handleFilterOptions(query *gorm.DB, cfg *types.RecordFilters) *gorm.DB { + // Apply record-level filters with wildcard support. + if len(cfg.Names) > 0 { + condition, args := utils.BuildWildcardCondition("records.name", cfg.Names) + if condition != "" { + query = query.Where(condition, args...) + } + } + + if len(cfg.Versions) > 0 { + condition, args := utils.BuildComparisonConditions("records.version", cfg.Versions) + if condition != "" { + query = query.Where(condition, args...) + } + } + + // Handle skill filters with wildcard support. + if len(cfg.SkillIDs) > 0 || len(cfg.SkillNames) > 0 { + query = query.Joins("JOIN skills ON skills.record_cid = records.record_cid") + + if len(cfg.SkillIDs) > 0 { + query = query.Where("skills.skill_id IN ?", cfg.SkillIDs) + } + + if len(cfg.SkillNames) > 0 { + condition, args := utils.BuildWildcardCondition("skills.name", cfg.SkillNames) + if condition != "" { + query = query.Where(condition, args...) + } + } + } + + // Handle locator filters with wildcard support. 
+ if len(cfg.LocatorTypes) > 0 || len(cfg.LocatorURLs) > 0 { + query = query.Joins("JOIN locators ON locators.record_cid = records.record_cid") + + if len(cfg.LocatorTypes) > 0 { + condition, args := utils.BuildWildcardCondition("locators.type", cfg.LocatorTypes) + if condition != "" { + query = query.Where(condition, args...) + } + } + + if len(cfg.LocatorURLs) > 0 { + condition, args := utils.BuildWildcardCondition("locators.url", cfg.LocatorURLs) + if condition != "" { + query = query.Where(condition, args...) + } + } + } + + // Handle module filters with wildcard support. + if len(cfg.ModuleNames) > 0 { + query = query.Joins("JOIN modules ON modules.record_cid = records.record_cid") + + if len(cfg.ModuleNames) > 0 { + condition, args := utils.BuildWildcardCondition("modules.name", cfg.ModuleNames) + if condition != "" { + query = query.Where(condition, args...) + } + } + } + + // Handle domain filters with wildcard support. + if len(cfg.DomainIDs) > 0 || len(cfg.DomainNames) > 0 { + query = query.Joins("JOIN domains ON domains.record_cid = records.record_cid") + + if len(cfg.DomainIDs) > 0 { + query = query.Where("domains.domain_id IN ?", cfg.DomainIDs) + } + + if len(cfg.DomainNames) > 0 { + condition, args := utils.BuildWildcardCondition("domains.name", cfg.DomainNames) + if condition != "" { + query = query.Where(condition, args...) + } + } + } + + // Handle created_at filter with comparison operator support. + if len(cfg.CreatedAts) > 0 { + condition, args := utils.BuildComparisonConditions("records.oasf_created_at", cfg.CreatedAts) + if condition != "" { + query = query.Where(condition, args...) + } + } + + // Handle author filters with wildcard support (searching in JSON array). + if len(cfg.Authors) > 0 { + // Build OR conditions for each author pattern against the JSON string + var authorConditions []string + + var authorArgs []interface{} + + for _, author := range cfg.Authors { + condition, arg := utils.BuildSingleWildcardCondition("records.authors", "*"+author+"*") + authorConditions = append(authorConditions, condition) + authorArgs = append(authorArgs, arg) + } + + if len(authorConditions) > 0 { + query = query.Where(strings.Join(authorConditions, " OR "), authorArgs...) + } + } + + // Handle schema version filter with comparison operator support. + if len(cfg.SchemaVersions) > 0 { + condition, args := utils.BuildComparisonConditions("records.schema_version", cfg.SchemaVersions) + if condition != "" { + query = query.Where(condition, args...) + } + } + + // Handle module ID filters. + if len(cfg.ModuleIDs) > 0 { + // Check if modules join already exists + if len(cfg.ModuleNames) == 0 { + query = query.Joins("JOIN modules ON modules.record_cid = records.record_cid") + } + + query = query.Where("modules.module_id IN ?", cfg.ModuleIDs) + } + + return query +} diff --git a/server/database/sqlite/record_test.go b/server/database/sqlite/record_test.go index 1f1ad8015..f93ee24a9 100644 --- a/server/database/sqlite/record_test.go +++ b/server/database/sqlite/record_test.go @@ -1,369 +1,369 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "testing" - - "github.com/agntcy/dir/server/types" - "github.com/glebarez/sqlite" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gorm.io/gorm" -) - -// Test helpers implementing types interfaces. 
- -type testRecord struct { - cid string - data *testRecordData -} - -func (r *testRecord) GetCid() string { return r.cid } -func (r *testRecord) GetRecordData() (types.RecordData, error) { return r.data, nil } - -type testRecordData struct { - name, version, schemaVersion, createdAt string - authors []string - skills []types.Skill - locators []types.Locator - modules []types.Module - domains []types.Domain -} - -func (d *testRecordData) GetName() string { return d.name } -func (d *testRecordData) GetVersion() string { return d.version } -func (d *testRecordData) GetSchemaVersion() string { return d.schemaVersion } -func (d *testRecordData) GetCreatedAt() string { return d.createdAt } -func (d *testRecordData) GetAuthors() []string { return d.authors } -func (d *testRecordData) GetSkills() []types.Skill { return d.skills } -func (d *testRecordData) GetLocators() []types.Locator { return d.locators } -func (d *testRecordData) GetModules() []types.Module { return d.modules } -func (d *testRecordData) GetDomains() []types.Domain { return d.domains } -func (d *testRecordData) GetDescription() string { return "" } -func (d *testRecordData) GetAnnotations() map[string]string { return nil } -func (d *testRecordData) GetSignature() types.Signature { return nil } -func (d *testRecordData) GetPreviousRecordCid() string { return "" } - -type testSkill struct { - id uint64 - name string -} - -func (s *testSkill) GetID() uint64 { return s.id } -func (s *testSkill) GetName() string { return s.name } -func (s *testSkill) GetAnnotations() map[string]string { return nil } - -type testLocator struct{ locType, url string } - -func (l *testLocator) GetType() string { return l.locType } -func (l *testLocator) GetURL() string { return l.url } -func (l *testLocator) GetSize() uint64 { return 0 } -func (l *testLocator) GetDigest() string { return "" } -func (l *testLocator) GetAnnotations() map[string]string { return nil } - -type testModule struct { - id uint64 - name string -} - -func (m *testModule) GetID() uint64 { return m.id } -func (m *testModule) GetName() string { return m.name } -func (m *testModule) GetData() map[string]any { return nil } - -type testDomain struct { - id uint64 - name string -} - -func (d *testDomain) GetID() uint64 { return d.id } -func (d *testDomain) GetName() string { return d.name } -func (d *testDomain) GetAnnotations() map[string]string { return nil } - -func setupTestDB(t *testing.T) *DB { - t.Helper() - - db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{Logger: newCustomLogger()}) - require.NoError(t, err) - require.NoError(t, db.AutoMigrate(&Record{}, &Skill{}, &Locator{}, &Module{}, &Domain{}, &Sync{})) - - return &DB{gormDB: db} -} - -// Test fixtures based on OASF 0.8.0 schema. -// Domains, skills, and modules use real IDs from the schema. -var ( - // Marketing strategy agent - uses NLG, creative content, marketing domain. 
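// Editor's note: setupTestDB above opens "file::memory:". In SQLite, a plain
// in-memory DSN is private to each connection, and database/sql (under GORM)
// pools several connections, so tables migrated on one connection may not be
// visible on another. These tests may still pass when operations happen to
// reuse one pooled connection, but that is not guaranteed. A hedged sketch of
// the two usual hardening options (the glebarez driver's exact DSN handling is
// not shown in this patch):
package sqlite

import (
	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
)

func openSharedInMemoryDB() (*gorm.DB, error) {
	// Option 1: shared-cache DSN so every pooled connection sees one database.
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		return nil, err
	}
	// Option 2 (belt and braces): cap the pool at a single connection.
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}
	sqlDB.SetMaxOpenConns(1)
	return db, nil
}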
- marketingAgent = &testRecord{ - cid: "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", - data: &testRecordData{ - name: "directory.agntcy.org/cisco/marketing-strategy", - version: "1.0.0", - schemaVersion: "0.8.0", - createdAt: "2024-01-15T10:30:00Z", - authors: []string{"alice@cisco.com", "bob@cisco.com"}, - skills: []types.Skill{ - &testSkill{id: 10201, name: "natural_language_processing/natural_language_generation/text_completion"}, - &testSkill{id: 104, name: "natural_language_processing/creative_content"}, - }, - locators: []types.Locator{ - &testLocator{locType: "docker_image", url: "ghcr.io/agntcy/marketing-strategy:v1.0.0"}, - }, - modules: []types.Module{ - &testModule{id: 201, name: "integration/acp"}, - }, - domains: []types.Domain{ - &testDomain{id: 2405, name: "marketing_and_advertising/marketing_analytics"}, - &testDomain{id: 2403, name: "marketing_and_advertising/digital_marketing"}, - }, - }, - } - - // Healthcare assistant - uses RAG, medical domain. - healthcareAgent = &testRecord{ - cid: "bafybeihkoviema7g3gxyt6la7b7kbblo2hm7zgi3f6d67dqd7wy3yqhqxu", - data: &testRecordData{ - name: "directory.agntcy.org/medtech/health-assistant", - version: "2.0.0", - schemaVersion: "0.7.0", - createdAt: "2024-06-20T14:45:00Z", - authors: []string{"charlie@medtech.io"}, - skills: []types.Skill{ - &testSkill{id: 601, name: "retrieval_augmented_generation/retrieval_of_information"}, - &testSkill{id: 10302, name: "natural_language_processing/information_retrieval_synthesis/question_answering"}, - }, - locators: []types.Locator{ - &testLocator{locType: "source_code", url: "https://github.com/medtech/health-assistant"}, - }, - modules: []types.Module{ - &testModule{id: 202, name: "integration/mcp"}, - &testModule{id: 102, name: "core/llm"}, - }, - domains: []types.Domain{ - &testDomain{id: 901, name: "healthcare/medical_technology"}, - &testDomain{id: 902, name: "healthcare/telemedicine"}, - }, - }, - } - - // Code assistant - uses coding skills, software engineering domain. 
- codeAssistant = &testRecord{ - cid: "bafybeihdwdcefgh4dqkjv67uzcmw7ojzge6uyuvma5kw7bzydb56wxfao", - data: &testRecordData{ - name: "directory.agntcy.org/devtools/code-assistant", - version: "1.0.0", - schemaVersion: "0.8.0", - createdAt: "2024-03-10T09:00:00Z", - authors: []string{"alice@cisco.com"}, - skills: []types.Skill{ - &testSkill{id: 50201, name: "analytical_skills/coding_skills/text_to_code"}, - &testSkill{id: 50204, name: "analytical_skills/coding_skills/code_optimization"}, - }, - locators: []types.Locator{ - &testLocator{locType: "docker_image", url: "ghcr.io/devtools/code-assistant:v1.0.0"}, - }, - modules: []types.Module{}, - domains: []types.Domain{ - &testDomain{id: 102, name: "technology/software_engineering"}, - &testDomain{id: 10201, name: "technology/software_engineering/software_development"}, - }, - }, - } -) - -func seedDB(t *testing.T, db *DB) { - t.Helper() - - for _, r := range []types.Record{marketingAgent, healthcareAgent, codeAssistant} { - require.NoError(t, db.AddRecord(r)) - } -} - -// CRUD tests - -func TestAddRecord(t *testing.T) { - db := setupTestDB(t) - require.NoError(t, db.AddRecord(marketingAgent)) - - cids, err := db.GetRecordCIDs() - require.NoError(t, err) - assert.Equal(t, []string{"bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi"}, cids) -} - -func TestAddRecord_Idempotent(t *testing.T) { - db := setupTestDB(t) - require.NoError(t, db.AddRecord(marketingAgent)) - require.NoError(t, db.AddRecord(marketingAgent)) - - cids, err := db.GetRecordCIDs() - require.NoError(t, err) - assert.Len(t, cids, 1) -} - -func TestRemoveRecord(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - require.NoError(t, db.RemoveRecord("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")) - - cids, err := db.GetRecordCIDs() - require.NoError(t, err) - assert.Len(t, cids, 2) - assert.NotContains(t, cids, "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") -} - -func TestRemoveRecord_NotFound(t *testing.T) { - db := setupTestDB(t) - err := db.RemoveRecord("nonexistent") - require.NoError(t, err) -} - -// Filter tests - -func TestGetRecordCIDs_Pagination(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - cids, _ := db.GetRecordCIDs(types.WithLimit(2)) - assert.Len(t, cids, 2) - - cids, _ = db.GetRecordCIDs(types.WithOffset(2)) - assert.Len(t, cids, 1) -} - -func TestGetRecordCIDs_Wildcards(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - tests := []struct { - pattern string - expected int - }{ - {"*cisco*", 1}, // marketing only (name contains cisco) - {"*medtech*", 1}, // healthcare only - {"directory.agntcy.org/*", 3}, // all agents - {"*assistant*", 2}, // healthcare + code (both have "assistant") - } - for _, tc := range tests { - cids, _ := db.GetRecordCIDs(types.WithNames(tc.pattern)) - assert.Len(t, cids, tc.expected, "pattern: %s", tc.pattern) - } -} - -func TestGetRecordCIDs_ComparisonOperators(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - tests := []struct { - name string - opts []types.FilterOption - expected int - }{ - // Version comparisons - {"version >=2.0.0", []types.FilterOption{types.WithVersions(">=2.0.0")}, 1}, - {"version <2.0.0", []types.FilterOption{types.WithVersions("<2.0.0")}, 2}, - {"version =1.0.0", []types.FilterOption{types.WithVersions("=1.0.0")}, 2}, - {"version range", []types.FilterOption{types.WithVersions(">=1.0.0", "<2.0.0")}, 2}, - - // CreatedAt comparisons (ISO format) - {"created >=2024-06-01", []types.FilterOption{types.WithCreatedAts(">=2024-06-01")}, 1}, - 
{"created <2024-04-01", []types.FilterOption{types.WithCreatedAts("<2024-04-01")}, 2}, - {"created Q1 range", []types.FilterOption{types.WithCreatedAts(">=2024-01-01", "<2024-04-01")}, 2}, - - // Schema version - {"schema 0.8.0", []types.FilterOption{types.WithSchemaVersions("0.8.0")}, 2}, - {"schema 0.7.*", []types.FilterOption{types.WithSchemaVersions("0.7.*")}, 1}, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - cids, err := db.GetRecordCIDs(tc.opts...) - require.NoError(t, err) - assert.Len(t, cids, tc.expected) - }) - } -} - -func TestGetRecordCIDs_Authors(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - // alice@cisco.com is author of marketing + code agents - cids, _ := db.GetRecordCIDs(types.WithAuthors("alice@cisco.com")) - assert.Len(t, cids, 2) - - cids, _ = db.GetRecordCIDs(types.WithAuthors("*@medtech.io")) - assert.Len(t, cids, 1) -} - -func TestGetRecordCIDs_RelatedTables(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - tests := []struct { - name string - opts []types.FilterOption - expected int - }{ - // Skills by name pattern - {"skill nlp/*", []types.FilterOption{types.WithSkillNames("natural_language_processing/*")}, 2}, - {"skill coding", []types.FilterOption{types.WithSkillNames("*coding*")}, 1}, - {"skill RAG", []types.FilterOption{types.WithSkillNames("retrieval_augmented_generation/*")}, 1}, - - // Skills by ID - {"skill ID text_completion", []types.FilterOption{types.WithSkillIDs(10201)}, 1}, - {"skill ID text_to_code", []types.FilterOption{types.WithSkillIDs(50201)}, 1}, - - // Locators - {"locator docker", []types.FilterOption{types.WithLocatorTypes("docker_image")}, 2}, - {"locator source", []types.FilterOption{types.WithLocatorTypes("source_code")}, 1}, - {"locator ghcr.io", []types.FilterOption{types.WithLocatorURLs("ghcr.io/*")}, 2}, - - // Modules - {"module acp", []types.FilterOption{types.WithModuleNames("integration/acp")}, 1}, - {"module mcp", []types.FilterOption{types.WithModuleNames("integration/mcp")}, 1}, - {"module ID 201", []types.FilterOption{types.WithModuleIDs(201)}, 1}, - - // Domains by name - {"domain marketing", []types.FilterOption{types.WithDomainNames("marketing_and_advertising/*")}, 1}, - {"domain healthcare", []types.FilterOption{types.WithDomainNames("healthcare/*")}, 1}, - {"domain technology", []types.FilterOption{types.WithDomainNames("technology/*")}, 1}, - - // Domains by ID - {"domain ID 901", []types.FilterOption{types.WithDomainIDs(901)}, 1}, // healthcare/medical_technology - {"domain ID 102", []types.FilterOption{types.WithDomainIDs(102)}, 1}, // technology/software_engineering - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - cids, err := db.GetRecordCIDs(tc.opts...) 
- require.NoError(t, err) - assert.Len(t, cids, tc.expected) - }) - } -} - -func TestGetRecordCIDs_CombinedFilters(t *testing.T) { - db := setupTestDB(t) - seedDB(t, db) - - // AND across different filter types - cids, _ := db.GetRecordCIDs(types.WithVersions("1.0.0"), types.WithLocatorTypes("docker_image")) - assert.Len(t, cids, 2) // marketing + code - - // OR within same filter type - cids, _ = db.GetRecordCIDs(types.WithDomainNames("marketing_and_advertising/*", "healthcare/*")) - assert.Len(t, cids, 2) // marketing + healthcare - - // Complex: schema 0.8.0 AND has modules - cids, _ = db.GetRecordCIDs(types.WithSchemaVersions("0.8.0"), types.WithModuleNames("*")) - assert.Len(t, cids, 1) // only marketing (code has no modules) -} - -func TestGetRecordCIDs_NilOption(t *testing.T) { - db := setupTestDB(t) - - var nilOpt types.FilterOption - - _, err := db.GetRecordCIDs(nilOpt) - assert.Error(t, err) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "testing" + + "github.com/agntcy/dir/server/types" + "github.com/glebarez/sqlite" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/gorm" +) + +// Test helpers implementing types interfaces. + +type testRecord struct { + cid string + data *testRecordData +} + +func (r *testRecord) GetCid() string { return r.cid } +func (r *testRecord) GetRecordData() (types.RecordData, error) { return r.data, nil } + +type testRecordData struct { + name, version, schemaVersion, createdAt string + authors []string + skills []types.Skill + locators []types.Locator + modules []types.Module + domains []types.Domain +} + +func (d *testRecordData) GetName() string { return d.name } +func (d *testRecordData) GetVersion() string { return d.version } +func (d *testRecordData) GetSchemaVersion() string { return d.schemaVersion } +func (d *testRecordData) GetCreatedAt() string { return d.createdAt } +func (d *testRecordData) GetAuthors() []string { return d.authors } +func (d *testRecordData) GetSkills() []types.Skill { return d.skills } +func (d *testRecordData) GetLocators() []types.Locator { return d.locators } +func (d *testRecordData) GetModules() []types.Module { return d.modules } +func (d *testRecordData) GetDomains() []types.Domain { return d.domains } +func (d *testRecordData) GetDescription() string { return "" } +func (d *testRecordData) GetAnnotations() map[string]string { return nil } +func (d *testRecordData) GetSignature() types.Signature { return nil } +func (d *testRecordData) GetPreviousRecordCid() string { return "" } + +type testSkill struct { + id uint64 + name string +} + +func (s *testSkill) GetID() uint64 { return s.id } +func (s *testSkill) GetName() string { return s.name } +func (s *testSkill) GetAnnotations() map[string]string { return nil } + +type testLocator struct{ locType, url string } + +func (l *testLocator) GetType() string { return l.locType } +func (l *testLocator) GetURL() string { return l.url } +func (l *testLocator) GetSize() uint64 { return 0 } +func (l *testLocator) GetDigest() string { return "" } +func (l *testLocator) GetAnnotations() map[string]string { return nil } + +type testModule struct { + id uint64 + name string +} + +func (m *testModule) GetID() uint64 { return m.id } +func (m *testModule) GetName() string { return m.name } +func (m *testModule) GetData() map[string]any { return nil } + +type testDomain struct { + id uint64 + name string +} + +func (d *testDomain) GetID() uint64 { return 
d.id } +func (d *testDomain) GetName() string { return d.name } +func (d *testDomain) GetAnnotations() map[string]string { return nil } + +func setupTestDB(t *testing.T) *DB { + t.Helper() + + db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{Logger: newCustomLogger()}) + require.NoError(t, err) + require.NoError(t, db.AutoMigrate(&Record{}, &Skill{}, &Locator{}, &Module{}, &Domain{}, &Sync{})) + + return &DB{gormDB: db} +} + +// Test fixtures based on OASF 0.8.0 schema. +// Domains, skills, and modules use real IDs from the schema. +var ( + // Marketing strategy agent - uses NLG, creative content, marketing domain. + marketingAgent = &testRecord{ + cid: "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + data: &testRecordData{ + name: "directory.agntcy.org/cisco/marketing-strategy", + version: "1.0.0", + schemaVersion: "0.8.0", + createdAt: "2024-01-15T10:30:00Z", + authors: []string{"alice@cisco.com", "bob@cisco.com"}, + skills: []types.Skill{ + &testSkill{id: 10201, name: "natural_language_processing/natural_language_generation/text_completion"}, + &testSkill{id: 104, name: "natural_language_processing/creative_content"}, + }, + locators: []types.Locator{ + &testLocator{locType: "docker_image", url: "ghcr.io/agntcy/marketing-strategy:v1.0.0"}, + }, + modules: []types.Module{ + &testModule{id: 201, name: "integration/acp"}, + }, + domains: []types.Domain{ + &testDomain{id: 2405, name: "marketing_and_advertising/marketing_analytics"}, + &testDomain{id: 2403, name: "marketing_and_advertising/digital_marketing"}, + }, + }, + } + + // Healthcare assistant - uses RAG, medical domain. + healthcareAgent = &testRecord{ + cid: "bafybeihkoviema7g3gxyt6la7b7kbblo2hm7zgi3f6d67dqd7wy3yqhqxu", + data: &testRecordData{ + name: "directory.agntcy.org/medtech/health-assistant", + version: "2.0.0", + schemaVersion: "0.7.0", + createdAt: "2024-06-20T14:45:00Z", + authors: []string{"charlie@medtech.io"}, + skills: []types.Skill{ + &testSkill{id: 601, name: "retrieval_augmented_generation/retrieval_of_information"}, + &testSkill{id: 10302, name: "natural_language_processing/information_retrieval_synthesis/question_answering"}, + }, + locators: []types.Locator{ + &testLocator{locType: "source_code", url: "https://github.com/medtech/health-assistant"}, + }, + modules: []types.Module{ + &testModule{id: 202, name: "integration/mcp"}, + &testModule{id: 102, name: "core/llm"}, + }, + domains: []types.Domain{ + &testDomain{id: 901, name: "healthcare/medical_technology"}, + &testDomain{id: 902, name: "healthcare/telemedicine"}, + }, + }, + } + + // Code assistant - uses coding skills, software engineering domain. 
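One property worth calling out, assuming name filters go through the GLOB helpers later in this diff: both the column value and the pattern are lower-cased before matching, so patterns are effectively case-insensitive. A sketch against the seeded fixtures:

func TestNameCaseInsensitive_Sketch(t *testing.T) {
	db := setupTestDB(t)
	seedDB(t, db)

	// "*CISCO*" is lower-cased to "*cisco*" before the GLOB comparison.
	cids, err := db.GetRecordCIDs(types.WithNames("*CISCO*"))
	require.NoError(t, err)
	assert.Len(t, cids, 1) // only the marketing agent's name contains "cisco"
}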
+ codeAssistant = &testRecord{ + cid: "bafybeihdwdcefgh4dqkjv67uzcmw7ojzge6uyuvma5kw7bzydb56wxfao", + data: &testRecordData{ + name: "directory.agntcy.org/devtools/code-assistant", + version: "1.0.0", + schemaVersion: "0.8.0", + createdAt: "2024-03-10T09:00:00Z", + authors: []string{"alice@cisco.com"}, + skills: []types.Skill{ + &testSkill{id: 50201, name: "analytical_skills/coding_skills/text_to_code"}, + &testSkill{id: 50204, name: "analytical_skills/coding_skills/code_optimization"}, + }, + locators: []types.Locator{ + &testLocator{locType: "docker_image", url: "ghcr.io/devtools/code-assistant:v1.0.0"}, + }, + modules: []types.Module{}, + domains: []types.Domain{ + &testDomain{id: 102, name: "technology/software_engineering"}, + &testDomain{id: 10201, name: "technology/software_engineering/software_development"}, + }, + }, + } +) + +func seedDB(t *testing.T, db *DB) { + t.Helper() + + for _, r := range []types.Record{marketingAgent, healthcareAgent, codeAssistant} { + require.NoError(t, db.AddRecord(r)) + } +} + +// CRUD tests + +func TestAddRecord(t *testing.T) { + db := setupTestDB(t) + require.NoError(t, db.AddRecord(marketingAgent)) + + cids, err := db.GetRecordCIDs() + require.NoError(t, err) + assert.Equal(t, []string{"bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi"}, cids) +} + +func TestAddRecord_Idempotent(t *testing.T) { + db := setupTestDB(t) + require.NoError(t, db.AddRecord(marketingAgent)) + require.NoError(t, db.AddRecord(marketingAgent)) + + cids, err := db.GetRecordCIDs() + require.NoError(t, err) + assert.Len(t, cids, 1) +} + +func TestRemoveRecord(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + require.NoError(t, db.RemoveRecord("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")) + + cids, err := db.GetRecordCIDs() + require.NoError(t, err) + assert.Len(t, cids, 2) + assert.NotContains(t, cids, "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") +} + +func TestRemoveRecord_NotFound(t *testing.T) { + db := setupTestDB(t) + err := db.RemoveRecord("nonexistent") + require.NoError(t, err) +} + +// Filter tests + +func TestGetRecordCIDs_Pagination(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + cids, _ := db.GetRecordCIDs(types.WithLimit(2)) + assert.Len(t, cids, 2) + + cids, _ = db.GetRecordCIDs(types.WithOffset(2)) + assert.Len(t, cids, 1) +} + +func TestGetRecordCIDs_Wildcards(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + tests := []struct { + pattern string + expected int + }{ + {"*cisco*", 1}, // marketing only (name contains cisco) + {"*medtech*", 1}, // healthcare only + {"directory.agntcy.org/*", 3}, // all agents + {"*assistant*", 2}, // healthcare + code (both have "assistant") + } + for _, tc := range tests { + cids, _ := db.GetRecordCIDs(types.WithNames(tc.pattern)) + assert.Len(t, cids, tc.expected, "pattern: %s", tc.pattern) + } +} + +func TestGetRecordCIDs_ComparisonOperators(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + tests := []struct { + name string + opts []types.FilterOption + expected int + }{ + // Version comparisons + {"version >=2.0.0", []types.FilterOption{types.WithVersions(">=2.0.0")}, 1}, + {"version <2.0.0", []types.FilterOption{types.WithVersions("<2.0.0")}, 2}, + {"version =1.0.0", []types.FilterOption{types.WithVersions("=1.0.0")}, 2}, + {"version range", []types.FilterOption{types.WithVersions(">=1.0.0", "<2.0.0")}, 2}, + + // CreatedAt comparisons (ISO format) + {"created >=2024-06-01", []types.FilterOption{types.WithCreatedAts(">=2024-06-01")}, 1}, + 
{"created <2024-04-01", []types.FilterOption{types.WithCreatedAts("<2024-04-01")}, 2}, + {"created Q1 range", []types.FilterOption{types.WithCreatedAts(">=2024-01-01", "<2024-04-01")}, 2}, + + // Schema version + {"schema 0.8.0", []types.FilterOption{types.WithSchemaVersions("0.8.0")}, 2}, + {"schema 0.7.*", []types.FilterOption{types.WithSchemaVersions("0.7.*")}, 1}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cids, err := db.GetRecordCIDs(tc.opts...) + require.NoError(t, err) + assert.Len(t, cids, tc.expected) + }) + } +} + +func TestGetRecordCIDs_Authors(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + // alice@cisco.com is author of marketing + code agents + cids, _ := db.GetRecordCIDs(types.WithAuthors("alice@cisco.com")) + assert.Len(t, cids, 2) + + cids, _ = db.GetRecordCIDs(types.WithAuthors("*@medtech.io")) + assert.Len(t, cids, 1) +} + +func TestGetRecordCIDs_RelatedTables(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + tests := []struct { + name string + opts []types.FilterOption + expected int + }{ + // Skills by name pattern + {"skill nlp/*", []types.FilterOption{types.WithSkillNames("natural_language_processing/*")}, 2}, + {"skill coding", []types.FilterOption{types.WithSkillNames("*coding*")}, 1}, + {"skill RAG", []types.FilterOption{types.WithSkillNames("retrieval_augmented_generation/*")}, 1}, + + // Skills by ID + {"skill ID text_completion", []types.FilterOption{types.WithSkillIDs(10201)}, 1}, + {"skill ID text_to_code", []types.FilterOption{types.WithSkillIDs(50201)}, 1}, + + // Locators + {"locator docker", []types.FilterOption{types.WithLocatorTypes("docker_image")}, 2}, + {"locator source", []types.FilterOption{types.WithLocatorTypes("source_code")}, 1}, + {"locator ghcr.io", []types.FilterOption{types.WithLocatorURLs("ghcr.io/*")}, 2}, + + // Modules + {"module acp", []types.FilterOption{types.WithModuleNames("integration/acp")}, 1}, + {"module mcp", []types.FilterOption{types.WithModuleNames("integration/mcp")}, 1}, + {"module ID 201", []types.FilterOption{types.WithModuleIDs(201)}, 1}, + + // Domains by name + {"domain marketing", []types.FilterOption{types.WithDomainNames("marketing_and_advertising/*")}, 1}, + {"domain healthcare", []types.FilterOption{types.WithDomainNames("healthcare/*")}, 1}, + {"domain technology", []types.FilterOption{types.WithDomainNames("technology/*")}, 1}, + + // Domains by ID + {"domain ID 901", []types.FilterOption{types.WithDomainIDs(901)}, 1}, // healthcare/medical_technology + {"domain ID 102", []types.FilterOption{types.WithDomainIDs(102)}, 1}, // technology/software_engineering + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cids, err := db.GetRecordCIDs(tc.opts...) 
+ require.NoError(t, err) + assert.Len(t, cids, tc.expected) + }) + } +} + +func TestGetRecordCIDs_CombinedFilters(t *testing.T) { + db := setupTestDB(t) + seedDB(t, db) + + // AND across different filter types + cids, _ := db.GetRecordCIDs(types.WithVersions("1.0.0"), types.WithLocatorTypes("docker_image")) + assert.Len(t, cids, 2) // marketing + code + + // OR within same filter type + cids, _ = db.GetRecordCIDs(types.WithDomainNames("marketing_and_advertising/*", "healthcare/*")) + assert.Len(t, cids, 2) // marketing + healthcare + + // Complex: schema 0.8.0 AND has modules + cids, _ = db.GetRecordCIDs(types.WithSchemaVersions("0.8.0"), types.WithModuleNames("*")) + assert.Len(t, cids, 1) // only marketing (code has no modules) +} + +func TestGetRecordCIDs_NilOption(t *testing.T) { + db := setupTestDB(t) + + var nilOpt types.FilterOption + + _, err := db.GetRecordCIDs(nilOpt) + assert.Error(t, err) +} diff --git a/server/database/sqlite/skill.go b/server/database/sqlite/skill.go index 8268741ab..930d84a07 100644 --- a/server/database/sqlite/skill.go +++ b/server/database/sqlite/skill.go @@ -1,46 +1,46 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "time" - - "github.com/agntcy/dir/server/types" -) - -type Skill struct { - ID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - RecordCID string `gorm:"column:record_cid;not null;index"` - SkillID uint64 `gorm:"not null"` - Name string `gorm:"not null"` -} - -func (skill *Skill) GetAnnotations() map[string]string { - // SQLite skills don't store annotations, return empty map - return make(map[string]string) -} - -func (skill *Skill) GetID() uint64 { - return skill.SkillID -} - -func (skill *Skill) GetName() string { - return skill.Name -} - -// convertSkills transforms interface types to SQLite structs. -func convertSkills(skills []types.Skill, recordCID string) []Skill { - result := make([]Skill, len(skills)) - for i, skill := range skills { - result[i] = Skill{ - RecordCID: recordCID, - SkillID: skill.GetID(), - Name: skill.GetName(), - } - } - - return result -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "time" + + "github.com/agntcy/dir/server/types" +) + +type Skill struct { + ID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + RecordCID string `gorm:"column:record_cid;not null;index"` + SkillID uint64 `gorm:"not null"` + Name string `gorm:"not null"` +} + +func (skill *Skill) GetAnnotations() map[string]string { + // SQLite skills don't store annotations, return empty map + return make(map[string]string) +} + +func (skill *Skill) GetID() uint64 { + return skill.SkillID +} + +func (skill *Skill) GetName() string { + return skill.Name +} + +// convertSkills transforms interface types to SQLite structs. 
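Each nested collection on a record is flattened into a side table keyed by record_cid, which is what lets the filter layer join from records to skills. A sketch of the conversion's output, reusing the testSkill double from the test file above; the CID is invented:

rows := convertSkills([]types.Skill{
	&testSkill{id: 104, name: "natural_language_processing/creative_content"},
}, "bafy-example-cid")

// rows[0].RecordCID == "bafy-example-cid"
// rows[0].SkillID   == 104
// rows[0].Name      == "natural_language_processing/creative_content"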
+func convertSkills(skills []types.Skill, recordCID string) []Skill { + result := make([]Skill, len(skills)) + for i, skill := range skills { + result[i] = Skill{ + RecordCID: recordCID, + SkillID: skill.GetID(), + Name: skill.GetName(), + } + } + + return result +} diff --git a/server/database/sqlite/sqlite.go b/server/database/sqlite/sqlite.go index 52ceafbdb..b9e675e9b 100644 --- a/server/database/sqlite/sqlite.go +++ b/server/database/sqlite/sqlite.go @@ -1,94 +1,94 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "github.com/agntcy/dir/utils/logging" - "github.com/glebarez/sqlite" - "gorm.io/gorm" - gormlogger "gorm.io/gorm/logger" -) - -var logger = logging.Logger("database/sqlite") - -type DB struct { - gormDB *gorm.DB -} - -func newCustomLogger() gormlogger.Interface { - // Create a custom logger configuration that ignores "record not found" errors - // since these are expected during normal operation (checking if records exist) - return gormlogger.New( - log.New(os.Stdout, "\r\n", log.LstdFlags), - gormlogger.Config{ - SlowThreshold: 200 * time.Millisecond, //nolint:mnd - LogLevel: gormlogger.Warn, - IgnoreRecordNotFoundError: true, - Colorful: true, - }, - ) -} - -func New(path string) (*DB, error) { - db, err := gorm.Open(sqlite.Open(path), &gorm.Config{ - Logger: newCustomLogger(), - }) - if err != nil { - return nil, fmt.Errorf("failed to connect to SQLite database: %w", err) - } - - // Migrate record-related schema - if err := db.AutoMigrate(Record{}, Locator{}, Skill{}, Module{}, Domain{}); err != nil { - return nil, fmt.Errorf("failed to migrate record schema: %w", err) - } - - // Migrate sync-related schema - if err := db.AutoMigrate(Sync{}); err != nil { - return nil, fmt.Errorf("failed to migrate sync schema: %w", err) - } - - // Migrate publication-related schema - if err := db.AutoMigrate(Publication{}); err != nil { - return nil, fmt.Errorf("failed to migrate publication schema: %w", err) - } - - return &DB{ - gormDB: db, - }, nil -} - -// IsReady checks if the database connection is ready to serve traffic. -// Returns true if the database connection is established and can execute queries. 
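For callers of the constructor above: New runs every AutoMigrate before returning, so a fresh database file comes up with the record, sync, and publication tables in place, and the custom logger suppresses "record not found" noise while flagging queries slower than 200ms. A usage sketch; the path is hypothetical:

db, err := New("dir.sqlite") // hypothetical on-disk path
if err != nil {
	log.Fatalf("open sqlite store: %v", err)
}
_ = db // ready to serve record, sync, and publication calls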
-func (d *DB) IsReady(ctx context.Context) bool { - if d.gormDB == nil { - logger.Debug("Database not ready: gormDB is nil") - - return false - } - - // Get the underlying SQL database - sqlDB, err := d.gormDB.DB() - if err != nil { - logger.Debug("Database not ready: failed to get SQL DB", "error", err) - - return false - } - - // Ping the database with context - if err := sqlDB.PingContext(ctx); err != nil { - logger.Debug("Database not ready: ping failed", "error", err) - - return false - } - - logger.Debug("Database ready") - - return true -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "github.com/agntcy/dir/utils/logging" + "github.com/glebarez/sqlite" + "gorm.io/gorm" + gormlogger "gorm.io/gorm/logger" +) + +var logger = logging.Logger("database/sqlite") + +type DB struct { + gormDB *gorm.DB +} + +func newCustomLogger() gormlogger.Interface { + // Create a custom logger configuration that ignores "record not found" errors + // since these are expected during normal operation (checking if records exist) + return gormlogger.New( + log.New(os.Stdout, "\r\n", log.LstdFlags), + gormlogger.Config{ + SlowThreshold: 200 * time.Millisecond, //nolint:mnd + LogLevel: gormlogger.Warn, + IgnoreRecordNotFoundError: true, + Colorful: true, + }, + ) +} + +func New(path string) (*DB, error) { + db, err := gorm.Open(sqlite.Open(path), &gorm.Config{ + Logger: newCustomLogger(), + }) + if err != nil { + return nil, fmt.Errorf("failed to connect to SQLite database: %w", err) + } + + // Migrate record-related schema + if err := db.AutoMigrate(Record{}, Locator{}, Skill{}, Module{}, Domain{}); err != nil { + return nil, fmt.Errorf("failed to migrate record schema: %w", err) + } + + // Migrate sync-related schema + if err := db.AutoMigrate(Sync{}); err != nil { + return nil, fmt.Errorf("failed to migrate sync schema: %w", err) + } + + // Migrate publication-related schema + if err := db.AutoMigrate(Publication{}); err != nil { + return nil, fmt.Errorf("failed to migrate publication schema: %w", err) + } + + return &DB{ + gormDB: db, + }, nil +} + +// IsReady checks if the database connection is ready to serve traffic. +// Returns true if the database connection is established and can execute queries. 
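IsReady takes a context, so callers can bound the ping. A sketch of wiring it into an HTTP readiness probe, assuming net/http; the handler name and timeout are illustrative, not part of this package:

func readyHandler(db *DB) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx, cancel := context.WithTimeout(r.Context(), 2*time.Second)
		defer cancel()

		if !db.IsReady(ctx) {
			http.Error(w, "database not ready", http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}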
+func (d *DB) IsReady(ctx context.Context) bool { + if d.gormDB == nil { + logger.Debug("Database not ready: gormDB is nil") + + return false + } + + // Get the underlying SQL database + sqlDB, err := d.gormDB.DB() + if err != nil { + logger.Debug("Database not ready: failed to get SQL DB", "error", err) + + return false + } + + // Ping the database with context + if err := sqlDB.PingContext(ctx); err != nil { + logger.Debug("Database not ready: ping failed", "error", err) + + return false + } + + logger.Debug("Database ready") + + return true +} diff --git a/server/database/sqlite/sync.go b/server/database/sqlite/sync.go index 64f1fe0f6..fe19cd678 100644 --- a/server/database/sqlite/sync.go +++ b/server/database/sqlite/sync.go @@ -1,190 +1,190 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sqlite - -import ( - "time" - - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/server/types" - "github.com/google/uuid" - "gorm.io/gorm" -) - -type Sync struct { - GormID uint `gorm:"primarykey"` - CreatedAt time.Time - UpdatedAt time.Time - ID string `gorm:"not null;index"` - RemoteDirectoryURL string `gorm:"not null"` - RemoteRegistryURL string `gorm:"not null"` - CIDs []string `gorm:"serializer:json;not null"` - Status storev1.SyncStatus `gorm:"not null"` -} - -func (sync *Sync) GetID() string { - return sync.ID -} - -func (sync *Sync) GetRemoteDirectoryURL() string { - return sync.RemoteDirectoryURL -} - -func (sync *Sync) GetRemoteRegistryURL() string { - return sync.RemoteRegistryURL -} - -func (sync *Sync) GetCIDs() []string { - return sync.CIDs -} - -func (sync *Sync) GetStatus() storev1.SyncStatus { - return sync.Status -} - -func (d *DB) CreateSync(remoteURL string, cids []string) (string, error) { - sync := &Sync{ - ID: uuid.NewString(), - RemoteDirectoryURL: remoteURL, - CIDs: cids, - Status: storev1.SyncStatus_SYNC_STATUS_PENDING, - } - - if err := d.gormDB.Create(sync).Error; err != nil { - return "", err - } - - logger.Debug("Added sync to SQLite database", "sync_id", sync.ID) - - return sync.ID, nil -} - -func (d *DB) GetSyncByID(syncID string) (types.SyncObject, error) { - var sync Sync - if err := d.gormDB.Where("id = ?", syncID).First(&sync).Error; err != nil { - return nil, err - } - - return &sync, nil -} - -func (d *DB) GetSyncs(offset, limit int) ([]types.SyncObject, error) { - var syncs []Sync - - query := d.gormDB.Offset(offset) - - // Only apply limit if it's greater than 0 - if limit > 0 { - query = query.Limit(limit) - } - - if err := query.Find(&syncs).Error; err != nil { - return nil, err - } - - // convert to types.SyncObject - syncObjects := make([]types.SyncObject, len(syncs)) - for i, sync := range syncs { - syncObjects[i] = &sync - } - - return syncObjects, nil -} - -func (d *DB) GetSyncsByStatus(status storev1.SyncStatus) ([]types.SyncObject, error) { - var syncs []Sync - if err := d.gormDB.Where("status = ?", status).Find(&syncs).Error; err != nil { - return nil, err - } - - // convert to types.SyncObject - syncObjects := make([]types.SyncObject, len(syncs)) - for i, sync := range syncs { - syncObjects[i] = &sync - } - - return syncObjects, nil -} - -func (d *DB) UpdateSyncStatus(syncID string, status storev1.SyncStatus) error { - syncObj, err := d.GetSyncByID(syncID) - if err != nil { - return err - } - - sync, ok := syncObj.(*Sync) - if !ok { - return gorm.ErrInvalidData - } - - sync.Status = status - - if err := d.gormDB.Save(sync).Error; err != nil { - return err - } - - 
logger.Debug("Updated sync in SQLite database", "sync_id", sync.GetID(), "status", sync.GetStatus()) - - return nil -} - -func (d *DB) UpdateSyncRemoteRegistry(syncID string, remoteRegistry string) error { - syncObj, err := d.GetSyncByID(syncID) - if err != nil { - return err - } - - sync, ok := syncObj.(*Sync) - if !ok { - return gorm.ErrInvalidData - } - - sync.RemoteRegistryURL = remoteRegistry - - if err := d.gormDB.Save(sync).Error; err != nil { - return err - } - - logger.Debug("Updated sync in SQLite database", "sync_id", sync.GetID(), "remote_registry", sync.GetRemoteRegistryURL()) - - return nil -} - -func (d *DB) GetSyncRemoteRegistry(syncID string) (string, error) { - syncObj, err := d.GetSyncByID(syncID) - if err != nil { - return "", err - } - - sync, ok := syncObj.(*Sync) - if !ok { - return "", gorm.ErrInvalidData - } - - return sync.GetRemoteRegistryURL(), nil -} - -func (d *DB) GetSyncCIDs(syncID string) ([]string, error) { - syncObj, err := d.GetSyncByID(syncID) - if err != nil { - return nil, err - } - - sync, ok := syncObj.(*Sync) - if !ok { - return nil, gorm.ErrInvalidData - } - - return sync.GetCIDs(), nil -} - -func (d *DB) DeleteSync(syncID string) error { - if err := d.gormDB.Where("id = ?", syncID).Delete(&Sync{}).Error; err != nil { - return err - } - - logger.Debug("Deleted sync from SQLite database", "sync_id", syncID) - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sqlite + +import ( + "time" + + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/server/types" + "github.com/google/uuid" + "gorm.io/gorm" +) + +type Sync struct { + GormID uint `gorm:"primarykey"` + CreatedAt time.Time + UpdatedAt time.Time + ID string `gorm:"not null;index"` + RemoteDirectoryURL string `gorm:"not null"` + RemoteRegistryURL string `gorm:"not null"` + CIDs []string `gorm:"serializer:json;not null"` + Status storev1.SyncStatus `gorm:"not null"` +} + +func (sync *Sync) GetID() string { + return sync.ID +} + +func (sync *Sync) GetRemoteDirectoryURL() string { + return sync.RemoteDirectoryURL +} + +func (sync *Sync) GetRemoteRegistryURL() string { + return sync.RemoteRegistryURL +} + +func (sync *Sync) GetCIDs() []string { + return sync.CIDs +} + +func (sync *Sync) GetStatus() storev1.SyncStatus { + return sync.Status +} + +func (d *DB) CreateSync(remoteURL string, cids []string) (string, error) { + sync := &Sync{ + ID: uuid.NewString(), + RemoteDirectoryURL: remoteURL, + CIDs: cids, + Status: storev1.SyncStatus_SYNC_STATUS_PENDING, + } + + if err := d.gormDB.Create(sync).Error; err != nil { + return "", err + } + + logger.Debug("Added sync to SQLite database", "sync_id", sync.ID) + + return sync.ID, nil +} + +func (d *DB) GetSyncByID(syncID string) (types.SyncObject, error) { + var sync Sync + if err := d.gormDB.Where("id = ?", syncID).First(&sync).Error; err != nil { + return nil, err + } + + return &sync, nil +} + +func (d *DB) GetSyncs(offset, limit int) ([]types.SyncObject, error) { + var syncs []Sync + + query := d.gormDB.Offset(offset) + + // Only apply limit if it's greater than 0 + if limit > 0 { + query = query.Limit(limit) + } + + if err := query.Find(&syncs).Error; err != nil { + return nil, err + } + + // convert to types.SyncObject + syncObjects := make([]types.SyncObject, len(syncs)) + for i, sync := range syncs { + syncObjects[i] = &sync + } + + return syncObjects, nil +} + +func (d *DB) GetSyncsByStatus(status storev1.SyncStatus) ([]types.SyncObject, error) { + var 
syncs []Sync + if err := d.gormDB.Where("status = ?", status).Find(&syncs).Error; err != nil { + return nil, err + } + + // convert to types.SyncObject + syncObjects := make([]types.SyncObject, len(syncs)) + for i, sync := range syncs { + syncObjects[i] = &sync + } + + return syncObjects, nil +} + +func (d *DB) UpdateSyncStatus(syncID string, status storev1.SyncStatus) error { + syncObj, err := d.GetSyncByID(syncID) + if err != nil { + return err + } + + sync, ok := syncObj.(*Sync) + if !ok { + return gorm.ErrInvalidData + } + + sync.Status = status + + if err := d.gormDB.Save(sync).Error; err != nil { + return err + } + + logger.Debug("Updated sync in SQLite database", "sync_id", sync.GetID(), "status", sync.GetStatus()) + + return nil +} + +func (d *DB) UpdateSyncRemoteRegistry(syncID string, remoteRegistry string) error { + syncObj, err := d.GetSyncByID(syncID) + if err != nil { + return err + } + + sync, ok := syncObj.(*Sync) + if !ok { + return gorm.ErrInvalidData + } + + sync.RemoteRegistryURL = remoteRegistry + + if err := d.gormDB.Save(sync).Error; err != nil { + return err + } + + logger.Debug("Updated sync in SQLite database", "sync_id", sync.GetID(), "remote_registry", sync.GetRemoteRegistryURL()) + + return nil +} + +func (d *DB) GetSyncRemoteRegistry(syncID string) (string, error) { + syncObj, err := d.GetSyncByID(syncID) + if err != nil { + return "", err + } + + sync, ok := syncObj.(*Sync) + if !ok { + return "", gorm.ErrInvalidData + } + + return sync.GetRemoteRegistryURL(), nil +} + +func (d *DB) GetSyncCIDs(syncID string) ([]string, error) { + syncObj, err := d.GetSyncByID(syncID) + if err != nil { + return nil, err + } + + sync, ok := syncObj.(*Sync) + if !ok { + return nil, gorm.ErrInvalidData + } + + return sync.GetCIDs(), nil +} + +func (d *DB) DeleteSync(syncID string) error { + if err := d.gormDB.Where("id = ?", syncID).Delete(&Sync{}).Error; err != nil { + return err + } + + logger.Debug("Deleted sync from SQLite database", "sync_id", syncID) + + return nil +} diff --git a/server/database/utils/utils.go b/server/database/utils/utils.go index b60a8b009..aac938e84 100644 --- a/server/database/utils/utils.go +++ b/server/database/utils/utils.go @@ -1,201 +1,201 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "fmt" - "strconv" - "strings" - - searchv1 "github.com/agntcy/dir/api/search/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" -) - -var logger = logging.Logger("database/utils") - -// ParseComparisonOperator parses a value that may have an operator prefix (>=, >, <=, <, =). -// Returns the operator and the actual value. If no operator prefix, returns empty operator. -func ParseComparisonOperator(value string) (string, string) { - // Check for two-character operators first - if strings.HasPrefix(value, ">=") { - return ">=", strings.TrimPrefix(value, ">=") - } - - if strings.HasPrefix(value, "<=") { - return "<=", strings.TrimPrefix(value, "<=") - } - - // Then single-character operators - if strings.HasPrefix(value, ">") { - return ">", strings.TrimPrefix(value, ">") - } - - if strings.HasPrefix(value, "<") { - return "<", strings.TrimPrefix(value, "<") - } - - if strings.HasPrefix(value, "=") { - return "=", strings.TrimPrefix(value, "=") - } - - // No operator prefix - return "", value -} - -// BuildComparisonConditions builds SQL conditions for values with comparison operators. 
-// Only values with operator prefixes (>=, >, <=, <, =) are processed as comparisons (AND logic). -// Values without operators are processed as wildcards (OR logic). -// If both are present, they are combined with OR. -func BuildComparisonConditions(column string, values []string) (string, []interface{}) { - if len(values) == 0 { - return "", nil - } - - var comparisonConditions []string - - var comparisonArgs []interface{} - - var wildcardValues []string - - // Separate comparison operators from regular values. - for _, value := range values { - op, actualValue := ParseComparisonOperator(value) - if op != "" { - comparisonConditions = append(comparisonConditions, fmt.Sprintf("%s %s ?", column, op)) - comparisonArgs = append(comparisonArgs, actualValue) - } else { - wildcardValues = append(wildcardValues, value) - } - } - - var allConditions []string - - var allArgs []interface{} - - // Comparison conditions are AND'd together (e.g., >= 1.0 AND < 2.0). - if len(comparisonConditions) > 0 { - allConditions = append(allConditions, "("+strings.Join(comparisonConditions, " AND ")+")") - allArgs = append(allArgs, comparisonArgs...) - } - - // Wildcard conditions are OR'd together - if len(wildcardValues) > 0 { - wildcardCondition, wildcardArgs := BuildWildcardCondition(column, wildcardValues) - if wildcardCondition != "" { - allConditions = append(allConditions, "("+wildcardCondition+")") - allArgs = append(allArgs, wildcardArgs...) - } - } - - if len(allConditions) == 0 { - return "", nil - } - - // If we have both comparison and wildcard, OR them together - return strings.Join(allConditions, " OR "), allArgs -} - -func QueryToFilters(queries []*searchv1.RecordQuery) ([]types.FilterOption, error) { //nolint:gocognit,cyclop - var options []types.FilterOption - - for _, query := range queries { - switch query.GetType() { - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED: - logger.Warn("Unspecified query type, skipping", "query", query) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME: - options = append(options, types.WithNames(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION: - options = append(options, types.WithVersions(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID: - u64, err := strconv.ParseUint(query.GetValue(), 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse skill ID %q: %w", query.GetValue(), err) - } - - options = append(options, types.WithSkillIDs(u64)) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME: - options = append(options, types.WithSkillNames(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR: - l := strings.SplitN(query.GetValue(), ":", 2) //nolint:mnd - - // If the type starts with a wildcard, treat it as a URL pattern - // Example: "*marketing-strategy" - if len(l) == 1 && strings.HasPrefix(l[0], "*") { - options = append(options, types.WithLocatorURLs(l[0])) - - break - } - - if len(l) == 1 && strings.TrimSpace(l[0]) != "" { - options = append(options, types.WithLocatorTypes(l[0])) - - break - } - - // If the prefix is //, check if the part before : is a wildcard - // If it's a wildcard (like "*"), treat the whole thing as a URL pattern - // If it's not a wildcard (like "docker-image"), treat as type:url format - // Example: "*://ghcr.io/agntcy/marketing-strategy" -> pure URL pattern - if len(l) == 2 && strings.HasPrefix(l[1], "//") && strings.HasPrefix(l[0], "*") { - options = append(options, 
types.WithLocatorURLs(query.GetValue())) - - break - } - - if len(l) == 2 { //nolint:mnd - if strings.TrimSpace(l[0]) != "" { - options = append(options, types.WithLocatorTypes(l[0])) - } - - if strings.TrimSpace(l[1]) != "" { - options = append(options, types.WithLocatorURLs(l[1])) - } - } - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME: - if strings.TrimSpace(query.GetValue()) != "" { - options = append(options, types.WithModuleNames(query.GetValue())) - } - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID: - u64, err := strconv.ParseUint(query.GetValue(), 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse domain ID %q: %w", query.GetValue(), err) - } - - options = append(options, types.WithDomainIDs(u64)) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME: - options = append(options, types.WithDomainNames(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT: - options = append(options, types.WithCreatedAts(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR: - options = append(options, types.WithAuthors(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION: - options = append(options, types.WithSchemaVersions(query.GetValue())) - - case searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID: - u64, err := strconv.ParseUint(query.GetValue(), 10, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse module ID %q: %w", query.GetValue(), err) - } - - options = append(options, types.WithModuleIDs(u64)) - - default: - logger.Warn("Unknown query type", "type", query.GetType()) - } - } - - return options, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "fmt" + "strconv" + "strings" + + searchv1 "github.com/agntcy/dir/api/search/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" +) + +var logger = logging.Logger("database/utils") + +// ParseComparisonOperator parses a value that may have an operator prefix (>=, >, <=, <, =). +// Returns the operator and the actual value. If no operator prefix, returns empty operator. +func ParseComparisonOperator(value string) (string, string) { + // Check for two-character operators first + if strings.HasPrefix(value, ">=") { + return ">=", strings.TrimPrefix(value, ">=") + } + + if strings.HasPrefix(value, "<=") { + return "<=", strings.TrimPrefix(value, "<=") + } + + // Then single-character operators + if strings.HasPrefix(value, ">") { + return ">", strings.TrimPrefix(value, ">") + } + + if strings.HasPrefix(value, "<") { + return "<", strings.TrimPrefix(value, "<") + } + + if strings.HasPrefix(value, "=") { + return "=", strings.TrimPrefix(value, "=") + } + + // No operator prefix + return "", value +} + +// BuildComparisonConditions builds SQL conditions for values with comparison operators. +// Only values with operator prefixes (>=, >, <=, <, =) are processed as comparisons (AND logic). +// Values without operators are processed as wildcards (OR logic). +// If both are present, they are combined with OR. +func BuildComparisonConditions(column string, values []string) (string, []interface{}) { + if len(values) == 0 { + return "", nil + } + + var comparisonConditions []string + + var comparisonArgs []interface{} + + var wildcardValues []string + + // Separate comparison operators from regular values. 
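// Worked example of the split below, with illustrative values: on column
// "version", the inputs {">=1.0.0", "<2.0.0", "2.1.*"} classify as two
// comparisons (ParseComparisonOperator returns ">=" and "<") plus one
// wildcard (empty operator), and the function ultimately returns
//   (version >= ? AND version < ?) OR (LOWER(version) GLOB ?)
// with args ["1.0.0", "2.0.0", "2.1.*"].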
+ for _, value := range values { + op, actualValue := ParseComparisonOperator(value) + if op != "" { + comparisonConditions = append(comparisonConditions, fmt.Sprintf("%s %s ?", column, op)) + comparisonArgs = append(comparisonArgs, actualValue) + } else { + wildcardValues = append(wildcardValues, value) + } + } + + var allConditions []string + + var allArgs []interface{} + + // Comparison conditions are AND'd together (e.g., >= 1.0 AND < 2.0). + if len(comparisonConditions) > 0 { + allConditions = append(allConditions, "("+strings.Join(comparisonConditions, " AND ")+")") + allArgs = append(allArgs, comparisonArgs...) + } + + // Wildcard conditions are OR'd together + if len(wildcardValues) > 0 { + wildcardCondition, wildcardArgs := BuildWildcardCondition(column, wildcardValues) + if wildcardCondition != "" { + allConditions = append(allConditions, "("+wildcardCondition+")") + allArgs = append(allArgs, wildcardArgs...) + } + } + + if len(allConditions) == 0 { + return "", nil + } + + // If we have both comparison and wildcard, OR them together + return strings.Join(allConditions, " OR "), allArgs +} + +func QueryToFilters(queries []*searchv1.RecordQuery) ([]types.FilterOption, error) { //nolint:gocognit,cyclop + var options []types.FilterOption + + for _, query := range queries { + switch query.GetType() { + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED: + logger.Warn("Unspecified query type, skipping", "query", query) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_NAME: + options = append(options, types.WithNames(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_VERSION: + options = append(options, types.WithVersions(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_ID: + u64, err := strconv.ParseUint(query.GetValue(), 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse skill ID %q: %w", query.GetValue(), err) + } + + options = append(options, types.WithSkillIDs(u64)) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL_NAME: + options = append(options, types.WithSkillNames(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR: + l := strings.SplitN(query.GetValue(), ":", 2) //nolint:mnd + + // If the type starts with a wildcard, treat it as a URL pattern + // Example: "*marketing-strategy" + if len(l) == 1 && strings.HasPrefix(l[0], "*") { + options = append(options, types.WithLocatorURLs(l[0])) + + break + } + + if len(l) == 1 && strings.TrimSpace(l[0]) != "" { + options = append(options, types.WithLocatorTypes(l[0])) + + break + } + + // If the prefix is //, check if the part before : is a wildcard + // If it's a wildcard (like "*"), treat the whole thing as a URL pattern + // If it's not a wildcard (like "docker-image"), treat as type:url format + // Example: "*://ghcr.io/agntcy/marketing-strategy" -> pure URL pattern + if len(l) == 2 && strings.HasPrefix(l[1], "//") && strings.HasPrefix(l[0], "*") { + options = append(options, types.WithLocatorURLs(query.GetValue())) + + break + } + + if len(l) == 2 { //nolint:mnd + if strings.TrimSpace(l[0]) != "" { + options = append(options, types.WithLocatorTypes(l[0])) + } + + if strings.TrimSpace(l[1]) != "" { + options = append(options, types.WithLocatorURLs(l[1])) + } + } + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_NAME: + if strings.TrimSpace(query.GetValue()) != "" { + options = append(options, types.WithModuleNames(query.GetValue())) + } + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_ID: + u64, err := 
strconv.ParseUint(query.GetValue(), 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse domain ID %q: %w", query.GetValue(), err) + } + + options = append(options, types.WithDomainIDs(u64)) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN_NAME: + options = append(options, types.WithDomainNames(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_CREATED_AT: + options = append(options, types.WithCreatedAts(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_AUTHOR: + options = append(options, types.WithAuthors(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_SCHEMA_VERSION: + options = append(options, types.WithSchemaVersions(query.GetValue())) + + case searchv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE_ID: + u64, err := strconv.ParseUint(query.GetValue(), 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse module ID %q: %w", query.GetValue(), err) + } + + options = append(options, types.WithModuleIDs(u64)) + + default: + logger.Warn("Unknown query type", "type", query.GetType()) + } + } + + return options, nil +} diff --git a/server/database/utils/wildcard.go b/server/database/utils/wildcard.go index b209d4934..9e2e2dc9f 100644 --- a/server/database/utils/wildcard.go +++ b/server/database/utils/wildcard.go @@ -1,55 +1,55 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "strings" -) - -// ContainsWildcards checks if a pattern contains wildcard characters (* or ? or []). -func ContainsWildcards(pattern string) bool { - return strings.Contains(pattern, "*") || strings.Contains(pattern, "?") || containsListWildcard(pattern) -} - -// containsListWildcard checks if a pattern contains list wildcard characters []. -func containsListWildcard(pattern string) bool { - openIdx := strings.Index(pattern, "[") - closeIdx := strings.Index(pattern, "]") - - return openIdx != -1 && closeIdx > openIdx -} - -// BuildWildcardCondition builds a WHERE condition for wildcard or exact matching. -// Returns the condition string and arguments for the WHERE clause. -func BuildWildcardCondition(field string, patterns []string) (string, []interface{}) { - if len(patterns) == 0 { - return "", nil - } - - conditions := make([]string, 0, len(patterns)) - args := make([]interface{}, 0, len(patterns)) - - for _, pattern := range patterns { - condition, arg := BuildSingleWildcardCondition(field, pattern) - conditions = append(conditions, condition) - args = append(args, arg) - } - - condition := strings.Join(conditions, " OR ") - if len(conditions) > 1 { - condition = "(" + condition + ")" - } - - return condition, args -} - -// BuildSingleWildcardCondition builds a WHERE condition for a single field with wildcard or exact matching. -// Returns the condition string and argument for the WHERE clause. -func BuildSingleWildcardCondition(field, pattern string) (string, string) { - if ContainsWildcards(pattern) { - return "LOWER(" + field + ") GLOB ?", strings.ToLower(pattern) - } - - return "LOWER(" + field + ") = ?", strings.ToLower(pattern) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "strings" +) + +// ContainsWildcards checks if a pattern contains wildcard characters (* or ? or []). 
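A note on the matching strategy in this file: SQLite's GLOB is case-sensitive and uses shell-style *, ?, and [...], so both the column and the pattern are lower-cased here rather than relying on LIKE's case folding. Two representative outputs of the helper shown below, consistent with the tests later in this diff:

// BuildSingleWildcardCondition("name", "Test*") -> ("LOWER(name) GLOB ?", "test*")
// BuildSingleWildcardCondition("name", "Test")  -> ("LOWER(name) = ?",    "test")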
+func ContainsWildcards(pattern string) bool { + return strings.Contains(pattern, "*") || strings.Contains(pattern, "?") || containsListWildcard(pattern) +} + +// containsListWildcard checks if a pattern contains list wildcard characters []. +func containsListWildcard(pattern string) bool { + openIdx := strings.Index(pattern, "[") + closeIdx := strings.Index(pattern, "]") + + return openIdx != -1 && closeIdx > openIdx +} + +// BuildWildcardCondition builds a WHERE condition for wildcard or exact matching. +// Returns the condition string and arguments for the WHERE clause. +func BuildWildcardCondition(field string, patterns []string) (string, []interface{}) { + if len(patterns) == 0 { + return "", nil + } + + conditions := make([]string, 0, len(patterns)) + args := make([]interface{}, 0, len(patterns)) + + for _, pattern := range patterns { + condition, arg := BuildSingleWildcardCondition(field, pattern) + conditions = append(conditions, condition) + args = append(args, arg) + } + + condition := strings.Join(conditions, " OR ") + if len(conditions) > 1 { + condition = "(" + condition + ")" + } + + return condition, args +} + +// BuildSingleWildcardCondition builds a WHERE condition for a single field with wildcard or exact matching. +// Returns the condition string and argument for the WHERE clause. +func BuildSingleWildcardCondition(field, pattern string) (string, string) { + if ContainsWildcards(pattern) { + return "LOWER(" + field + ") GLOB ?", strings.ToLower(pattern) + } + + return "LOWER(" + field + ") = ?", strings.ToLower(pattern) +} diff --git a/server/database/utils/wildcard_test.go b/server/database/utils/wildcard_test.go index abe873a2e..13ff8fae2 100644 --- a/server/database/utils/wildcard_test.go +++ b/server/database/utils/wildcard_test.go @@ -1,697 +1,697 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package utils - -import ( - "reflect" - "testing" -) - -func TestContainsWildcards(t *testing.T) { - tests := []struct { - name string - pattern string - expected bool - }{ - { - name: "no wildcards", - pattern: "simple", - expected: false, - }, - { - name: "single asterisk", - pattern: "test*", - expected: true, - }, - { - name: "asterisk at beginning", - pattern: "*test", - expected: true, - }, - { - name: "asterisk in middle", - pattern: "te*st", - expected: true, - }, - { - name: "multiple asterisks", - pattern: "*test*", - expected: true, - }, - { - name: "question mark (wildcard in GLOB)", - pattern: "test?", - expected: true, - }, - { - name: "mixed asterisk and question mark", - pattern: "test*?", - expected: true, - }, - { - name: "empty string", - pattern: "", - expected: false, - }, - { - name: "only asterisk", - pattern: "*", - expected: true, - }, - { - name: "complex pattern", - pattern: "api-*-v2", - expected: true, - }, - { - name: "only question mark", - pattern: "?", - expected: true, - }, - { - name: "multiple question marks", - pattern: "test???", - expected: true, - }, - { - name: "question mark at beginning", - pattern: "?test", - expected: true, - }, - { - name: "question mark in middle", - pattern: "te?st", - expected: true, - }, - { - name: "question mark at end", - pattern: "test?", - expected: true, - }, - { - name: "complex pattern with both wildcards", - pattern: "api-*-v?.?", - expected: true, - }, - { - name: "list wildcard - simple character list", - pattern: "test[abc]", - expected: true, - }, - { - name: "list wildcard - numeric range", - pattern: "version[0-9]", - expected: true, - 
}, - { - name: "list wildcard - alpha range", - pattern: "file[a-z].txt", - expected: true, - }, - { - name: "list wildcard - negated range", - pattern: "data[^0-9]", - expected: true, - }, - { - name: "list wildcard - alphanumeric range", - pattern: "id[a-zA-Z0-9]", - expected: true, - }, - { - name: "list wildcard - multiple in pattern", - pattern: "test[abc][123]", - expected: true, - }, - { - name: "list wildcard - with other wildcards", - pattern: "test[abc]*?.txt", - expected: true, - }, - { - name: "incomplete list wildcard - no closing bracket", - pattern: "test[abc", - expected: false, - }, - { - name: "incomplete list wildcard - no opening bracket", - pattern: "testabc]", - expected: false, - }, - { - name: "empty list wildcard", - pattern: "test[]", - expected: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := ContainsWildcards(tt.pattern) - if result != tt.expected { - t.Errorf("ContainsWildcards(%q) = %v, want %v", tt.pattern, result, tt.expected) - } - }) - } -} - -func TestBuildSingleWildcardCondition(t *testing.T) { - tests := []struct { - name string - field string - pattern string - expectedCondition string - expectedArg interface{} - }{ - { - name: "exact match", - field: "name", - pattern: "Test", - expectedCondition: "LOWER(name) = ?", - expectedArg: "test", - }, - { - name: "wildcard with asterisk", - field: "name", - pattern: "Test*", - expectedCondition: "LOWER(name) GLOB ?", - expectedArg: "test*", - }, - { - name: "wildcard with question mark", - field: "version", - pattern: "V1.?", - expectedCondition: "LOWER(version) GLOB ?", - expectedArg: "v1.?", - }, - { - name: "complex field name", - field: "skills.name", - pattern: "*Script", - expectedCondition: "LOWER(skills.name) GLOB ?", - expectedArg: "*script", - }, - { - name: "wildcard with mixed asterisk and question mark", - field: "name", - pattern: "Test*?.txt", - expectedCondition: "LOWER(name) GLOB ?", - expectedArg: "test*?.txt", - }, - { - name: "multiple question marks", - field: "code", - pattern: "AB??-XY?", - expectedCondition: "LOWER(code) GLOB ?", - expectedArg: "ab??-xy?", - }, - { - name: "list wildcard - simple character list", - field: "type", - pattern: "Test[ABC]", - expectedCondition: "LOWER(type) GLOB ?", - expectedArg: "test[abc]", - }, - { - name: "list wildcard - numeric range", - field: "version", - pattern: "V[0-9].0.0", - expectedCondition: "LOWER(version) GLOB ?", - expectedArg: "v[0-9].0.0", - }, - { - name: "list wildcard - alpha range", - field: "filename", - pattern: "File[A-Z].txt", - expectedCondition: "LOWER(filename) GLOB ?", - expectedArg: "file[a-z].txt", - }, - { - name: "list wildcard - negated range", - field: "code", - pattern: "Data[^0-9]", - expectedCondition: "LOWER(code) GLOB ?", - expectedArg: "data[^0-9]", - }, - { - name: "list wildcard - mixed with other wildcards", - field: "path", - pattern: "Test[ABC]*?.log", - expectedCondition: "LOWER(path) GLOB ?", - expectedArg: "test[abc]*?.log", - }, - { - name: "empty pattern", - field: "name", - pattern: "", - expectedCondition: "LOWER(name) = ?", - expectedArg: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - condition, arg := BuildSingleWildcardCondition(tt.field, tt.pattern) - - if condition != tt.expectedCondition { - t.Errorf("BuildSingleWildcardCondition(%q, %q) condition = %q, want %q", - tt.field, tt.pattern, condition, tt.expectedCondition) - } - - if arg != tt.expectedArg { - t.Errorf("BuildSingleWildcardCondition(%q, %q) arg = %v, 
want %v", - tt.field, tt.pattern, arg, tt.expectedArg) - } - }) - } -} - -func TestBuildWildcardCondition(t *testing.T) { - tests := []struct { - name string - field string - patterns []string - expectedCondition string - expectedArgs []interface{} - }{ - { - name: "empty patterns", - field: "field", - patterns: []string{}, - expectedCondition: "", - expectedArgs: nil, - }, - { - name: "single exact pattern", - field: "name", - patterns: []string{"Test"}, - expectedCondition: "LOWER(name) = ?", - expectedArgs: []interface{}{"test"}, - }, - { - name: "single wildcard pattern", - field: "name", - patterns: []string{"Test*"}, - expectedCondition: "LOWER(name) GLOB ?", - expectedArgs: []interface{}{"test*"}, - }, - { - name: "multiple exact patterns", - field: "name", - patterns: []string{"Test1", "Test2"}, - expectedCondition: "(LOWER(name) = ? OR LOWER(name) = ?)", - expectedArgs: []interface{}{"test1", "test2"}, - }, - { - name: "multiple wildcard patterns", - field: "name", - patterns: []string{"Test*", "*Service"}, - expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) GLOB ?)", - expectedArgs: []interface{}{"test*", "*service"}, - }, - { - name: "mixed exact and wildcard patterns", - field: "name", - patterns: []string{"Python*", "Go", "Java*"}, - expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) = ? OR LOWER(name) GLOB ?)", - expectedArgs: []interface{}{"python*", "go", "java*"}, - }, - { - name: "single pattern no parentheses", - field: "version", - patterns: []string{"V1.*"}, - expectedCondition: "LOWER(version) GLOB ?", - expectedArgs: []interface{}{"v1.*"}, - }, - { - name: "complex field name", - field: "skills.name", - patterns: []string{"*Script"}, - expectedCondition: "LOWER(skills.name) GLOB ?", - expectedArgs: []interface{}{"*script"}, - }, - { - name: "pattern with special chars (literal in GLOB)", - field: "name", - patterns: []string{"Test%_*"}, - expectedCondition: "LOWER(name) GLOB ?", - expectedArgs: []interface{}{"test%_*"}, - }, - { - name: "question mark as wildcard in GLOB", - field: "name", - patterns: []string{"Test?", "Pattern*"}, - expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) GLOB ?)", - expectedArgs: []interface{}{"test?", "pattern*"}, - }, - { - name: "multiple question marks in single pattern", - field: "version", - patterns: []string{"v?.?.?"}, - expectedCondition: "LOWER(version) GLOB ?", - expectedArgs: []interface{}{"v?.?.?"}, - }, - { - name: "mixed patterns with question marks", - field: "code", - patterns: []string{"AB??", "CD*", "EF", "GH?I"}, - expectedCondition: "(LOWER(code) GLOB ? OR LOWER(code) GLOB ? OR LOWER(code) = ? OR LOWER(code) GLOB ?)", - expectedArgs: []interface{}{"ab??", "cd*", "ef", "gh?i"}, - }, - { - name: "question mark with special characters", - field: "filename", - patterns: []string{"test?.txt", "data_?.csv"}, - expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", - expectedArgs: []interface{}{"test?.txt", "data_?.csv"}, - }, - { - name: "list wildcard - simple character lists", - field: "type", - patterns: []string{"Test[ABC]", "Data[XYZ]"}, - expectedCondition: "(LOWER(type) GLOB ? 
OR LOWER(type) GLOB ?)", - expectedArgs: []interface{}{"test[abc]", "data[xyz]"}, - }, - { - name: "list wildcard - numeric ranges", - field: "version", - patterns: []string{"V[0-9].0.0"}, - expectedCondition: "LOWER(version) GLOB ?", - expectedArgs: []interface{}{"v[0-9].0.0"}, - }, - { - name: "list wildcard - mixed with other patterns", - field: "filename", - patterns: []string{"File[A-Z].txt", "exact.log", "data*.csv"}, - expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) = ? OR LOWER(filename) GLOB ?)", - expectedArgs: []interface{}{"file[a-z].txt", "exact.log", "data*.csv"}, - }, - { - name: "list wildcard - negated ranges", - field: "code", - patterns: []string{"Data[^0-9]", "Test[^A-Z]"}, - expectedCondition: "(LOWER(code) GLOB ? OR LOWER(code) GLOB ?)", - expectedArgs: []interface{}{"data[^0-9]", "test[^a-z]"}, - }, - { - name: "list wildcard - complex combinations", - field: "path", - patterns: []string{"Log[0-9][A-Z]*", "File[abc]?.txt"}, - expectedCondition: "(LOWER(path) GLOB ? OR LOWER(path) GLOB ?)", - expectedArgs: []interface{}{"log[0-9][a-z]*", "file[abc]?.txt"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - condition, args := BuildWildcardCondition(tt.field, tt.patterns) - - if condition != tt.expectedCondition { - t.Errorf("BuildWildcardCondition(%q, %v) condition = %q, want %q", - tt.field, tt.patterns, condition, tt.expectedCondition) - } - - if !reflect.DeepEqual(args, tt.expectedArgs) { - t.Errorf("BuildWildcardCondition(%q, %v) args = %v, want %v", - tt.field, tt.patterns, args, tt.expectedArgs) - } - }) - } -} - -func TestWildcardIntegration(t *testing.T) { - // Test the integration of all functions together - tests := []struct { - name string - field string - patterns []string - expectedCondition string - expectedArgs []interface{} - }{ - { - name: "real world example - skill names", - field: "skills.name", - patterns: []string{"Python*", "JavaScript", "*Script", "Go"}, - expectedCondition: "(LOWER(skills.name) GLOB ? OR LOWER(skills.name) = ? OR LOWER(skills.name) GLOB ? OR LOWER(skills.name) = ?)", - expectedArgs: []interface{}{"python*", "javascript", "*script", "go"}, - }, - { - name: "real world example - locator types", - field: "locators.type", - patterns: []string{"HTTP*", "FTP*", "File"}, - expectedCondition: "(LOWER(locators.type) GLOB ? OR LOWER(locators.type) GLOB ? OR LOWER(locators.type) = ?)", - expectedArgs: []interface{}{"http*", "ftp*", "file"}, - }, - { - name: "real world example - extension names", - field: "extensions.name", - patterns: []string{"*-Plugin", "*-Extension", "Core"}, - expectedCondition: "(LOWER(extensions.name) GLOB ? OR LOWER(extensions.name) GLOB ? OR LOWER(extensions.name) = ?)", - expectedArgs: []interface{}{"*-plugin", "*-extension", "core"}, - }, - { - name: "real world example - version patterns with question marks", - field: "version", - patterns: []string{"v?.0.0", "v1.?.?", "v2.*"}, - expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", - expectedArgs: []interface{}{"v?.0.0", "v1.?.?", "v2.*"}, - }, - { - name: "real world example - file extensions with question marks", - field: "filename", - patterns: []string{"*.tx?", "data_?.csv", "log???.txt"}, - expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ? 
OR LOWER(filename) GLOB ?)", - expectedArgs: []interface{}{"*.tx?", "data_?.csv", "log???.txt"}, - }, - { - name: "real world example - version patterns with list wildcards", - field: "version", - patterns: []string{"v[0-9].0.0", "v[1-3].*", "beta[a-z]"}, - expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", - expectedArgs: []interface{}{"v[0-9].0.0", "v[1-3].*", "beta[a-z]"}, - }, - { - name: "real world example - file types with list wildcards", - field: "filename", - patterns: []string{"*.tx[tx]", "data[0-9].csv", "log[^0-9]*"}, - expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", - expectedArgs: []interface{}{"*.tx[tx]", "data[0-9].csv", "log[^0-9]*"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - condition, args := BuildWildcardCondition(tt.field, tt.patterns) - - if condition != tt.expectedCondition { - t.Errorf("Integration test %q: condition = %q, want %q", - tt.name, condition, tt.expectedCondition) - } - - if !reflect.DeepEqual(args, tt.expectedArgs) { - t.Errorf("Integration test %q: args = %v, want %v", - tt.name, args, tt.expectedArgs) - } - }) - } -} - -func TestQuestionMarkWildcardFunctionality(t *testing.T) { - tests := []struct { - name string - field string - patterns []string - expectedCondition string - expectedArgs []interface{} - description string - }{ - { - name: "single character replacement", - field: "code", - patterns: []string{"A?C"}, - expectedCondition: "LOWER(code) GLOB ?", - expectedArgs: []interface{}{"a?c"}, - description: "? should match exactly one character", - }, - { - name: "multiple single character replacements", - field: "serial", - patterns: []string{"AB??EF"}, - expectedCondition: "LOWER(serial) GLOB ?", - expectedArgs: []interface{}{"ab??ef"}, - description: "Multiple ? should each match one character", - }, - { - name: "question mark with asterisk combination", - field: "filename", - patterns: []string{"*.tx?", "data*.?sv"}, - expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", - expectedArgs: []interface{}{"*.tx?", "data*.?sv"}, - description: "? and * should work together", - }, - { - name: "question mark in version patterns", - field: "version", - patterns: []string{"v1.?.0", "v?.0.0"}, - expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", - expectedArgs: []interface{}{"v1.?.0", "v?.0.0"}, - description: "? useful for version number wildcards", - }, - { - name: "question mark with exact matches", - field: "type", - patterns: []string{"A?B", "exact", "C?D"}, - expectedCondition: "(LOWER(type) GLOB ? OR LOWER(type) = ? OR LOWER(type) GLOB ?)", - expectedArgs: []interface{}{"a?b", "exact", "c?d"}, - description: "Mix of ? 
wildcards and exact matches", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - condition, args := BuildWildcardCondition(tt.field, tt.patterns) - - if condition != tt.expectedCondition { - t.Errorf("%s: condition = %q, want %q", tt.description, condition, tt.expectedCondition) - } - - if !reflect.DeepEqual(args, tt.expectedArgs) { - t.Errorf("%s: args = %v, want %v", tt.description, args, tt.expectedArgs) - } - }) - } -} - -func TestListWildcardFunctionality(t *testing.T) { - tests := []struct { - name string - field string - patterns []string - expectedCondition string - expectedArgs []interface{} - description string - }{ - { - name: "simple character list", - field: "type", - patterns: []string{"Test[ABC]"}, - expectedCondition: "LOWER(type) GLOB ?", - expectedArgs: []interface{}{"test[abc]"}, - description: "[ABC] should match exactly one of A, B, or C", - }, - { - name: "numeric range", - field: "version", - patterns: []string{"v[0-9].0.0"}, - expectedCondition: "LOWER(version) GLOB ?", - expectedArgs: []interface{}{"v[0-9].0.0"}, - description: "[0-9] should match any single digit", - }, - { - name: "alphabetic range", - field: "grade", - patterns: []string{"Grade[A-F]"}, - expectedCondition: "LOWER(grade) GLOB ?", - expectedArgs: []interface{}{"grade[a-f]"}, - description: "[A-F] should match any letter from A to F", - }, - { - name: "negated character class", - field: "code", - patterns: []string{"Data[^0-9]"}, - expectedCondition: "LOWER(code) GLOB ?", - expectedArgs: []interface{}{"data[^0-9]"}, - description: "[^0-9] should match any character except digits", - }, - { - name: "mixed alphanumeric range", - field: "id", - patterns: []string{"ID[a-zA-Z0-9]"}, - expectedCondition: "LOWER(id) GLOB ?", - expectedArgs: []interface{}{"id[a-za-z0-9]"}, - description: "[a-zA-Z0-9] should match any alphanumeric character", - }, - { - name: "multiple list wildcards", - field: "code", - patterns: []string{"Test[ABC][123]"}, - expectedCondition: "LOWER(code) GLOB ?", - expectedArgs: []interface{}{"test[abc][123]"}, - description: "Multiple list wildcards should work together", - }, - { - name: "list wildcard with other wildcards", - field: "filename", - patterns: []string{"File[0-9]*?.log"}, - expectedCondition: "LOWER(filename) GLOB ?", - expectedArgs: []interface{}{"file[0-9]*?.log"}, - description: "List wildcards should work with * and ? wildcards", - }, - { - name: "mixed exact and list wildcard patterns", - field: "type", - patterns: []string{"Test[ABC]", "exact", "Data[XYZ]"}, - expectedCondition: "(LOWER(type) GLOB ? OR LOWER(type) = ? OR LOWER(type) GLOB ?)", - expectedArgs: []interface{}{"test[abc]", "exact", "data[xyz]"}, - description: "Mix of list wildcards and exact matches", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - condition, args := BuildWildcardCondition(tt.field, tt.patterns) - - if condition != tt.expectedCondition { - t.Errorf("%s: condition = %q, want %q", tt.description, condition, tt.expectedCondition) - } - - if !reflect.DeepEqual(args, tt.expectedArgs) { - t.Errorf("%s: args = %v, want %v", tt.description, args, tt.expectedArgs) - } - }) - } -} - -// Benchmark tests to ensure performance is acceptable. 
-func BenchmarkContainsWildcards(b *testing.B) { - patterns := []string{ - "simple", - "test*", - "*test", - "te*st", - "*test*", - "test?", - "?test", - "te?st", - "test???", - "*test?", - "?test*", - "test[abc]", - "version[0-9]", - "file[a-z].txt", - "data[^0-9]", - "id[a-zA-Z0-9]", - "test[abc][123]", - "complex-pattern-*-with-multiple-*-wildcards-and-?-marks-[0-9]", - } - - b.ResetTimer() - - for range b.N { - for _, pattern := range patterns { - ContainsWildcards(pattern) - } - } -} - -func BenchmarkBuildWildcardCondition(b *testing.B) { - patterns := []string{"Python*", "Go", "Java*", "*Script", "TypeScript"} - field := "skills.name" - - b.ResetTimer() - - for range b.N { - BuildWildcardCondition(field, patterns) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "reflect" + "testing" +) + +func TestContainsWildcards(t *testing.T) { + tests := []struct { + name string + pattern string + expected bool + }{ + { + name: "no wildcards", + pattern: "simple", + expected: false, + }, + { + name: "single asterisk", + pattern: "test*", + expected: true, + }, + { + name: "asterisk at beginning", + pattern: "*test", + expected: true, + }, + { + name: "asterisk in middle", + pattern: "te*st", + expected: true, + }, + { + name: "multiple asterisks", + pattern: "*test*", + expected: true, + }, + { + name: "question mark (wildcard in GLOB)", + pattern: "test?", + expected: true, + }, + { + name: "mixed asterisk and question mark", + pattern: "test*?", + expected: true, + }, + { + name: "empty string", + pattern: "", + expected: false, + }, + { + name: "only asterisk", + pattern: "*", + expected: true, + }, + { + name: "complex pattern", + pattern: "api-*-v2", + expected: true, + }, + { + name: "only question mark", + pattern: "?", + expected: true, + }, + { + name: "multiple question marks", + pattern: "test???", + expected: true, + }, + { + name: "question mark at beginning", + pattern: "?test", + expected: true, + }, + { + name: "question mark in middle", + pattern: "te?st", + expected: true, + }, + { + name: "question mark at end", + pattern: "test?", + expected: true, + }, + { + name: "complex pattern with both wildcards", + pattern: "api-*-v?.?", + expected: true, + }, + { + name: "list wildcard - simple character list", + pattern: "test[abc]", + expected: true, + }, + { + name: "list wildcard - numeric range", + pattern: "version[0-9]", + expected: true, + }, + { + name: "list wildcard - alpha range", + pattern: "file[a-z].txt", + expected: true, + }, + { + name: "list wildcard - negated range", + pattern: "data[^0-9]", + expected: true, + }, + { + name: "list wildcard - alphanumeric range", + pattern: "id[a-zA-Z0-9]", + expected: true, + }, + { + name: "list wildcard - multiple in pattern", + pattern: "test[abc][123]", + expected: true, + }, + { + name: "list wildcard - with other wildcards", + pattern: "test[abc]*?.txt", + expected: true, + }, + { + name: "incomplete list wildcard - no closing bracket", + pattern: "test[abc", + expected: false, + }, + { + name: "incomplete list wildcard - no opening bracket", + pattern: "testabc]", + expected: false, + }, + { + name: "empty list wildcard", + pattern: "test[]", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ContainsWildcards(tt.pattern) + if result != tt.expected { + t.Errorf("ContainsWildcards(%q) = %v, want %v", tt.pattern, result, tt.expected) + } + }) + } +} + +func 
TestBuildSingleWildcardCondition(t *testing.T) { + tests := []struct { + name string + field string + pattern string + expectedCondition string + expectedArg interface{} + }{ + { + name: "exact match", + field: "name", + pattern: "Test", + expectedCondition: "LOWER(name) = ?", + expectedArg: "test", + }, + { + name: "wildcard with asterisk", + field: "name", + pattern: "Test*", + expectedCondition: "LOWER(name) GLOB ?", + expectedArg: "test*", + }, + { + name: "wildcard with question mark", + field: "version", + pattern: "V1.?", + expectedCondition: "LOWER(version) GLOB ?", + expectedArg: "v1.?", + }, + { + name: "complex field name", + field: "skills.name", + pattern: "*Script", + expectedCondition: "LOWER(skills.name) GLOB ?", + expectedArg: "*script", + }, + { + name: "wildcard with mixed asterisk and question mark", + field: "name", + pattern: "Test*?.txt", + expectedCondition: "LOWER(name) GLOB ?", + expectedArg: "test*?.txt", + }, + { + name: "multiple question marks", + field: "code", + pattern: "AB??-XY?", + expectedCondition: "LOWER(code) GLOB ?", + expectedArg: "ab??-xy?", + }, + { + name: "list wildcard - simple character list", + field: "type", + pattern: "Test[ABC]", + expectedCondition: "LOWER(type) GLOB ?", + expectedArg: "test[abc]", + }, + { + name: "list wildcard - numeric range", + field: "version", + pattern: "V[0-9].0.0", + expectedCondition: "LOWER(version) GLOB ?", + expectedArg: "v[0-9].0.0", + }, + { + name: "list wildcard - alpha range", + field: "filename", + pattern: "File[A-Z].txt", + expectedCondition: "LOWER(filename) GLOB ?", + expectedArg: "file[a-z].txt", + }, + { + name: "list wildcard - negated range", + field: "code", + pattern: "Data[^0-9]", + expectedCondition: "LOWER(code) GLOB ?", + expectedArg: "data[^0-9]", + }, + { + name: "list wildcard - mixed with other wildcards", + field: "path", + pattern: "Test[ABC]*?.log", + expectedCondition: "LOWER(path) GLOB ?", + expectedArg: "test[abc]*?.log", + }, + { + name: "empty pattern", + field: "name", + pattern: "", + expectedCondition: "LOWER(name) = ?", + expectedArg: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + condition, arg := BuildSingleWildcardCondition(tt.field, tt.pattern) + + if condition != tt.expectedCondition { + t.Errorf("BuildSingleWildcardCondition(%q, %q) condition = %q, want %q", + tt.field, tt.pattern, condition, tt.expectedCondition) + } + + if arg != tt.expectedArg { + t.Errorf("BuildSingleWildcardCondition(%q, %q) arg = %v, want %v", + tt.field, tt.pattern, arg, tt.expectedArg) + } + }) + } +} + +func TestBuildWildcardCondition(t *testing.T) { + tests := []struct { + name string + field string + patterns []string + expectedCondition string + expectedArgs []interface{} + }{ + { + name: "empty patterns", + field: "field", + patterns: []string{}, + expectedCondition: "", + expectedArgs: nil, + }, + { + name: "single exact pattern", + field: "name", + patterns: []string{"Test"}, + expectedCondition: "LOWER(name) = ?", + expectedArgs: []interface{}{"test"}, + }, + { + name: "single wildcard pattern", + field: "name", + patterns: []string{"Test*"}, + expectedCondition: "LOWER(name) GLOB ?", + expectedArgs: []interface{}{"test*"}, + }, + { + name: "multiple exact patterns", + field: "name", + patterns: []string{"Test1", "Test2"}, + expectedCondition: "(LOWER(name) = ? 
OR LOWER(name) = ?)", + expectedArgs: []interface{}{"test1", "test2"}, + }, + { + name: "multiple wildcard patterns", + field: "name", + patterns: []string{"Test*", "*Service"}, + expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) GLOB ?)", + expectedArgs: []interface{}{"test*", "*service"}, + }, + { + name: "mixed exact and wildcard patterns", + field: "name", + patterns: []string{"Python*", "Go", "Java*"}, + expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) = ? OR LOWER(name) GLOB ?)", + expectedArgs: []interface{}{"python*", "go", "java*"}, + }, + { + name: "single pattern no parentheses", + field: "version", + patterns: []string{"V1.*"}, + expectedCondition: "LOWER(version) GLOB ?", + expectedArgs: []interface{}{"v1.*"}, + }, + { + name: "complex field name", + field: "skills.name", + patterns: []string{"*Script"}, + expectedCondition: "LOWER(skills.name) GLOB ?", + expectedArgs: []interface{}{"*script"}, + }, + { + name: "pattern with special chars (literal in GLOB)", + field: "name", + patterns: []string{"Test%_*"}, + expectedCondition: "LOWER(name) GLOB ?", + expectedArgs: []interface{}{"test%_*"}, + }, + { + name: "question mark as wildcard in GLOB", + field: "name", + patterns: []string{"Test?", "Pattern*"}, + expectedCondition: "(LOWER(name) GLOB ? OR LOWER(name) GLOB ?)", + expectedArgs: []interface{}{"test?", "pattern*"}, + }, + { + name: "multiple question marks in single pattern", + field: "version", + patterns: []string{"v?.?.?"}, + expectedCondition: "LOWER(version) GLOB ?", + expectedArgs: []interface{}{"v?.?.?"}, + }, + { + name: "mixed patterns with question marks", + field: "code", + patterns: []string{"AB??", "CD*", "EF", "GH?I"}, + expectedCondition: "(LOWER(code) GLOB ? OR LOWER(code) GLOB ? OR LOWER(code) = ? OR LOWER(code) GLOB ?)", + expectedArgs: []interface{}{"ab??", "cd*", "ef", "gh?i"}, + }, + { + name: "question mark with special characters", + field: "filename", + patterns: []string{"test?.txt", "data_?.csv"}, + expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", + expectedArgs: []interface{}{"test?.txt", "data_?.csv"}, + }, + { + name: "list wildcard - simple character lists", + field: "type", + patterns: []string{"Test[ABC]", "Data[XYZ]"}, + expectedCondition: "(LOWER(type) GLOB ? OR LOWER(type) GLOB ?)", + expectedArgs: []interface{}{"test[abc]", "data[xyz]"}, + }, + { + name: "list wildcard - numeric ranges", + field: "version", + patterns: []string{"V[0-9].0.0"}, + expectedCondition: "LOWER(version) GLOB ?", + expectedArgs: []interface{}{"v[0-9].0.0"}, + }, + { + name: "list wildcard - mixed with other patterns", + field: "filename", + patterns: []string{"File[A-Z].txt", "exact.log", "data*.csv"}, + expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) = ? OR LOWER(filename) GLOB ?)", + expectedArgs: []interface{}{"file[a-z].txt", "exact.log", "data*.csv"}, + }, + { + name: "list wildcard - negated ranges", + field: "code", + patterns: []string{"Data[^0-9]", "Test[^A-Z]"}, + expectedCondition: "(LOWER(code) GLOB ? OR LOWER(code) GLOB ?)", + expectedArgs: []interface{}{"data[^0-9]", "test[^a-z]"}, + }, + { + name: "list wildcard - complex combinations", + field: "path", + patterns: []string{"Log[0-9][A-Z]*", "File[abc]?.txt"}, + expectedCondition: "(LOWER(path) GLOB ? 
OR LOWER(path) GLOB ?)", + expectedArgs: []interface{}{"log[0-9][a-z]*", "file[abc]?.txt"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + condition, args := BuildWildcardCondition(tt.field, tt.patterns) + + if condition != tt.expectedCondition { + t.Errorf("BuildWildcardCondition(%q, %v) condition = %q, want %q", + tt.field, tt.patterns, condition, tt.expectedCondition) + } + + if !reflect.DeepEqual(args, tt.expectedArgs) { + t.Errorf("BuildWildcardCondition(%q, %v) args = %v, want %v", + tt.field, tt.patterns, args, tt.expectedArgs) + } + }) + } +} + +func TestWildcardIntegration(t *testing.T) { + // Test the integration of all functions together + tests := []struct { + name string + field string + patterns []string + expectedCondition string + expectedArgs []interface{} + }{ + { + name: "real world example - skill names", + field: "skills.name", + patterns: []string{"Python*", "JavaScript", "*Script", "Go"}, + expectedCondition: "(LOWER(skills.name) GLOB ? OR LOWER(skills.name) = ? OR LOWER(skills.name) GLOB ? OR LOWER(skills.name) = ?)", + expectedArgs: []interface{}{"python*", "javascript", "*script", "go"}, + }, + { + name: "real world example - locator types", + field: "locators.type", + patterns: []string{"HTTP*", "FTP*", "File"}, + expectedCondition: "(LOWER(locators.type) GLOB ? OR LOWER(locators.type) GLOB ? OR LOWER(locators.type) = ?)", + expectedArgs: []interface{}{"http*", "ftp*", "file"}, + }, + { + name: "real world example - extension names", + field: "extensions.name", + patterns: []string{"*-Plugin", "*-Extension", "Core"}, + expectedCondition: "(LOWER(extensions.name) GLOB ? OR LOWER(extensions.name) GLOB ? OR LOWER(extensions.name) = ?)", + expectedArgs: []interface{}{"*-plugin", "*-extension", "core"}, + }, + { + name: "real world example - version patterns with question marks", + field: "version", + patterns: []string{"v?.0.0", "v1.?.?", "v2.*"}, + expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", + expectedArgs: []interface{}{"v?.0.0", "v1.?.?", "v2.*"}, + }, + { + name: "real world example - file extensions with question marks", + field: "filename", + patterns: []string{"*.tx?", "data_?.csv", "log???.txt"}, + expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", + expectedArgs: []interface{}{"*.tx?", "data_?.csv", "log???.txt"}, + }, + { + name: "real world example - version patterns with list wildcards", + field: "version", + patterns: []string{"v[0-9].0.0", "v[1-3].*", "beta[a-z]"}, + expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", + expectedArgs: []interface{}{"v[0-9].0.0", "v[1-3].*", "beta[a-z]"}, + }, + { + name: "real world example - file types with list wildcards", + field: "filename", + patterns: []string{"*.tx[tx]", "data[0-9].csv", "log[^0-9]*"}, + expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ? 
OR LOWER(filename) GLOB ?)", + expectedArgs: []interface{}{"*.tx[tx]", "data[0-9].csv", "log[^0-9]*"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + condition, args := BuildWildcardCondition(tt.field, tt.patterns) + + if condition != tt.expectedCondition { + t.Errorf("Integration test %q: condition = %q, want %q", + tt.name, condition, tt.expectedCondition) + } + + if !reflect.DeepEqual(args, tt.expectedArgs) { + t.Errorf("Integration test %q: args = %v, want %v", + tt.name, args, tt.expectedArgs) + } + }) + } +} + +func TestQuestionMarkWildcardFunctionality(t *testing.T) { + tests := []struct { + name string + field string + patterns []string + expectedCondition string + expectedArgs []interface{} + description string + }{ + { + name: "single character replacement", + field: "code", + patterns: []string{"A?C"}, + expectedCondition: "LOWER(code) GLOB ?", + expectedArgs: []interface{}{"a?c"}, + description: "? should match exactly one character", + }, + { + name: "multiple single character replacements", + field: "serial", + patterns: []string{"AB??EF"}, + expectedCondition: "LOWER(serial) GLOB ?", + expectedArgs: []interface{}{"ab??ef"}, + description: "Multiple ? should each match one character", + }, + { + name: "question mark with asterisk combination", + field: "filename", + patterns: []string{"*.tx?", "data*.?sv"}, + expectedCondition: "(LOWER(filename) GLOB ? OR LOWER(filename) GLOB ?)", + expectedArgs: []interface{}{"*.tx?", "data*.?sv"}, + description: "? and * should work together", + }, + { + name: "question mark in version patterns", + field: "version", + patterns: []string{"v1.?.0", "v?.0.0"}, + expectedCondition: "(LOWER(version) GLOB ? OR LOWER(version) GLOB ?)", + expectedArgs: []interface{}{"v1.?.0", "v?.0.0"}, + description: "? useful for version number wildcards", + }, + { + name: "question mark with exact matches", + field: "type", + patterns: []string{"A?B", "exact", "C?D"}, + expectedCondition: "(LOWER(type) GLOB ? OR LOWER(type) = ? OR LOWER(type) GLOB ?)", + expectedArgs: []interface{}{"a?b", "exact", "c?d"}, + description: "Mix of ? 
wildcards and exact matches", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + condition, args := BuildWildcardCondition(tt.field, tt.patterns) + + if condition != tt.expectedCondition { + t.Errorf("%s: condition = %q, want %q", tt.description, condition, tt.expectedCondition) + } + + if !reflect.DeepEqual(args, tt.expectedArgs) { + t.Errorf("%s: args = %v, want %v", tt.description, args, tt.expectedArgs) + } + }) + } +} + +func TestListWildcardFunctionality(t *testing.T) { + tests := []struct { + name string + field string + patterns []string + expectedCondition string + expectedArgs []interface{} + description string + }{ + { + name: "simple character list", + field: "type", + patterns: []string{"Test[ABC]"}, + expectedCondition: "LOWER(type) GLOB ?", + expectedArgs: []interface{}{"test[abc]"}, + description: "[ABC] should match exactly one of A, B, or C", + }, + { + name: "numeric range", + field: "version", + patterns: []string{"v[0-9].0.0"}, + expectedCondition: "LOWER(version) GLOB ?", + expectedArgs: []interface{}{"v[0-9].0.0"}, + description: "[0-9] should match any single digit", + }, + { + name: "alphabetic range", + field: "grade", + patterns: []string{"Grade[A-F]"}, + expectedCondition: "LOWER(grade) GLOB ?", + expectedArgs: []interface{}{"grade[a-f]"}, + description: "[A-F] should match any letter from A to F", + }, + { + name: "negated character class", + field: "code", + patterns: []string{"Data[^0-9]"}, + expectedCondition: "LOWER(code) GLOB ?", + expectedArgs: []interface{}{"data[^0-9]"}, + description: "[^0-9] should match any character except digits", + }, + { + name: "mixed alphanumeric range", + field: "id", + patterns: []string{"ID[a-zA-Z0-9]"}, + expectedCondition: "LOWER(id) GLOB ?", + expectedArgs: []interface{}{"id[a-za-z0-9]"}, + description: "[a-zA-Z0-9] should match any alphanumeric character", + }, + { + name: "multiple list wildcards", + field: "code", + patterns: []string{"Test[ABC][123]"}, + expectedCondition: "LOWER(code) GLOB ?", + expectedArgs: []interface{}{"test[abc][123]"}, + description: "Multiple list wildcards should work together", + }, + { + name: "list wildcard with other wildcards", + field: "filename", + patterns: []string{"File[0-9]*?.log"}, + expectedCondition: "LOWER(filename) GLOB ?", + expectedArgs: []interface{}{"file[0-9]*?.log"}, + description: "List wildcards should work with * and ? wildcards", + }, + { + name: "mixed exact and list wildcard patterns", + field: "type", + patterns: []string{"Test[ABC]", "exact", "Data[XYZ]"}, + expectedCondition: "(LOWER(type) GLOB ? OR LOWER(type) = ? OR LOWER(type) GLOB ?)", + expectedArgs: []interface{}{"test[abc]", "exact", "data[xyz]"}, + description: "Mix of list wildcards and exact matches", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + condition, args := BuildWildcardCondition(tt.field, tt.patterns) + + if condition != tt.expectedCondition { + t.Errorf("%s: condition = %q, want %q", tt.description, condition, tt.expectedCondition) + } + + if !reflect.DeepEqual(args, tt.expectedArgs) { + t.Errorf("%s: args = %v, want %v", tt.description, args, tt.expectedArgs) + } + }) + } +} + +// Benchmark tests to ensure performance is acceptable. 
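For reference, a minimal sketch of the three helpers these tests pin down, assuming SQLite GLOB semantics and plain string concatenation; the actual implementation in the utils package may differ in detail:

package utils

import "strings"

// ContainsWildcards reports whether a pattern uses GLOB wildcards:
// '*', '?', or a '[' followed somewhere later by a closing ']'.
func ContainsWildcards(pattern string) bool {
	if strings.ContainsAny(pattern, "*?") {
		return true
	}

	open := strings.Index(pattern, "[")

	return open >= 0 && strings.Contains(pattern[open+1:], "]")
}

// BuildSingleWildcardCondition lowercases the pattern and selects
// case-insensitive GLOB matching for wildcard patterns, plain
// equality otherwise.
func BuildSingleWildcardCondition(field, pattern string) (string, interface{}) {
	arg := strings.ToLower(pattern)
	if ContainsWildcards(pattern) {
		return "LOWER(" + field + ") GLOB ?", arg
	}

	return "LOWER(" + field + ") = ?", arg
}

// BuildWildcardCondition OR-joins the per-pattern conditions and
// parenthesizes the result only when more than one pattern is given.
func BuildWildcardCondition(field string, patterns []string) (string, []interface{}) {
	if len(patterns) == 0 {
		return "", nil
	}

	conditions := make([]string, 0, len(patterns))
	args := make([]interface{}, 0, len(patterns))

	for _, pattern := range patterns {
		condition, arg := BuildSingleWildcardCondition(field, pattern)
		conditions = append(conditions, condition)
		args = append(args, arg)
	}

	if len(conditions) == 1 {
		return conditions[0], args
	}

	return "(" + strings.Join(conditions, " OR ") + ")", args
}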
+func BenchmarkContainsWildcards(b *testing.B) { + patterns := []string{ + "simple", + "test*", + "*test", + "te*st", + "*test*", + "test?", + "?test", + "te?st", + "test???", + "*test?", + "?test*", + "test[abc]", + "version[0-9]", + "file[a-z].txt", + "data[^0-9]", + "id[a-zA-Z0-9]", + "test[abc][123]", + "complex-pattern-*-with-multiple-*-wildcards-and-?-marks-[0-9]", + } + + b.ResetTimer() + + for range b.N { + for _, pattern := range patterns { + ContainsWildcards(pattern) + } + } +} + +func BenchmarkBuildWildcardCondition(b *testing.B) { + patterns := []string{"Python*", "Go", "Java*", "*Script", "TypeScript"} + field := "skills.name" + + b.ResetTimer() + + for range b.N { + BuildWildcardCondition(field, patterns) + } +} diff --git a/server/datastore/datastore.go b/server/datastore/datastore.go index 17e35e6e5..2f4c794d0 100644 --- a/server/datastore/datastore.go +++ b/server/datastore/datastore.go @@ -1,35 +1,35 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package datastore - -import ( - "fmt" - - "github.com/agntcy/dir/server/types" - "github.com/ipfs/go-datastore" - badger "github.com/ipfs/go-ds-badger" -) - -// New is shortcut to creating specific datastore. -// For now, we use memory store. -// -// We should only create a proper datastore from options, -// as we do not implement this interface. -func New(opts ...Option) (types.Datastore, error) { - // read options - options := &options{} - for _, opt := range opts { - if err := opt(options); err != nil { - return nil, fmt.Errorf("failed to apply option: %w", err) - } - } - - // create local datastore if requested - if localDir := options.localDir; localDir != "" { - return badger.NewDatastore(localDir, &badger.DefaultOptions) //nolint:wrapcheck - } - - // create in-memory datastore - return datastore.NewMapDatastore(), nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package datastore + +import ( + "fmt" + + "github.com/agntcy/dir/server/types" + "github.com/ipfs/go-datastore" + badger "github.com/ipfs/go-ds-badger" +) + +// New is shortcut to creating specific datastore. +// For now, we use memory store. +// +// We should only create a proper datastore from options, +// as we do not implement this interface. +func New(opts ...Option) (types.Datastore, error) { + // read options + options := &options{} + for _, opt := range opts { + if err := opt(options); err != nil { + return nil, fmt.Errorf("failed to apply option: %w", err) + } + } + + // create local datastore if requested + if localDir := options.localDir; localDir != "" { + return badger.NewDatastore(localDir, &badger.DefaultOptions) //nolint:wrapcheck + } + + // create in-memory datastore + return datastore.NewMapDatastore(), nil +} diff --git a/server/datastore/options.go b/server/datastore/options.go index 6a7fc7512..35b91e9dd 100644 --- a/server/datastore/options.go +++ b/server/datastore/options.go @@ -1,31 +1,31 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package datastore - -import ( - "fmt" - "os" -) - -type Option func(*options) error - -type options struct { - localDir string -} - -// WithFsProvider sets the filesystem as the datastore provider. -// It creates a local directory if it doesn't exist. 
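A usage sketch of the datastore constructor and option above; the import path comes from the diff, while the directory path is illustrative:

package main

import (
	"log"

	"github.com/agntcy/dir/server/datastore"
)

func main() {
	// Default: an in-memory map datastore, useful for tests.
	mem, err := datastore.New()
	if err != nil {
		log.Fatalf("in-memory datastore: %v", err)
	}
	_ = mem

	// WithFsProvider: a Badger datastore persisted under the given
	// directory, which is created if missing. The path is made up.
	local, err := datastore.New(datastore.WithFsProvider("/tmp/dir-datastore"))
	if err != nil {
		log.Fatalf("local datastore: %v", err)
	}
	_ = local
}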
-func WithFsProvider(dir string) Option { - return func(o *options) error { - // create local dir if it doesn't exist - if err := os.MkdirAll(dir, 0o755); err != nil { //nolint:mnd - return fmt.Errorf("failed to create local dir: %w", err) - } - - // set local dir - o.localDir = dir - - return nil - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package datastore + +import ( + "fmt" + "os" +) + +type Option func(*options) error + +type options struct { + localDir string +} + +// WithFsProvider sets the filesystem as the datastore provider. +// It creates a local directory if it doesn't exist. +func WithFsProvider(dir string) Option { + return func(o *options) error { + // create local dir if it doesn't exist + if err := os.MkdirAll(dir, 0o755); err != nil { //nolint:mnd + return fmt.Errorf("failed to create local dir: %w", err) + } + + // set local dir + o.localDir = dir + + return nil + } +} diff --git a/server/events/.gitkeep b/server/events/.gitkeep index 25c64fd01..c19d5b9d0 100644 --- a/server/events/.gitkeep +++ b/server/events/.gitkeep @@ -1,2 +1,2 @@ -# Events package files will be created here - +# Events package files will be created here + diff --git a/server/events/builder.go b/server/events/builder.go index c876d2dcc..5046bfe4f 100644 --- a/server/events/builder.go +++ b/server/events/builder.go @@ -1,144 +1,144 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "strconv" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -// EventBuilder provides a fluent interface for creating events. -// It is decoupled from EventBus - use Build() to get the event, -// then explicitly publish it with bus.Publish(event). -type EventBuilder struct { - event *Event -} - -// NewEventBuilder creates a new event builder. -// The event is created with auto-generated ID and timestamp. -// -// Example: -// -// event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafyxxx"). -// WithLabels([]string{"/skills/AI"}). -// Build() -// bus.Publish(event) -func NewEventBuilder(eventType eventsv1.EventType, resourceID string) *EventBuilder { - return &EventBuilder{ - event: NewEvent(eventType, resourceID), - } -} - -// WithLabels sets the event labels. -func (eb *EventBuilder) WithLabels(labels []string) *EventBuilder { - eb.event.Labels = labels - - return eb -} - -// WithMetadata adds a metadata key-value pair. -func (eb *EventBuilder) WithMetadata(key, value string) *EventBuilder { - if eb.event.Metadata == nil { - eb.event.Metadata = make(map[string]string) - } - - eb.event.Metadata[key] = value - - return eb -} - -// WithMetadataMap sets multiple metadata entries at once. -func (eb *EventBuilder) WithMetadataMap(metadata map[string]string) *EventBuilder { - if eb.event.Metadata == nil { - eb.event.Metadata = make(map[string]string) - } - - for k, v := range metadata { - eb.event.Metadata[k] = v - } - - return eb -} - -// Build returns the constructed event. -// After building, publish it explicitly with bus.Publish(event). -func (eb *EventBuilder) Build() *Event { - return eb.event -} - -// Convenience methods for common event patterns. -// These provide one-liner event publishing for typical scenarios. - -// RecordPushed publishes a record push event. -func (b *EventBus) RecordPushed(cid string, labels []string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, cid). - WithLabels(labels). 
- Build() - b.Publish(event) -} - -// RecordPulled publishes a record pull event. -func (b *EventBus) RecordPulled(cid string, labels []string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, cid). - WithLabels(labels). - Build() - b.Publish(event) -} - -// RecordDeleted publishes a record delete event. -func (b *EventBus) RecordDeleted(cid string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, cid). - Build() - b.Publish(event) -} - -// RecordPublished publishes a record publish event (announced to network). -func (b *EventBus) RecordPublished(cid string, labels []string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, cid). - WithLabels(labels). - Build() - b.Publish(event) -} - -// RecordUnpublished publishes a record unpublish event. -func (b *EventBus) RecordUnpublished(cid string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, cid). - Build() - b.Publish(event) -} - -// SyncCreated publishes a sync created event. -func (b *EventBus) SyncCreated(syncID, remoteURL string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, syncID). - WithMetadata("remote_url", remoteURL). - Build() - b.Publish(event) -} - -// SyncCompleted publishes a sync completed event. -func (b *EventBus) SyncCompleted(syncID, remoteURL string, recordCount int) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, syncID). - WithMetadata("remote_url", remoteURL). - WithMetadata("record_count", strconv.Itoa(recordCount)). - Build() - b.Publish(event) -} - -// SyncFailed publishes a sync failed event. -func (b *EventBus) SyncFailed(syncID, remoteURL, errorMsg string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, syncID). - WithMetadata("remote_url", remoteURL). - WithMetadata("error", errorMsg). - Build() - b.Publish(event) -} - -// RecordSigned publishes a record signed event. -func (b *EventBus) RecordSigned(cid, signer string) { - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, cid). - WithMetadata("signer", signer). - Build() - b.Publish(event) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "strconv" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +// EventBuilder provides a fluent interface for creating events. +// It is decoupled from EventBus - use Build() to get the event, +// then explicitly publish it with bus.Publish(event). +type EventBuilder struct { + event *Event +} + +// NewEventBuilder creates a new event builder. +// The event is created with auto-generated ID and timestamp. +// +// Example: +// +// event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafyxxx"). +// WithLabels([]string{"/skills/AI"}). +// Build() +// bus.Publish(event) +func NewEventBuilder(eventType eventsv1.EventType, resourceID string) *EventBuilder { + return &EventBuilder{ + event: NewEvent(eventType, resourceID), + } +} + +// WithLabels sets the event labels. +func (eb *EventBuilder) WithLabels(labels []string) *EventBuilder { + eb.event.Labels = labels + + return eb +} + +// WithMetadata adds a metadata key-value pair. +func (eb *EventBuilder) WithMetadata(key, value string) *EventBuilder { + if eb.event.Metadata == nil { + eb.event.Metadata = make(map[string]string) + } + + eb.event.Metadata[key] = value + + return eb +} + +// WithMetadataMap sets multiple metadata entries at once. 
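// For illustration, the same chaining covers metadata-heavy events;
// the sync ID, URL, and count below are made up:
//
//	event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, "sync-123").
//		WithMetadata("remote_url", "https://example.com/registry").
//		WithMetadata("record_count", "42").
//		Build()
//	bus.Publish(event)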
+func (eb *EventBuilder) WithMetadataMap(metadata map[string]string) *EventBuilder { + if eb.event.Metadata == nil { + eb.event.Metadata = make(map[string]string) + } + + for k, v := range metadata { + eb.event.Metadata[k] = v + } + + return eb +} + +// Build returns the constructed event. +// After building, publish it explicitly with bus.Publish(event). +func (eb *EventBuilder) Build() *Event { + return eb.event +} + +// Convenience methods for common event patterns. +// These provide one-liner event publishing for typical scenarios. + +// RecordPushed publishes a record push event. +func (b *EventBus) RecordPushed(cid string, labels []string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, cid). + WithLabels(labels). + Build() + b.Publish(event) +} + +// RecordPulled publishes a record pull event. +func (b *EventBus) RecordPulled(cid string, labels []string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, cid). + WithLabels(labels). + Build() + b.Publish(event) +} + +// RecordDeleted publishes a record delete event. +func (b *EventBus) RecordDeleted(cid string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, cid). + Build() + b.Publish(event) +} + +// RecordPublished publishes a record publish event (announced to network). +func (b *EventBus) RecordPublished(cid string, labels []string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, cid). + WithLabels(labels). + Build() + b.Publish(event) +} + +// RecordUnpublished publishes a record unpublish event. +func (b *EventBus) RecordUnpublished(cid string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, cid). + Build() + b.Publish(event) +} + +// SyncCreated publishes a sync created event. +func (b *EventBus) SyncCreated(syncID, remoteURL string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, syncID). + WithMetadata("remote_url", remoteURL). + Build() + b.Publish(event) +} + +// SyncCompleted publishes a sync completed event. +func (b *EventBus) SyncCompleted(syncID, remoteURL string, recordCount int) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, syncID). + WithMetadata("remote_url", remoteURL). + WithMetadata("record_count", strconv.Itoa(recordCount)). + Build() + b.Publish(event) +} + +// SyncFailed publishes a sync failed event. +func (b *EventBus) SyncFailed(syncID, remoteURL, errorMsg string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, syncID). + WithMetadata("remote_url", remoteURL). + WithMetadata("error", errorMsg). + Build() + b.Publish(event) +} + +// RecordSigned publishes a record signed event. +func (b *EventBus) RecordSigned(cid, signer string) { + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, cid). + WithMetadata("signer", signer). + Build() + b.Publish(event) +} diff --git a/server/events/builder_test.go b/server/events/builder_test.go index 8c84b0d55..523949a86 100644 --- a/server/events/builder_test.go +++ b/server/events/builder_test.go @@ -1,370 +1,370 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -func TestEventBuilder(t *testing.T) { - // Build event with builder pattern (no bus coupling) - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123). 
- WithLabels([]string{"/skills/AI", "/domains/research"}). - WithMetadata("key1", "value1"). - WithMetadata("key2", "value2"). - Build() - - // Verify event properties - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected type RECORD_PUSHED, got %v", event.Type) - } - - if event.ResourceID != TestCID123 { - t.Errorf("Expected resource ID bafytest123, got %s", event.ResourceID) - } - - if len(event.Labels) != 2 { - t.Errorf("Expected 2 labels, got %d", len(event.Labels)) - } - - if event.Metadata["key1"] != "value1" { - t.Errorf("Expected metadata key1=value1, got %s", event.Metadata["key1"]) - } - - if event.Metadata["key2"] != "value2" { - t.Errorf("Expected metadata key2=value2, got %s", event.Metadata["key2"]) - } -} - -func TestEventBuilderWithMetadataMap(t *testing.T) { - metadata := map[string]string{ - "key1": "value1", - "key2": "value2", - "key3": "value3", - } - - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "sync-123"). - WithMetadataMap(metadata). - Build() - - if len(event.Metadata) != 3 { - t.Errorf("Expected 3 metadata entries, got %d", len(event.Metadata)) - } - - for k, v := range metadata { - if event.Metadata[k] != v { - t.Errorf("Expected metadata %s=%s, got %s", k, v, event.Metadata[k]) - } - } -} - -func TestEventBuilderPublish(t *testing.T) { - bus := NewEventBus() - - // Subscribe to receive event - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Use builder to create event, then explicitly publish - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123). - WithLabels([]string{"/skills/AI"}). - Build() - bus.Publish(event) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Receive event - select { - case receivedEvent := <-eventCh: - if receivedEvent.ResourceID != TestCID123 { - t.Errorf("Expected resource ID bafytest123, got %s", receivedEvent.ResourceID) - } - - if len(receivedEvent.Labels) != 1 || receivedEvent.Labels[0] != "/skills/AI" { - t.Errorf("Expected label /skills/AI, got %v", receivedEvent.Labels) - } - default: - t.Error("Expected to receive event, got nothing") - } -} - -func TestRecordPushedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Use convenience method - bus.RecordPushed(TestCID123, []string{"/skills/AI"}) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Verify event received - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) - } - - if event.ResourceID != TestCID123 { - t.Errorf("Expected bafytest123, got %s", event.ResourceID) - } - default: - t.Error("Expected to receive event") - } -} - -func TestRecordPulledConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordPulled("bafytest456", []string{"/domains/research"}) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PULLED { - t.Errorf("Expected RECORD_PULLED, got %v", event.Type) - } - default: - t.Error("Expected to receive event") - } -} - -func TestRecordDeletedConvenience(t *testing.T) { - bus := NewEventBus() - - req := 
&eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordDeleted("bafytest789") - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_DELETED { - t.Errorf("Expected RECORD_DELETED, got %v", event.Type) - } - - if event.ResourceID != "bafytest789" { - t.Errorf("Expected bafytest789, got %s", event.ResourceID) - } - default: - t.Error("Expected to receive event") - } -} - -func TestRecordPublishedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordPublished(TestCID123, []string{"/skills/AI"}) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED { - t.Errorf("Expected RECORD_PUBLISHED, got %v", event.Type) - } - default: - t.Error("Expected to receive event") - } -} - -func TestRecordUnpublishedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordUnpublished(TestCID123) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED { - t.Errorf("Expected RECORD_UNPUBLISHED, got %v", event.Type) - } - default: - t.Error("Expected to receive event") - } -} - -func TestSyncCreatedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.SyncCreated("sync-123", "https://example.com/registry") - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_CREATED { - t.Errorf("Expected SYNC_CREATED, got %v", event.Type) - } - - if event.ResourceID != "sync-123" { - t.Errorf("Expected sync-123, got %s", event.ResourceID) - } - - if event.Metadata["remote_url"] != "https://example.com/registry" { - t.Errorf("Expected remote_url in metadata, got %v", event.Metadata) - } - default: - t.Error("Expected to receive event") - } -} - -func TestSyncCompletedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.SyncCompleted("sync-456", "https://example.com/registry", 42) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { - t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) - } - - if event.Metadata["record_count"] != "42" { - t.Errorf("Expected record_count=42, got %s", event.Metadata["record_count"]) - } - default: - t.Error("Expected to receive event") - } -} - -func TestSyncFailedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.SyncFailed("sync-789", "https://example.com/registry", "connection timeout") - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_FAILED { - t.Errorf("Expected 
SYNC_FAILED, got %v", event.Type) - } - - if event.Metadata["error"] != "connection timeout" { - t.Errorf("Expected error in metadata, got %v", event.Metadata) - } - default: - t.Error("Expected to receive event") - } -} - -func TestRecordSignedConvenience(t *testing.T) { - bus := NewEventBus() - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordSigned(TestCID123, "user@example.com") - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED { - t.Errorf("Expected RECORD_SIGNED, got %v", event.Type) - } - - if event.Metadata["signer"] != "user@example.com" { - t.Errorf("Expected signer in metadata, got %v", event.Metadata) - } - default: - t.Error("Expected to receive event") - } -} - -func TestBuilderChaining(t *testing.T) { - // Test that chaining returns the builder for fluent API - builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") - - // Each method should return the builder - result1 := builder.WithLabels([]string{"/test"}) - if result1 != builder { - t.Error("WithLabels should return builder for chaining") - } - - result2 := builder.WithMetadata("key", "value") - if result2 != builder { - t.Error("WithMetadata should return builder for chaining") - } - - result3 := builder.WithMetadataMap(map[string]string{"k": "v"}) - if result3 != builder { - t.Error("WithMetadataMap should return builder for chaining") - } -} - -func TestBuilderMetadataAccumulation(t *testing.T) { - // Test that multiple WithMetadata calls accumulate - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test"). - WithMetadata("key1", "value1"). - WithMetadata("key2", "value2"). - WithMetadataMap(map[string]string{"key3": "value3"}). - Build() - - if len(event.Metadata) != 3 { - t.Errorf("Expected 3 metadata entries, got %d", len(event.Metadata)) - } - - if event.Metadata["key1"] != "value1" || event.Metadata["key2"] != "value2" || event.Metadata["key3"] != "value3" { - t.Errorf("Metadata not accumulated correctly: %v", event.Metadata) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +func TestEventBuilder(t *testing.T) { + // Build event with builder pattern (no bus coupling) + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123). + WithLabels([]string{"/skills/AI", "/domains/research"}). + WithMetadata("key1", "value1"). + WithMetadata("key2", "value2"). 
+ Build() + + // Verify event properties + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected type RECORD_PUSHED, got %v", event.Type) + } + + if event.ResourceID != TestCID123 { + t.Errorf("Expected resource ID bafytest123, got %s", event.ResourceID) + } + + if len(event.Labels) != 2 { + t.Errorf("Expected 2 labels, got %d", len(event.Labels)) + } + + if event.Metadata["key1"] != "value1" { + t.Errorf("Expected metadata key1=value1, got %s", event.Metadata["key1"]) + } + + if event.Metadata["key2"] != "value2" { + t.Errorf("Expected metadata key2=value2, got %s", event.Metadata["key2"]) + } +} + +func TestEventBuilderWithMetadataMap(t *testing.T) { + metadata := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "sync-123"). + WithMetadataMap(metadata). + Build() + + if len(event.Metadata) != 3 { + t.Errorf("Expected 3 metadata entries, got %d", len(event.Metadata)) + } + + for k, v := range metadata { + if event.Metadata[k] != v { + t.Errorf("Expected metadata %s=%s, got %s", k, v, event.Metadata[k]) + } + } +} + +func TestEventBuilderPublish(t *testing.T) { + bus := NewEventBus() + + // Subscribe to receive event + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Use builder to create event, then explicitly publish + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123). + WithLabels([]string{"/skills/AI"}). + Build() + bus.Publish(event) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Receive event + select { + case receivedEvent := <-eventCh: + if receivedEvent.ResourceID != TestCID123 { + t.Errorf("Expected resource ID bafytest123, got %s", receivedEvent.ResourceID) + } + + if len(receivedEvent.Labels) != 1 || receivedEvent.Labels[0] != "/skills/AI" { + t.Errorf("Expected label /skills/AI, got %v", receivedEvent.Labels) + } + default: + t.Error("Expected to receive event, got nothing") + } +} + +func TestRecordPushedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Use convenience method + bus.RecordPushed(TestCID123, []string{"/skills/AI"}) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Verify event received + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) + } + + if event.ResourceID != TestCID123 { + t.Errorf("Expected bafytest123, got %s", event.ResourceID) + } + default: + t.Error("Expected to receive event") + } +} + +func TestRecordPulledConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordPulled("bafytest456", []string{"/domains/research"}) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PULLED { + t.Errorf("Expected RECORD_PULLED, got %v", event.Type) + } + default: + t.Error("Expected to receive event") + } +} + +func TestRecordDeletedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordDeleted("bafytest789") + + 
// Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_DELETED { + t.Errorf("Expected RECORD_DELETED, got %v", event.Type) + } + + if event.ResourceID != "bafytest789" { + t.Errorf("Expected bafytest789, got %s", event.ResourceID) + } + default: + t.Error("Expected to receive event") + } +} + +func TestRecordPublishedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordPublished(TestCID123, []string{"/skills/AI"}) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED { + t.Errorf("Expected RECORD_PUBLISHED, got %v", event.Type) + } + default: + t.Error("Expected to receive event") + } +} + +func TestRecordUnpublishedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordUnpublished(TestCID123) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED { + t.Errorf("Expected RECORD_UNPUBLISHED, got %v", event.Type) + } + default: + t.Error("Expected to receive event") + } +} + +func TestSyncCreatedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.SyncCreated("sync-123", "https://example.com/registry") + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_CREATED { + t.Errorf("Expected SYNC_CREATED, got %v", event.Type) + } + + if event.ResourceID != "sync-123" { + t.Errorf("Expected sync-123, got %s", event.ResourceID) + } + + if event.Metadata["remote_url"] != "https://example.com/registry" { + t.Errorf("Expected remote_url in metadata, got %v", event.Metadata) + } + default: + t.Error("Expected to receive event") + } +} + +func TestSyncCompletedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.SyncCompleted("sync-456", "https://example.com/registry", 42) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { + t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) + } + + if event.Metadata["record_count"] != "42" { + t.Errorf("Expected record_count=42, got %s", event.Metadata["record_count"]) + } + default: + t.Error("Expected to receive event") + } +} + +func TestSyncFailedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.SyncFailed("sync-789", "https://example.com/registry", "connection timeout") + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_FAILED { + t.Errorf("Expected SYNC_FAILED, got %v", event.Type) + } + + if event.Metadata["error"] != "connection timeout" { + t.Errorf("Expected error in metadata, got %v", 
event.Metadata) + } + default: + t.Error("Expected to receive event") + } +} + +func TestRecordSignedConvenience(t *testing.T) { + bus := NewEventBus() + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordSigned(TestCID123, "user@example.com") + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED { + t.Errorf("Expected RECORD_SIGNED, got %v", event.Type) + } + + if event.Metadata["signer"] != "user@example.com" { + t.Errorf("Expected signer in metadata, got %v", event.Metadata) + } + default: + t.Error("Expected to receive event") + } +} + +func TestBuilderChaining(t *testing.T) { + // Test that chaining returns the builder for fluent API + builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") + + // Each method should return the builder + result1 := builder.WithLabels([]string{"/test"}) + if result1 != builder { + t.Error("WithLabels should return builder for chaining") + } + + result2 := builder.WithMetadata("key", "value") + if result2 != builder { + t.Error("WithMetadata should return builder for chaining") + } + + result3 := builder.WithMetadataMap(map[string]string{"k": "v"}) + if result3 != builder { + t.Error("WithMetadataMap should return builder for chaining") + } +} + +func TestBuilderMetadataAccumulation(t *testing.T) { + // Test that multiple WithMetadata calls accumulate + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test"). + WithMetadata("key1", "value1"). + WithMetadata("key2", "value2"). + WithMetadataMap(map[string]string{"key3": "value3"}). + Build() + + if len(event.Metadata) != 3 { + t.Errorf("Expected 3 metadata entries, got %d", len(event.Metadata)) + } + + if event.Metadata["key1"] != "value1" || event.Metadata["key2"] != "value2" || event.Metadata["key3"] != "value3" { + t.Errorf("Metadata not accumulated correctly: %v", event.Metadata) + } +} diff --git a/server/events/bus.go b/server/events/bus.go index 010dc6f73..fb230786e 100644 --- a/server/events/bus.go +++ b/server/events/bus.go @@ -1,274 +1,274 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "sync" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events/config" - "github.com/agntcy/dir/utils/logging" - "github.com/google/uuid" -) - -var logger = logging.Logger("events") - -// Subscription represents an active event listener. -type Subscription struct { - id string - ch chan *Event - filters []Filter - cancel chan struct{} -} - -// EventBus manages event distribution to subscribers. -// It provides a thread-safe pub/sub mechanism with filtering support. -type EventBus struct { - mu sync.RWMutex - subscribers map[string]*Subscription - config config.Config - metrics Metrics - wg sync.WaitGroup // Tracks in-flight publishAsync goroutines -} - -// NewEventBus creates a new event bus with default configuration. -func NewEventBus() *EventBus { - return NewEventBusWithConfig(config.DefaultConfig()) -} - -// NewEventBusWithConfig creates a new event bus with custom configuration. -func NewEventBusWithConfig(cfg config.Config) *EventBus { - return &EventBus{ - subscribers: make(map[string]*Subscription), - config: cfg, - } -} - -// Publish broadcasts an event to all matching subscribers asynchronously. 
-// This method returns immediately without blocking the caller, making it safe -// to call from API handlers and other performance-critical code paths. -// -// Events are validated before publishing and delivered only to subscribers -// whose filters match the event. If a subscriber's channel is full (slow consumer), -// the event is dropped for that subscriber and a warning is logged (if configured). -// -// The actual delivery happens in a background goroutine, so there is no guarantee -// about the order or timing of delivery relative to other operations. -func (b *EventBus) Publish(event *Event) { - // Validate event before publishing - if err := event.Validate(); err != nil { - logger.Error("Invalid event rejected", "error", err) - - return - } - - b.metrics.PublishedTotal.Add(1) - - // Copy event to avoid race conditions when accessed from background goroutine. - // The caller may reuse or modify the event struct after Publish returns. - eventCopy := &Event{ - ID: event.ID, - Type: event.Type, - ResourceID: event.ResourceID, - Labels: append([]string(nil), event.Labels...), - Metadata: make(map[string]string, len(event.Metadata)), - Timestamp: event.Timestamp, - } - - for k, v := range event.Metadata { - eventCopy.Metadata[k] = v - } - - // Track the async goroutine so Unsubscribe can wait for completion - b.wg.Add(1) - - // Publish in background goroutine - returns immediately! - // This ensures the caller (API handler) is never blocked by event delivery. - go b.publishAsync(eventCopy) -} - -// publishAsync handles the actual event delivery in a background goroutine. -// It takes a snapshot of subscribers while holding the lock briefly, then -// delivers events without holding any locks. -func (b *EventBus) publishAsync(event *Event) { - defer b.wg.Done() // Signal completion when done - - if b.config.LogPublishedEvents { - logger.Debug("Event published", - "event_id", event.ID, - "type", event.Type, - "resource_id", event.ResourceID) - } - - // Take a snapshot of subscribers while holding the lock briefly. - // This minimizes lock contention - we only hold the lock long enough - // to copy the subscriber list (~1µs), not during actual delivery. - b.mu.RLock() - snapshot := make([]*Subscription, 0, len(b.subscribers)) - - for _, sub := range b.subscribers { - snapshot = append(snapshot, sub) - } - - b.mu.RUnlock() - - // Now deliver to all matching subscribers without holding any locks. - // This prevents blocking Subscribe/Unsubscribe operations and allows - // parallel event delivery. - var delivered uint64 - - var dropped uint64 - - for _, sub := range snapshot { - // Check if subscription was cancelled before attempting delivery. - // This prevents most cases of sending to a closed channel. - select { - case <-sub.cancel: - // Subscription was cancelled/closed, skip this subscriber - continue - default: - // Subscription still active, continue to delivery - } - - if Matches(event, sub.filters) { - // Use a closure with recover to handle the race condition where - // the channel is closed between the cancel check above and the send below. - // This is acceptable for async event delivery - the subscriber unsubscribed, - // so it doesn't need the event anyway. - func() { - defer func() { - // Recover from panic if channel was closed between cancel check and send. - // This is expected behavior when a subscriber unsubscribes, not an error. 
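The recover guard described in the comments above, isolated as a generic sketch (not the package's API): a best-effort, non-blocking send that tolerates the channel being closed concurrently.

	func trySend(ch chan<- *Event, cancel <-chan struct{}, e *Event) (sent bool) {
		// A send on a closed channel panics; the deferred recover turns that
		// race (close between the cancel check and the send) into a no-op,
		// leaving sent at its zero value, false.
		defer func() { _ = recover() }()

		select {
		case ch <- e:
			return true
		case <-cancel:
			return false // subscriber is shutting down
		default:
			return false // buffer full: drop for this slow consumer
		}
	}
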
- _ = recover() - }() - - select { - case sub.ch <- event: - delivered++ - case <-sub.cancel: - // Subscription was cancelled during send attempt, skip - default: - // Channel is full (slow consumer) - dropped++ - - if b.config.LogSlowConsumers { - // Logging happens outside the lock, so slow I/O won't block the API - logger.Warn("Dropped event due to slow consumer", - "subscription_id", sub.id, - "event_type", event.Type, - "event_id", event.ID) - } - } - }() - } - } - - b.metrics.DeliveredTotal.Add(delivered) - - if dropped > 0 { - b.metrics.DroppedTotal.Add(dropped) - } -} - -// Subscribe creates a new subscription with the specified filters. -// Returns a unique subscription ID and a channel for receiving events. -// -// The caller is responsible for calling Unsubscribe when done to clean up resources. -// -// Example: -// -// req := &eventsv1.ListenRequest{ -// EventTypes: []eventsv1.EventType{eventsv1.EVENT_TYPE_RECORD_PUSHED}, -// } -// subID, eventCh := bus.Subscribe(req) -// defer bus.Unsubscribe(subID) -// -// for event := range eventCh { -// // Process event -// } -func (b *EventBus) Subscribe(req *eventsv1.ListenRequest) (string, <-chan *Event) { - b.mu.Lock() - defer b.mu.Unlock() - - id := uuid.New().String() - sub := &Subscription{ - id: id, - ch: make(chan *Event, b.config.SubscriberBufferSize), - filters: BuildFilters(req), - cancel: make(chan struct{}), - } - - b.subscribers[id] = sub - b.metrics.SubscribersTotal.Add(1) - - logger.Info("New subscription created", - "subscription_id", id, - "event_types", req.GetEventTypes(), - "label_filters", req.GetLabelFilters(), - "cid_filters", req.GetCidFilters()) - - return id, sub.ch -} - -// Unsubscribe removes a subscription and cleans up resources. -// The event channel will be closed. -// -// This method waits for any in-flight publishAsync goroutines to complete -// before closing the channel to prevent race conditions. -// -// It is safe to call Unsubscribe multiple times with the same ID or -// with an ID that doesn't exist. -func (b *EventBus) Unsubscribe(id string) { - b.mu.Lock() - - sub, ok := b.subscribers[id] - if !ok { - b.mu.Unlock() - - return - } - - // Remove from map first (while holding lock) - delete(b.subscribers, id) - b.metrics.SubscribersTotal.Add(-1) - b.mu.Unlock() - - // Signal cancellation first (publishAsync will check this) - close(sub.cancel) - - // Wait for all in-flight publishAsync goroutines to complete. - // This prevents closing the channel while a goroutine might still send to it. - b.wg.Wait() - - // Now it's safe to close the channel - close(sub.ch) - - logger.Info("Subscription removed", "subscription_id", id) -} - -// SubscriberCount returns the current number of active subscribers. -func (b *EventBus) SubscriberCount() int { - b.mu.RLock() - defer b.mu.RUnlock() - - return len(b.subscribers) -} - -// GetMetrics returns a snapshot of current metrics. -// This creates a copy with the current values. -func (b *EventBus) GetMetrics() MetricsSnapshot { - return MetricsSnapshot{ - PublishedTotal: b.metrics.PublishedTotal.Load(), - DeliveredTotal: b.metrics.DeliveredTotal.Load(), - DroppedTotal: b.metrics.DroppedTotal.Load(), - SubscribersTotal: b.metrics.SubscribersTotal.Load(), - } -} - -// WaitForAsyncPublish waits for all in-flight publishAsync goroutines to complete. -// This is useful for testing to ensure all events have been delivered before -// checking results. 
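For reference, the field-by-field copy in Publish implies roughly this Event shape; the type and its Validate method live elsewhere in the package, so this layout is an assumption:

	// import ("time"; eventsv1 "github.com/agntcy/dir/api/events/v1")

	type Event struct {
		ID         string // unique event ID
		Type       eventsv1.EventType
		ResourceID string // e.g. a record CID or sync ID
		Labels     []string
		Metadata   map[string]string
		Timestamp  time.Time
	}
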
-func (b *EventBus) WaitForAsyncPublish() { - b.wg.Wait() -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "sync" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events/config" + "github.com/agntcy/dir/utils/logging" + "github.com/google/uuid" +) + +var logger = logging.Logger("events") + +// Subscription represents an active event listener. +type Subscription struct { + id string + ch chan *Event + filters []Filter + cancel chan struct{} +} + +// EventBus manages event distribution to subscribers. +// It provides a thread-safe pub/sub mechanism with filtering support. +type EventBus struct { + mu sync.RWMutex + subscribers map[string]*Subscription + config config.Config + metrics Metrics + wg sync.WaitGroup // Tracks in-flight publishAsync goroutines +} + +// NewEventBus creates a new event bus with default configuration. +func NewEventBus() *EventBus { + return NewEventBusWithConfig(config.DefaultConfig()) +} + +// NewEventBusWithConfig creates a new event bus with custom configuration. +func NewEventBusWithConfig(cfg config.Config) *EventBus { + return &EventBus{ + subscribers: make(map[string]*Subscription), + config: cfg, + } +} + +// Publish broadcasts an event to all matching subscribers asynchronously. +// This method returns immediately without blocking the caller, making it safe +// to call from API handlers and other performance-critical code paths. +// +// Events are validated before publishing and delivered only to subscribers +// whose filters match the event. If a subscriber's channel is full (slow consumer), +// the event is dropped for that subscriber and a warning is logged (if configured). +// +// The actual delivery happens in a background goroutine, so there is no guarantee +// about the order or timing of delivery relative to other operations. +func (b *EventBus) Publish(event *Event) { + // Validate event before publishing + if err := event.Validate(); err != nil { + logger.Error("Invalid event rejected", "error", err) + + return + } + + b.metrics.PublishedTotal.Add(1) + + // Copy event to avoid race conditions when accessed from background goroutine. + // The caller may reuse or modify the event struct after Publish returns. + eventCopy := &Event{ + ID: event.ID, + Type: event.Type, + ResourceID: event.ResourceID, + Labels: append([]string(nil), event.Labels...), + Metadata: make(map[string]string, len(event.Metadata)), + Timestamp: event.Timestamp, + } + + for k, v := range event.Metadata { + eventCopy.Metadata[k] = v + } + + // Track the async goroutine so Unsubscribe can wait for completion + b.wg.Add(1) + + // Publish in background goroutine - returns immediately! + // This ensures the caller (API handler) is never blocked by event delivery. + go b.publishAsync(eventCopy) +} + +// publishAsync handles the actual event delivery in a background goroutine. +// It takes a snapshot of subscribers while holding the lock briefly, then +// delivers events without holding any locks. +func (b *EventBus) publishAsync(event *Event) { + defer b.wg.Done() // Signal completion when done + + if b.config.LogPublishedEvents { + logger.Debug("Event published", + "event_id", event.ID, + "type", event.Type, + "resource_id", event.ResourceID) + } + + // Take a snapshot of subscribers while holding the lock briefly. + // This minimizes lock contention - we only hold the lock long enough + // to copy the subscriber list (~1µs), not during actual delivery. 
+ b.mu.RLock() + snapshot := make([]*Subscription, 0, len(b.subscribers)) + + for _, sub := range b.subscribers { + snapshot = append(snapshot, sub) + } + + b.mu.RUnlock() + + // Now deliver to all matching subscribers without holding any locks. + // This prevents blocking Subscribe/Unsubscribe operations and allows + // parallel event delivery. + var delivered uint64 + + var dropped uint64 + + for _, sub := range snapshot { + // Check if subscription was cancelled before attempting delivery. + // This prevents most cases of sending to a closed channel. + select { + case <-sub.cancel: + // Subscription was cancelled/closed, skip this subscriber + continue + default: + // Subscription still active, continue to delivery + } + + if Matches(event, sub.filters) { + // Use a closure with recover to handle the race condition where + // the channel is closed between the cancel check above and the send below. + // This is acceptable for async event delivery - the subscriber unsubscribed, + // so it doesn't need the event anyway. + func() { + defer func() { + // Recover from panic if channel was closed between cancel check and send. + // This is expected behavior when a subscriber unsubscribes, not an error. + _ = recover() + }() + + select { + case sub.ch <- event: + delivered++ + case <-sub.cancel: + // Subscription was cancelled during send attempt, skip + default: + // Channel is full (slow consumer) + dropped++ + + if b.config.LogSlowConsumers { + // Logging happens outside the lock, so slow I/O won't block the API + logger.Warn("Dropped event due to slow consumer", + "subscription_id", sub.id, + "event_type", event.Type, + "event_id", event.ID) + } + } + }() + } + } + + b.metrics.DeliveredTotal.Add(delivered) + + if dropped > 0 { + b.metrics.DroppedTotal.Add(dropped) + } +} + +// Subscribe creates a new subscription with the specified filters. +// Returns a unique subscription ID and a channel for receiving events. +// +// The caller is responsible for calling Unsubscribe when done to clean up resources. +// +// Example: +// +// req := &eventsv1.ListenRequest{ +// EventTypes: []eventsv1.EventType{eventsv1.EVENT_TYPE_RECORD_PUSHED}, +// } +// subID, eventCh := bus.Subscribe(req) +// defer bus.Unsubscribe(subID) +// +// for event := range eventCh { +// // Process event +// } +func (b *EventBus) Subscribe(req *eventsv1.ListenRequest) (string, <-chan *Event) { + b.mu.Lock() + defer b.mu.Unlock() + + id := uuid.New().String() + sub := &Subscription{ + id: id, + ch: make(chan *Event, b.config.SubscriberBufferSize), + filters: BuildFilters(req), + cancel: make(chan struct{}), + } + + b.subscribers[id] = sub + b.metrics.SubscribersTotal.Add(1) + + logger.Info("New subscription created", + "subscription_id", id, + "event_types", req.GetEventTypes(), + "label_filters", req.GetLabelFilters(), + "cid_filters", req.GetCidFilters()) + + return id, sub.ch +} + +// Unsubscribe removes a subscription and cleans up resources. +// The event channel will be closed. +// +// This method waits for any in-flight publishAsync goroutines to complete +// before closing the channel to prevent race conditions. +// +// It is safe to call Unsubscribe multiple times with the same ID or +// with an ID that doesn't exist. 
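A typical consumer tied to this Subscribe/Unsubscribe lifecycle, as a usage sketch (handle is a hypothetical callback, not part of this diff):

	subID, eventCh := bus.Subscribe(&eventsv1.ListenRequest{})

	go func() {
		// The loop exits when Unsubscribe closes the channel.
		for event := range eventCh {
			handle(event) // hypothetical consumer logic
		}
	}()

	// Later, e.g. on stream teardown: Unsubscribe waits out in-flight
	// publishes, then closes eventCh, which ends the range loop above.
	bus.Unsubscribe(subID)
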
+func (b *EventBus) Unsubscribe(id string) { + b.mu.Lock() + + sub, ok := b.subscribers[id] + if !ok { + b.mu.Unlock() + + return + } + + // Remove from map first (while holding lock) + delete(b.subscribers, id) + b.metrics.SubscribersTotal.Add(-1) + b.mu.Unlock() + + // Signal cancellation first (publishAsync will check this) + close(sub.cancel) + + // Wait for all in-flight publishAsync goroutines to complete. + // This prevents closing the channel while a goroutine might still send to it. + b.wg.Wait() + + // Now it's safe to close the channel + close(sub.ch) + + logger.Info("Subscription removed", "subscription_id", id) +} + +// SubscriberCount returns the current number of active subscribers. +func (b *EventBus) SubscriberCount() int { + b.mu.RLock() + defer b.mu.RUnlock() + + return len(b.subscribers) +} + +// GetMetrics returns a snapshot of current metrics. +// This creates a copy with the current values. +func (b *EventBus) GetMetrics() MetricsSnapshot { + return MetricsSnapshot{ + PublishedTotal: b.metrics.PublishedTotal.Load(), + DeliveredTotal: b.metrics.DeliveredTotal.Load(), + DroppedTotal: b.metrics.DroppedTotal.Load(), + SubscribersTotal: b.metrics.SubscribersTotal.Load(), + } +} + +// WaitForAsyncPublish waits for all in-flight publishAsync goroutines to complete. +// This is useful for testing to ensure all events have been delivered before +// checking results. +func (b *EventBus) WaitForAsyncPublish() { + b.wg.Wait() +} diff --git a/server/events/bus_test.go b/server/events/bus_test.go index aec0dc2a2..890301718 100644 --- a/server/events/bus_test.go +++ b/server/events/bus_test.go @@ -1,372 +1,372 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events/config" -) - -func TestEventBusPublishSubscribe(t *testing.T) { - bus := NewEventBus() - - // Subscribe - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - } - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish event - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) - bus.Publish(event) - - // Receive event - select { - case receivedEvent := <-eventCh: - if receivedEvent.ID != event.ID { - t.Errorf("Expected event ID %s, got %s", event.ID, receivedEvent.ID) - } - - if receivedEvent.Type != event.Type { - t.Errorf("Expected event type %v, got %v", event.Type, receivedEvent.Type) - } - case <-time.After(time.Second): - t.Error("Timeout waiting for event") - } -} - -func TestEventBusFiltering(t *testing.T) { - bus := NewEventBus() - - // Subscribe only to PUSHED events - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - } - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish PUBLISHED event (should not be received) - publishedEvent := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, TestCID123) - bus.Publish(publishedEvent) - - // Publish PUSHED event (should be received) - pushedEvent := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) - bus.Publish(pushedEvent) - - // Should receive only the PUSHED event - select { - case receivedEvent := <-eventCh: - if receivedEvent.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected PUSHED event, got %v", receivedEvent.Type) - } - - 
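The counters loaded by GetMetrics above are updated via Add/Load, which suggests sync/atomic types. Metrics and MetricsSnapshot are defined elsewhere in the package; a shape consistent with those call sites (an assumption) would be:

	// import "sync/atomic"

	type Metrics struct {
		PublishedTotal   atomic.Uint64
		DeliveredTotal   atomic.Uint64
		DroppedTotal     atomic.Uint64
		SubscribersTotal atomic.Int64 // signed: Unsubscribe calls Add(-1)
	}

	// MetricsSnapshot is the plain-value copy returned to callers.
	type MetricsSnapshot struct {
		PublishedTotal   uint64
		DeliveredTotal   uint64
		DroppedTotal     uint64
		SubscribersTotal int64
	}
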
if receivedEvent.ResourceID != TestCID456 { - t.Errorf("Expected resource ID bafytest456, got %s", receivedEvent.ResourceID) - } - case <-time.After(time.Second): - t.Error("Timeout waiting for PUSHED event") - } - - // Should not receive the PUBLISHED event - select { - case unexpectedEvent := <-eventCh: - t.Errorf("Unexpected event received: %v", unexpectedEvent.Type) - case <-time.After(100 * time.Millisecond): - } -} - -func TestEventBusMultipleSubscribers(t *testing.T) { - bus := NewEventBus() - - // Create two subscribers - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - } - - subID1, eventCh1 := bus.Subscribe(req) - defer bus.Unsubscribe(subID1) - - subID2, eventCh2 := bus.Subscribe(req) - defer bus.Unsubscribe(subID2) - - // Publish event - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) - bus.Publish(event) - - // Both subscribers should receive the event - received1 := false - received2 := false - - select { - case <-eventCh1: - received1 = true - case <-time.After(time.Second): - t.Error("Timeout waiting for event on subscriber 1") - } - - select { - case <-eventCh2: - received2 = true - case <-time.After(time.Second): - t.Error("Timeout waiting for event on subscriber 2") - } - - if !received1 || !received2 { - t.Error("Not all subscribers received the event") - } -} - -func TestEventBusUnsubscribe(t *testing.T) { - bus := NewEventBus() - - // Subscribe - req := &eventsv1.ListenRequest{} - subID, eventCh := bus.Subscribe(req) - - // Check subscriber count - if count := bus.SubscriberCount(); count != 1 { - t.Errorf("Expected 1 subscriber, got %d", count) - } - - // Unsubscribe - bus.Unsubscribe(subID) - - // Check subscriber count - if count := bus.SubscriberCount(); count != 0 { - t.Errorf("Expected 0 subscribers, got %d", count) - } - - // Channel should be closed - _, ok := <-eventCh - if ok { - t.Error("Expected channel to be closed after unsubscribe") - } - - // Unsubscribing again should be safe - bus.Unsubscribe(subID) -} - -func TestEventBusSlowConsumer(t *testing.T) { - // Create bus with small buffer - cfg := config.DefaultConfig() - cfg.SubscriberBufferSize = 2 - cfg.LogSlowConsumers = false // Disable logging for cleaner test output - bus := NewEventBusWithConfig(cfg) - - // Subscribe but don't consume events - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish more events than buffer size - for range 10 { - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest") - bus.Publish(event) - } - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Check metrics - some events should be dropped - metrics := bus.GetMetrics() - if metrics.DroppedTotal == 0 { - t.Error("Expected some events to be dropped for slow consumer") - } - - // Drain the channel - drained := 0 - - for { - select { - case <-eventCh: - drained++ - default: - goto done - } - } - -done: - - // Should have drained exactly buffer size - if drained != cfg.SubscriberBufferSize { - t.Errorf("Expected to drain %d events, got %d", cfg.SubscriberBufferSize, drained) - } -} - -func TestEventBusLabelFiltering(t *testing.T) { - bus := NewEventBus() - - // Subscribe with label filter - req := &eventsv1.ListenRequest{ - LabelFilters: []string{"/skills/AI"}, - } - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish event without matching labels - event1 := 
NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) - event1.Labels = []string{"/domains/medical"} - bus.Publish(event1) - - // Publish event with matching labels - event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) - event2.Labels = []string{"/skills/AI/ML"} - bus.Publish(event2) - - // Should receive only the event with matching labels - select { - case receivedEvent := <-eventCh: - if receivedEvent.ResourceID != TestCID456 { - t.Errorf("Expected event with bafytest456, got %s", receivedEvent.ResourceID) - } - case <-time.After(time.Second): - t.Error("Timeout waiting for filtered event") - } - - // Should not receive the non-matching event - select { - case unexpectedEvent := <-eventCh: - t.Errorf("Unexpected event received: %s", unexpectedEvent.ResourceID) - case <-time.After(100 * time.Millisecond): - } -} - -func TestEventBusCIDFiltering(t *testing.T) { - bus := NewEventBus() - - // Subscribe with CID filter - req := &eventsv1.ListenRequest{ - CidFilters: []string{TestCID123}, - } - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish event with different CID - event1 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) - bus.Publish(event1) - - // Publish event with matching CID - event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) - bus.Publish(event2) - - // Should receive only the event with matching CID - select { - case receivedEvent := <-eventCh: - if receivedEvent.ResourceID != TestCID123 { - t.Errorf("Expected event with bafytest123, got %s", receivedEvent.ResourceID) - } - case <-time.After(time.Second): - t.Error("Timeout waiting for filtered event") - } - - // Should not receive the non-matching event - select { - case unexpectedEvent := <-eventCh: - t.Errorf("Unexpected event received: %s", unexpectedEvent.ResourceID) - case <-time.After(100 * time.Millisecond): - } -} - -func TestEventBusInvalidEvent(t *testing.T) { - bus := NewEventBus() - - // Subscribe - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish invalid event (missing resource ID) - event := &Event{ - ID: "test-id", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: "", // Invalid: empty resource ID - } - bus.Publish(event) - - // Should not receive the invalid event - select { - case unexpectedEvent := <-eventCh: - t.Errorf("Should not receive invalid event: %v", unexpectedEvent) - case <-time.After(100 * time.Millisecond): - } - - // Metrics should not count invalid events - metrics := bus.GetMetrics() - if metrics.PublishedTotal != 0 { - t.Errorf("Invalid event should not be counted in metrics") - } -} - -func TestEventBusMetrics(t *testing.T) { - bus := NewEventBus() - - // Subscribe - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Initial metrics - metrics := bus.GetMetrics() - if metrics.SubscribersTotal != 1 { - t.Errorf("Expected 1 subscriber, got %d", metrics.SubscribersTotal) - } - - // Publish and consume events - for range 5 { - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest") - bus.Publish(event) - <-eventCh // Consume - } - - // Check metrics - metrics = bus.GetMetrics() - if published := metrics.PublishedTotal; published != 5 { - t.Errorf("Expected 5 published events, got %d", published) - } - - if delivered := metrics.DeliveredTotal; delivered != 5 { - t.Errorf("Expected 5 delivered 
events, got %d", delivered) - } - - // Unsubscribe and check - bus.Unsubscribe(subID) - - metrics = bus.GetMetrics() - if metrics.SubscribersTotal != 0 { - t.Errorf("Expected 0 subscribers after unsubscribe, got %d", metrics.SubscribersTotal) - } -} - -func TestEventBusNoSubscribers(t *testing.T) { - bus := NewEventBus() - - // Publish without subscribers (should not panic) - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) - bus.Publish(event) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Check metrics - metrics := bus.GetMetrics() - if metrics.PublishedTotal != 1 { - t.Errorf("Expected 1 published event, got %d", metrics.PublishedTotal) - } - - if metrics.DeliveredTotal != 0 { - t.Errorf("Expected 0 delivered events (no subscribers), got %d", metrics.DeliveredTotal) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events/config" +) + +func TestEventBusPublishSubscribe(t *testing.T) { + bus := NewEventBus() + + // Subscribe + req := &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + } + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish event + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) + bus.Publish(event) + + // Receive event + select { + case receivedEvent := <-eventCh: + if receivedEvent.ID != event.ID { + t.Errorf("Expected event ID %s, got %s", event.ID, receivedEvent.ID) + } + + if receivedEvent.Type != event.Type { + t.Errorf("Expected event type %v, got %v", event.Type, receivedEvent.Type) + } + case <-time.After(time.Second): + t.Error("Timeout waiting for event") + } +} + +func TestEventBusFiltering(t *testing.T) { + bus := NewEventBus() + + // Subscribe only to PUSHED events + req := &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + } + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish PUBLISHED event (should not be received) + publishedEvent := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, TestCID123) + bus.Publish(publishedEvent) + + // Publish PUSHED event (should be received) + pushedEvent := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) + bus.Publish(pushedEvent) + + // Should receive only the PUSHED event + select { + case receivedEvent := <-eventCh: + if receivedEvent.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected PUSHED event, got %v", receivedEvent.Type) + } + + if receivedEvent.ResourceID != TestCID456 { + t.Errorf("Expected resource ID bafytest456, got %s", receivedEvent.ResourceID) + } + case <-time.After(time.Second): + t.Error("Timeout waiting for PUSHED event") + } + + // Should not receive the PUBLISHED event + select { + case unexpectedEvent := <-eventCh: + t.Errorf("Unexpected event received: %v", unexpectedEvent.Type) + case <-time.After(100 * time.Millisecond): + } +} + +func TestEventBusMultipleSubscribers(t *testing.T) { + bus := NewEventBus() + + // Create two subscribers + req := &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + } + + subID1, eventCh1 := bus.Subscribe(req) + defer bus.Unsubscribe(subID1) + + subID2, eventCh2 := bus.Subscribe(req) + defer bus.Unsubscribe(subID2) + 
+ // Publish event + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) + bus.Publish(event) + + // Both subscribers should receive the event + received1 := false + received2 := false + + select { + case <-eventCh1: + received1 = true + case <-time.After(time.Second): + t.Error("Timeout waiting for event on subscriber 1") + } + + select { + case <-eventCh2: + received2 = true + case <-time.After(time.Second): + t.Error("Timeout waiting for event on subscriber 2") + } + + if !received1 || !received2 { + t.Error("Not all subscribers received the event") + } +} + +func TestEventBusUnsubscribe(t *testing.T) { + bus := NewEventBus() + + // Subscribe + req := &eventsv1.ListenRequest{} + subID, eventCh := bus.Subscribe(req) + + // Check subscriber count + if count := bus.SubscriberCount(); count != 1 { + t.Errorf("Expected 1 subscriber, got %d", count) + } + + // Unsubscribe + bus.Unsubscribe(subID) + + // Check subscriber count + if count := bus.SubscriberCount(); count != 0 { + t.Errorf("Expected 0 subscribers, got %d", count) + } + + // Channel should be closed + _, ok := <-eventCh + if ok { + t.Error("Expected channel to be closed after unsubscribe") + } + + // Unsubscribing again should be safe + bus.Unsubscribe(subID) +} + +func TestEventBusSlowConsumer(t *testing.T) { + // Create bus with small buffer + cfg := config.DefaultConfig() + cfg.SubscriberBufferSize = 2 + cfg.LogSlowConsumers = false // Disable logging for cleaner test output + bus := NewEventBusWithConfig(cfg) + + // Subscribe but don't consume events + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish more events than buffer size + for range 10 { + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest") + bus.Publish(event) + } + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Check metrics - some events should be dropped + metrics := bus.GetMetrics() + if metrics.DroppedTotal == 0 { + t.Error("Expected some events to be dropped for slow consumer") + } + + // Drain the channel + drained := 0 + + for { + select { + case <-eventCh: + drained++ + default: + goto done + } + } + +done: + + // Should have drained exactly buffer size + if drained != cfg.SubscriberBufferSize { + t.Errorf("Expected to drain %d events, got %d", cfg.SubscriberBufferSize, drained) + } +} + +func TestEventBusLabelFiltering(t *testing.T) { + bus := NewEventBus() + + // Subscribe with label filter + req := &eventsv1.ListenRequest{ + LabelFilters: []string{"/skills/AI"}, + } + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish event without matching labels + event1 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) + event1.Labels = []string{"/domains/medical"} + bus.Publish(event1) + + // Publish event with matching labels + event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) + event2.Labels = []string{"/skills/AI/ML"} + bus.Publish(event2) + + // Should receive only the event with matching labels + select { + case receivedEvent := <-eventCh: + if receivedEvent.ResourceID != TestCID456 { + t.Errorf("Expected event with bafytest456, got %s", receivedEvent.ResourceID) + } + case <-time.After(time.Second): + t.Error("Timeout waiting for filtered event") + } + + // Should not receive the non-matching event + select { + case unexpectedEvent := <-eventCh: + t.Errorf("Unexpected event received: %s", unexpectedEvent.ResourceID) + case <-time.After(100 * 
time.Millisecond): + } +} + +func TestEventBusCIDFiltering(t *testing.T) { + bus := NewEventBus() + + // Subscribe with CID filter + req := &eventsv1.ListenRequest{ + CidFilters: []string{TestCID123}, + } + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish event with different CID + event1 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID456) + bus.Publish(event1) + + // Publish event with matching CID + event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) + bus.Publish(event2) + + // Should receive only the event with matching CID + select { + case receivedEvent := <-eventCh: + if receivedEvent.ResourceID != TestCID123 { + t.Errorf("Expected event with bafytest123, got %s", receivedEvent.ResourceID) + } + case <-time.After(time.Second): + t.Error("Timeout waiting for filtered event") + } + + // Should not receive the non-matching event + select { + case unexpectedEvent := <-eventCh: + t.Errorf("Unexpected event received: %s", unexpectedEvent.ResourceID) + case <-time.After(100 * time.Millisecond): + } +} + +func TestEventBusInvalidEvent(t *testing.T) { + bus := NewEventBus() + + // Subscribe + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish invalid event (missing resource ID) + event := &Event{ + ID: "test-id", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: "", // Invalid: empty resource ID + } + bus.Publish(event) + + // Should not receive the invalid event + select { + case unexpectedEvent := <-eventCh: + t.Errorf("Should not receive invalid event: %v", unexpectedEvent) + case <-time.After(100 * time.Millisecond): + } + + // Metrics should not count invalid events + metrics := bus.GetMetrics() + if metrics.PublishedTotal != 0 { + t.Errorf("Invalid event should not be counted in metrics") + } +} + +func TestEventBusMetrics(t *testing.T) { + bus := NewEventBus() + + // Subscribe + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Initial metrics + metrics := bus.GetMetrics() + if metrics.SubscribersTotal != 1 { + t.Errorf("Expected 1 subscriber, got %d", metrics.SubscribersTotal) + } + + // Publish and consume events + for range 5 { + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest") + bus.Publish(event) + <-eventCh // Consume + } + + // Check metrics + metrics = bus.GetMetrics() + if published := metrics.PublishedTotal; published != 5 { + t.Errorf("Expected 5 published events, got %d", published) + } + + if delivered := metrics.DeliveredTotal; delivered != 5 { + t.Errorf("Expected 5 delivered events, got %d", delivered) + } + + // Unsubscribe and check + bus.Unsubscribe(subID) + + metrics = bus.GetMetrics() + if metrics.SubscribersTotal != 0 { + t.Errorf("Expected 0 subscribers after unsubscribe, got %d", metrics.SubscribersTotal) + } +} + +func TestEventBusNoSubscribers(t *testing.T) { + bus := NewEventBus() + + // Publish without subscribers (should not panic) + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, TestCID123) + bus.Publish(event) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Check metrics + metrics := bus.GetMetrics() + if metrics.PublishedTotal != 1 { + t.Errorf("Expected 1 published event, got %d", metrics.PublishedTotal) + } + + if metrics.DeliveredTotal != 0 { + t.Errorf("Expected 0 delivered events (no subscribers), got %d", metrics.DeliveredTotal) + 
} +} diff --git a/server/events/config/config.go b/server/events/config/config.go index 86d63d537..14d57210d 100644 --- a/server/events/config/config.go +++ b/server/events/config/config.go @@ -1,43 +1,43 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -const ( - // DefaultSubscriberBufferSize is the default channel buffer size per subscriber. - DefaultSubscriberBufferSize = 100 - - // DefaultLogSlowConsumers is the default setting for logging slow consumers. - DefaultLogSlowConsumers = true - - // DefaultLogPublishedEvents is the default setting for logging all published events. - DefaultLogPublishedEvents = false -) - -// Config holds event system configuration. -type Config struct { - // SubscriberBufferSize is the channel buffer size per subscriber. - // Larger buffers allow subscribers to fall behind temporarily without - // dropping events, but use more memory. - // Default: 100 - SubscriberBufferSize int - - // LogSlowConsumers enables logging when events are dropped due to - // full subscriber buffers (slow consumers). - // Default: true - LogSlowConsumers bool - - // LogPublishedEvents enables debug logging of all published events. - // This can be very verbose in production. - // Default: false - LogPublishedEvents bool -} - -// DefaultConfig returns the default event system configuration. -func DefaultConfig() Config { - return Config{ - SubscriberBufferSize: DefaultSubscriberBufferSize, - LogSlowConsumers: DefaultLogSlowConsumers, - LogPublishedEvents: DefaultLogPublishedEvents, - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +const ( + // DefaultSubscriberBufferSize is the default channel buffer size per subscriber. + DefaultSubscriberBufferSize = 100 + + // DefaultLogSlowConsumers is the default setting for logging slow consumers. + DefaultLogSlowConsumers = true + + // DefaultLogPublishedEvents is the default setting for logging all published events. + DefaultLogPublishedEvents = false +) + +// Config holds event system configuration. +type Config struct { + // SubscriberBufferSize is the channel buffer size per subscriber. + // Larger buffers allow subscribers to fall behind temporarily without + // dropping events, but use more memory. + // Default: 100 + SubscriberBufferSize int + + // LogSlowConsumers enables logging when events are dropped due to + // full subscriber buffers (slow consumers). + // Default: true + LogSlowConsumers bool + + // LogPublishedEvents enables debug logging of all published events. + // This can be very verbose in production. + // Default: false + LogPublishedEvents bool +} + +// DefaultConfig returns the default event system configuration. +func DefaultConfig() Config { + return Config{ + SubscriberBufferSize: DefaultSubscriberBufferSize, + LogSlowConsumers: DefaultLogSlowConsumers, + LogPublishedEvents: DefaultLogPublishedEvents, + } +} diff --git a/server/events/events.go b/server/events/events.go index 511d7ef18..d88e61223 100644 --- a/server/events/events.go +++ b/server/events/events.go @@ -1,81 +1,81 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "github.com/agntcy/dir/server/events/config" -) - -// Service manages the event streaming system lifecycle. -// It creates and owns the EventBus and provides access to it -// for other services to publish events. 
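Putting the configuration above together with the service constructors below, a usage sketch (the specific values are illustrative only):

	// import "github.com/agntcy/dir/server/events/config"

	cfg := config.DefaultConfig()
	cfg.SubscriberBufferSize = 1024 // tolerate burstier consumers at the cost of memory
	cfg.LogPublishedEvents = true   // verbose; debug builds only

	svc := events.NewWithConfig(cfg)
	defer func() { _ = svc.Stop() }()

	svc.Bus().RecordPushed("bafyxxx", []string{"/skills/AI"})
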
-type Service struct { - bus *EventBus - config config.Config -} - -// New creates a new event service with default configuration. -// The service is ready to use immediately (no Start() needed). -// -// Example: -// -// eventService := events.New() -// defer eventService.Stop() -// -// // Get bus for publishing -// bus := eventService.Bus() -// bus.RecordPushed("bafyxxx", labels) -func New() *Service { - cfg := config.DefaultConfig() - - logger.Info("Initializing event service", - "subscriber_buffer_size", cfg.SubscriberBufferSize, - "log_slow_consumers", cfg.LogSlowConsumers, - "log_published_events", cfg.LogPublishedEvents) - - return &Service{ - bus: NewEventBusWithConfig(cfg), - config: cfg, - } -} - -// NewWithConfig creates a new event service with custom configuration. -func NewWithConfig(cfg config.Config) *Service { - logger.Info("Initializing event service with custom config", - "subscriber_buffer_size", cfg.SubscriberBufferSize, - "log_slow_consumers", cfg.LogSlowConsumers, - "log_published_events", cfg.LogPublishedEvents) - - return &Service{ - bus: NewEventBusWithConfig(cfg), - config: cfg, - } -} - -// Bus returns the event bus for publishing events. -// Other services should use this to emit events. -func (s *Service) Bus() *EventBus { - return s.bus -} - -// Stop gracefully shuts down the event service. -// This closes all active subscriptions and prevents new ones. -func (s *Service) Stop() error { - logger.Info("Stopping event service", - "active_subscribers", s.bus.SubscriberCount()) - - // Get final metrics - metrics := s.bus.GetMetrics() - logger.Info("Event service stopped", - "total_published", metrics.PublishedTotal, - "total_delivered", metrics.DeliveredTotal, - "total_dropped", metrics.DroppedTotal) - - // Note: We don't close subscriptions here because: - // 1. They are managed by the controller (gRPC stream lifecycle) - // 2. Subscribers will naturally disconnect when server stops - // 3. Forcing close could cause panics in active subscribers - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "github.com/agntcy/dir/server/events/config" +) + +// Service manages the event streaming system lifecycle. +// It creates and owns the EventBus and provides access to it +// for other services to publish events. +type Service struct { + bus *EventBus + config config.Config +} + +// New creates a new event service with default configuration. +// The service is ready to use immediately (no Start() needed). +// +// Example: +// +// eventService := events.New() +// defer eventService.Stop() +// +// // Get bus for publishing +// bus := eventService.Bus() +// bus.RecordPushed("bafyxxx", labels) +func New() *Service { + cfg := config.DefaultConfig() + + logger.Info("Initializing event service", + "subscriber_buffer_size", cfg.SubscriberBufferSize, + "log_slow_consumers", cfg.LogSlowConsumers, + "log_published_events", cfg.LogPublishedEvents) + + return &Service{ + bus: NewEventBusWithConfig(cfg), + config: cfg, + } +} + +// NewWithConfig creates a new event service with custom configuration. +func NewWithConfig(cfg config.Config) *Service { + logger.Info("Initializing event service with custom config", + "subscriber_buffer_size", cfg.SubscriberBufferSize, + "log_slow_consumers", cfg.LogSlowConsumers, + "log_published_events", cfg.LogPublishedEvents) + + return &Service{ + bus: NewEventBusWithConfig(cfg), + config: cfg, + } +} + +// Bus returns the event bus for publishing events. 
+// Other services should use this to emit events. +func (s *Service) Bus() *EventBus { + return s.bus +} + +// Stop gracefully shuts down the event service. +// This closes all active subscriptions and prevents new ones. +func (s *Service) Stop() error { + logger.Info("Stopping event service", + "active_subscribers", s.bus.SubscriberCount()) + + // Get final metrics + metrics := s.bus.GetMetrics() + logger.Info("Event service stopped", + "total_published", metrics.PublishedTotal, + "total_delivered", metrics.DeliveredTotal, + "total_dropped", metrics.DroppedTotal) + + // Note: We don't close subscriptions here because: + // 1. They are managed by the controller (gRPC stream lifecycle) + // 2. Subscribers will naturally disconnect when server stops + // 3. Forcing close could cause panics in active subscribers + + return nil +} diff --git a/server/events/events_test.go b/server/events/events_test.go index c8988e341..0cbff4470 100644 --- a/server/events/events_test.go +++ b/server/events/events_test.go @@ -1,130 +1,130 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -func TestServiceLifecycle(t *testing.T) { - // Create service - service := New() - - // Bus should be accessible - bus := service.Bus() - if bus == nil { - t.Error("Expected non-nil bus") - } - - // Verify bus works - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Publish event - bus.RecordPushed(TestCID123, []string{"/test"}) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Receive event - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) - } - default: - t.Error("Expected to receive event") - } - - // Stop service - if err := service.Stop(); err != nil { - t.Errorf("Failed to stop service: %v", err) - } -} - -func TestServiceBusAccess(t *testing.T) { - service := New() - - defer func() { _ = service.Stop() }() - - // Bus should be accessible and usable - bus := service.Bus() - - // Test convenience methods - bus.RecordPushed(TestCID123, nil) - bus.SyncCreated("sync-id", "url") - - // Verify events were published - metrics := bus.GetMetrics() - if metrics.PublishedTotal != 2 { - t.Errorf("Expected 2 published events, got %d", metrics.PublishedTotal) - } -} - -func TestServiceBusReturnsEventBus(t *testing.T) { - service := New() - - defer func() { _ = service.Stop() }() - - // Bus() should return the EventBus - bus := service.Bus() - if bus == nil { - t.Error("Bus() should return non-nil") - } - - // Verify it's a working bus - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - bus.RecordPushed(TestCID123, nil) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) - } - default: - t.Error("Expected to receive event") - } -} - -func TestServiceStopWithActiveSubscribers(t *testing.T) { - service := New() - - bus := service.Bus() - - // Create multiple subscriptions - req := &eventsv1.ListenRequest{} - subID1, _ := bus.Subscribe(req) - subID2, _ := bus.Subscribe(req) - subID3, _ := bus.Subscribe(req) - - // Verify subscribers - if count := 
bus.SubscriberCount(); count != 3 { - t.Errorf("Expected 3 subscribers, got %d", count) - } - - // Publish some events - bus.RecordPushed(TestCID123, nil) - bus.RecordPushed(TestCID456, nil) - - // Stop should not error even with active subscribers - if err := service.Stop(); err != nil { - t.Errorf("Stop() with active subscribers should not error: %v", err) - } - - // Cleanup - bus.Unsubscribe(subID1) - bus.Unsubscribe(subID2) - bus.Unsubscribe(subID3) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +func TestServiceLifecycle(t *testing.T) { + // Create service + service := New() + + // Bus should be accessible + bus := service.Bus() + if bus == nil { + t.Error("Expected non-nil bus") + } + + // Verify bus works + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Publish event + bus.RecordPushed(TestCID123, []string{"/test"}) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Receive event + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) + } + default: + t.Error("Expected to receive event") + } + + // Stop service + if err := service.Stop(); err != nil { + t.Errorf("Failed to stop service: %v", err) + } +} + +func TestServiceBusAccess(t *testing.T) { + service := New() + + defer func() { _ = service.Stop() }() + + // Bus should be accessible and usable + bus := service.Bus() + + // Test convenience methods + bus.RecordPushed(TestCID123, nil) + bus.SyncCreated("sync-id", "url") + + // Verify events were published + metrics := bus.GetMetrics() + if metrics.PublishedTotal != 2 { + t.Errorf("Expected 2 published events, got %d", metrics.PublishedTotal) + } +} + +func TestServiceBusReturnsEventBus(t *testing.T) { + service := New() + + defer func() { _ = service.Stop() }() + + // Bus() should return the EventBus + bus := service.Bus() + if bus == nil { + t.Error("Bus() should return non-nil") + } + + // Verify it's a working bus + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + bus.RecordPushed(TestCID123, nil) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) + } + default: + t.Error("Expected to receive event") + } +} + +func TestServiceStopWithActiveSubscribers(t *testing.T) { + service := New() + + bus := service.Bus() + + // Create multiple subscriptions + req := &eventsv1.ListenRequest{} + subID1, _ := bus.Subscribe(req) + subID2, _ := bus.Subscribe(req) + subID3, _ := bus.Subscribe(req) + + // Verify subscribers + if count := bus.SubscriberCount(); count != 3 { + t.Errorf("Expected 3 subscribers, got %d", count) + } + + // Publish some events + bus.RecordPushed(TestCID123, nil) + bus.RecordPushed(TestCID456, nil) + + // Stop should not error even with active subscribers + if err := service.Stop(); err != nil { + t.Errorf("Stop() with active subscribers should not error: %v", err) + } + + // Cleanup + bus.Unsubscribe(subID1) + bus.Unsubscribe(subID2) + bus.Unsubscribe(subID3) +} diff --git a/server/events/filters.go b/server/events/filters.go index fa31fdbc8..4cba4cc2f 100644 --- 
a/server/events/filters.go +++ b/server/events/filters.go @@ -1,165 +1,165 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "strings" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -// Filter is a function that checks if an event matches certain criteria. -// Filters are composable and can be combined using And, Or, and Not operators. -type Filter func(*Event) bool - -// BuildFilters converts a ListenRequest into a list of filter functions. -// These filters are applied when determining which events to deliver to a subscriber. -// -// If no filters are specified in the request, returns an empty slice (matches all events). -func BuildFilters(req *eventsv1.ListenRequest) []Filter { - var filters []Filter - - if len(req.GetEventTypes()) > 0 { - filters = append(filters, EventTypeFilter(req.GetEventTypes()...)) - } - - if len(req.GetCidFilters()) > 0 { - filters = append(filters, CIDFilter(req.GetCidFilters()...)) - } - - if len(req.GetLabelFilters()) > 0 { - filters = append(filters, LabelFilter(req.GetLabelFilters()...)) - } - - return filters -} - -// Matches checks if an event passes all the given filters. -// Returns true if all filters pass (AND logic), false otherwise. -// If filters slice is empty, returns true (matches everything). -func Matches(event *Event, filters []Filter) bool { - for _, filter := range filters { - if !filter(event) { - return false - } - } - - return true -} - -// EventTypeFilter creates a filter that matches events with any of the specified types. -// Returns true if the event's type matches any of the provided types (OR logic). -// -// Example: -// -// filter := EventTypeFilter( -// eventsv1.EVENT_TYPE_RECORD_PUSHED, -// eventsv1.EVENT_TYPE_RECORD_PUBLISHED, -// ) -func EventTypeFilter(types ...eventsv1.EventType) Filter { - return func(e *Event) bool { - for _, t := range types { - if e.Type == t { - return true - } - } - - return false - } -} - -// CIDFilter creates a filter that matches events with any of the specified CIDs. -// Returns true if the event's resource ID matches any of the provided CIDs (OR logic). -// -// Example: -// -// filter := CIDFilter("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") -func CIDFilter(cids ...string) Filter { - return func(e *Event) bool { - for _, cid := range cids { - if e.ResourceID == cid { - return true - } - } - - return false - } -} - -// LabelFilter creates a filter that matches events with labels containing any of the specified substrings. -// Returns true if any of the event's labels contains any of the filter strings (OR logic). -// Uses substring matching for flexibility. -// -// Example: -// -// filter := LabelFilter("/skills/AI", "/domains/research") -// // Matches events with labels like "/skills/AI/ML" or "/domains/research/quantum" -func LabelFilter(labelFilters ...string) Filter { - return func(e *Event) bool { - for _, filter := range labelFilters { - for _, label := range e.Labels { - if strings.Contains(label, filter) { - return true - } - } - } - - return false - } -} - -// Or combines multiple filters with OR logic. -// Returns true if ANY of the filters matches (short-circuits on first match). 
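The combinators defined in this file compose freely. For instance, a sketch using the generated Go constant names as the tests do (the short names in the doc comments are illustrative shorthand):

	filter := And(
		Or(
			EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED),
			EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED),
		),
		LabelFilter("/skills/AI"),
		Not(CIDFilter("bafytest999")), // hypothetical CID to exclude
	)

	if Matches(event, []Filter{filter}) {
		// deliver the event
	}
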
-// -// Example: -// -// filter := Or( -// EventTypeFilter(EVENT_TYPE_RECORD_PUSHED), -// EventTypeFilter(EVENT_TYPE_RECORD_PULLED), -// ) -func Or(filters ...Filter) Filter { - return func(e *Event) bool { - for _, filter := range filters { - if filter(e) { - return true - } - } - - return false - } -} - -// And combines multiple filters with AND logic. -// Returns true if ALL of the filters match (short-circuits on first failure). -// -// Example: -// -// filter := And( -// EventTypeFilter(EVENT_TYPE_RECORD_PUSHED), -// LabelFilter("/skills/AI"), -// ) -func And(filters ...Filter) Filter { - return func(e *Event) bool { - for _, filter := range filters { - if !filter(e) { - return false - } - } - - return true - } -} - -// Not negates a filter. -// Returns true if the filter does NOT match, false if it does match. -// -// Example: -// -// filter := Not(EventTypeFilter(EVENT_TYPE_RECORD_DELETED)) -// // Matches all events EXCEPT deletions -func Not(filter Filter) Filter { - return func(e *Event) bool { - return !filter(e) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "strings" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +// Filter is a function that checks if an event matches certain criteria. +// Filters are composable and can be combined using And, Or, and Not operators. +type Filter func(*Event) bool + +// BuildFilters converts a ListenRequest into a list of filter functions. +// These filters are applied when determining which events to deliver to a subscriber. +// +// If no filters are specified in the request, returns an empty slice (matches all events). +func BuildFilters(req *eventsv1.ListenRequest) []Filter { + var filters []Filter + + if len(req.GetEventTypes()) > 0 { + filters = append(filters, EventTypeFilter(req.GetEventTypes()...)) + } + + if len(req.GetCidFilters()) > 0 { + filters = append(filters, CIDFilter(req.GetCidFilters()...)) + } + + if len(req.GetLabelFilters()) > 0 { + filters = append(filters, LabelFilter(req.GetLabelFilters()...)) + } + + return filters +} + +// Matches checks if an event passes all the given filters. +// Returns true if all filters pass (AND logic), false otherwise. +// If filters slice is empty, returns true (matches everything). +func Matches(event *Event, filters []Filter) bool { + for _, filter := range filters { + if !filter(event) { + return false + } + } + + return true +} + +// EventTypeFilter creates a filter that matches events with any of the specified types. +// Returns true if the event's type matches any of the provided types (OR logic). +// +// Example: +// +// filter := EventTypeFilter( +// eventsv1.EVENT_TYPE_RECORD_PUSHED, +// eventsv1.EVENT_TYPE_RECORD_PUBLISHED, +// ) +func EventTypeFilter(types ...eventsv1.EventType) Filter { + return func(e *Event) bool { + for _, t := range types { + if e.Type == t { + return true + } + } + + return false + } +} + +// CIDFilter creates a filter that matches events with any of the specified CIDs. +// Returns true if the event's resource ID matches any of the provided CIDs (OR logic). +// +// Example: +// +// filter := CIDFilter("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi") +func CIDFilter(cids ...string) Filter { + return func(e *Event) bool { + for _, cid := range cids { + if e.ResourceID == cid { + return true + } + } + + return false + } +} + +// LabelFilter creates a filter that matches events with labels containing any of the specified substrings. 
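Usage note on the filter API above: BuildFilters, Matches, and the And/Or/Not combinators compose as plain functions. A minimal end-to-end sketch, assuming the package is importable as github.com/agntcy/dir/server/events (the import path is inferred from the module layout and is not shown in this diff):

package main

import (
	"fmt"

	eventsv1 "github.com/agntcy/dir/api/events/v1"
	events "github.com/agntcy/dir/server/events"
)

func main() {
	// Build filters from a ListenRequest exactly as the server does.
	req := &eventsv1.ListenRequest{
		EventTypes:   []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED},
		LabelFilters: []string{"/skills/AI"},
	}
	filters := events.BuildFilters(req)

	// Compose an extra guard by hand with the combinators.
	notDeleted := events.Not(events.EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED))

	ev := events.NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123")
	ev.Labels = []string{"/skills/AI/ML"} // substring match against "/skills/AI"

	fmt.Println(events.Matches(ev, filters) && notDeleted(ev)) // true
}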
+// Returns true if any of the event's labels contains any of the filter strings (OR logic). +// Uses substring matching for flexibility. +// +// Example: +// +// filter := LabelFilter("/skills/AI", "/domains/research") +// // Matches events with labels like "/skills/AI/ML" or "/domains/research/quantum" +func LabelFilter(labelFilters ...string) Filter { + return func(e *Event) bool { + for _, filter := range labelFilters { + for _, label := range e.Labels { + if strings.Contains(label, filter) { + return true + } + } + } + + return false + } +} + +// Or combines multiple filters with OR logic. +// Returns true if ANY of the filters matches (short-circuits on first match). +// +// Example: +// +// filter := Or( +// EventTypeFilter(EVENT_TYPE_RECORD_PUSHED), +// EventTypeFilter(EVENT_TYPE_RECORD_PULLED), +// ) +func Or(filters ...Filter) Filter { + return func(e *Event) bool { + for _, filter := range filters { + if filter(e) { + return true + } + } + + return false + } +} + +// And combines multiple filters with AND logic. +// Returns true if ALL of the filters match (short-circuits on first failure). +// +// Example: +// +// filter := And( +// EventTypeFilter(EVENT_TYPE_RECORD_PUSHED), +// LabelFilter("/skills/AI"), +// ) +func And(filters ...Filter) Filter { + return func(e *Event) bool { + for _, filter := range filters { + if !filter(e) { + return false + } + } + + return true + } +} + +// Not negates a filter. +// Returns true if the filter does NOT match, false if it does match. +// +// Example: +// +// filter := Not(EventTypeFilter(EVENT_TYPE_RECORD_DELETED)) +// // Matches all events EXCEPT deletions +func Not(filter Filter) Filter { + return func(e *Event) bool { + return !filter(e) + } +} diff --git a/server/events/filters_test.go b/server/events/filters_test.go index c783b9f6c..8e2a34885 100644 --- a/server/events/filters_test.go +++ b/server/events/filters_test.go @@ -1,471 +1,471 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -func TestEventTypeFilter(t *testing.T) { - filter := EventTypeFilter( - eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - ) - - tests := []struct { - name string - eventType eventsv1.EventType - want bool - }{ - { - name: "matches first type", - eventType: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - want: true, - }, - { - name: "matches second type", - eventType: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - want: true, - }, - { - name: "does not match other type", - eventType: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - event := &Event{ - ID: TestEventID, - Type: tt.eventType, - Timestamp: time.Now(), - ResourceID: TestCID123, - } - - if got := filter(event); got != tt.want { - t.Errorf("EventTypeFilter() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestCIDFilter(t *testing.T) { - filter := CIDFilter("bafytest123", "bafytest456") - - tests := []struct { - name string - resourceID string - want bool - }{ - { - name: "matches first CID", - resourceID: "bafytest123", - want: true, - }, - { - name: "matches second CID", - resourceID: "bafytest456", - want: true, - }, - { - name: "does not match other CID", - resourceID: "bafytest789", - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - event := 
&Event{ - ID: TestEventID, - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: tt.resourceID, - } - - if got := filter(event); got != tt.want { - t.Errorf("CIDFilter() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestLabelFilter(t *testing.T) { - filter := LabelFilter("/skills/AI", "/domains/research") - - tests := []struct { - name string - labels []string - want bool - }{ - { - name: "matches first label substring", - labels: []string{"/skills/AI/ML"}, - want: true, - }, - { - name: "matches second label substring", - labels: []string{"/domains/research/quantum"}, - want: true, - }, - { - name: "matches exact label", - labels: []string{"/skills/AI"}, - want: true, - }, - { - name: "matches one of multiple labels", - labels: []string{"/modules/tensorflow", "/skills/AI/NLP"}, - want: true, - }, - { - name: "does not match other labels", - labels: []string{"/modules/pytorch", "/domains/medical"}, - want: false, - }, - { - name: "empty labels does not match", - labels: []string{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - event := &Event{ - ID: TestEventID, - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: TestCID123, - Labels: tt.labels, - } - - if got := filter(event); got != tt.want { - t.Errorf("LabelFilter() = %v, want %v", got, tt.want) - } - }) - } -} - -//nolint:dupl // Similar test structure to TestAndFilter is intentional -func TestOrFilter(t *testing.T) { - filter := Or( - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), - LabelFilter("/skills/AI"), - ) - - tests := []struct { - name string - event *Event - want bool - }{ - { - name: "matches first filter only", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/domains/medical"}, - }, - want: true, - }, - { - name: "matches second filter only", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - Labels: []string{"/skills/AI"}, - }, - want: true, - }, - { - name: "matches both filters", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/skills/AI"}, - }, - want: true, - }, - { - name: "matches neither filter", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - Labels: []string{"/domains/medical"}, - }, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.event.ID = TestEventID - tt.event.Timestamp = time.Now() - tt.event.ResourceID = TestCID123 - - if got := filter(tt.event); got != tt.want { - t.Errorf("Or() = %v, want %v", got, tt.want) - } - }) - } -} - -//nolint:dupl // Similar test structure to TestOrFilter is intentional -func TestAndFilter(t *testing.T) { - filter := And( - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), - LabelFilter("/skills/AI"), - ) - - tests := []struct { - name string - event *Event - want bool - }{ - { - name: "matches both filters", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/skills/AI"}, - }, - want: true, - }, - { - name: "matches first filter only", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/domains/medical"}, - }, - want: false, - }, - { - name: "matches second filter only", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - Labels: []string{"/skills/AI"}, - }, - want: false, - }, - { - name: "matches neither filter", - event: &Event{ - Type: 
eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - Labels: []string{"/domains/medical"}, - }, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.event.ID = TestEventID - tt.event.Timestamp = time.Now() - tt.event.ResourceID = TestCID123 - - if got := filter(tt.event); got != tt.want { - t.Errorf("And() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestNotFilter(t *testing.T) { - filter := Not(EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)) - - tests := []struct { - name string - eventType eventsv1.EventType - want bool - }{ - { - name: "negates matching type", - eventType: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - want: false, - }, - { - name: "allows non-matching type", - eventType: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - event := &Event{ - ID: TestEventID, - Type: tt.eventType, - Timestamp: time.Now(), - ResourceID: TestCID123, - } - - if got := filter(event); got != tt.want { - t.Errorf("Not() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestBuildFilters(t *testing.T) { - tests := []struct { - name string - req *eventsv1.ListenRequest - wantLen int - }{ - { - name: "all filters specified", - req: &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - CidFilters: []string{"bafytest123"}, - LabelFilters: []string{"/skills/AI"}, - }, - wantLen: 3, - }, - { - name: "only event type filter", - req: &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, - }, - wantLen: 1, - }, - { - name: "only CID filter", - req: &eventsv1.ListenRequest{ - CidFilters: []string{"bafytest123"}, - }, - wantLen: 1, - }, - { - name: "only label filter", - req: &eventsv1.ListenRequest{ - LabelFilters: []string{"/skills/AI"}, - }, - wantLen: 1, - }, - { - name: "no filters", - req: &eventsv1.ListenRequest{}, - wantLen: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - filters := BuildFilters(tt.req) - - if len(filters) != tt.wantLen { - t.Errorf("BuildFilters() returned %d filters, want %d", len(filters), tt.wantLen) - } - }) - } -} - -func TestMatches(t *testing.T) { - event := &Event{ - ID: TestEventID, - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: "bafytest123", - Labels: []string{"/skills/AI"}, - } - - tests := []struct { - name string - filters []Filter - want bool - }{ - { - name: "matches all filters", - filters: []Filter{ - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), - CIDFilter("bafytest123"), - LabelFilter("/skills/AI"), - }, - want: true, - }, - { - name: "fails one filter", - filters: []Filter{ - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), - CIDFilter("different-cid"), - }, - want: false, - }, - { - name: "empty filters matches everything", - filters: []Filter{}, - want: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := Matches(event, tt.filters); got != tt.want { - t.Errorf("Matches() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestComplexFilterComposition(t *testing.T) { - // Complex filter: (PUSHED OR PUBLISHED) AND AI labels AND NOT deleted - filter := And( - Or( - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), - EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED), - ), - LabelFilter("/skills/AI"), - 
Not(EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)), - ) - - tests := []struct { - name string - event *Event - want bool - }{ - { - name: "matches: pushed + AI", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/skills/AI"}, - }, - want: true, - }, - { - name: "matches: published + AI", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - Labels: []string{"/skills/AI"}, - }, - want: true, - }, - { - name: "fails: deleted (even with AI)", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - Labels: []string{"/skills/AI"}, - }, - want: false, - }, - { - name: "fails: pushed but no AI", - event: &Event{ - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Labels: []string{"/domains/medical"}, - }, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.event.ID = TestEventID - tt.event.Timestamp = time.Now() - tt.event.ResourceID = TestCID123 - - if got := filter(tt.event); got != tt.want { - t.Errorf("Complex filter = %v, want %v", got, tt.want) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +func TestEventTypeFilter(t *testing.T) { + filter := EventTypeFilter( + eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + ) + + tests := []struct { + name string + eventType eventsv1.EventType + want bool + }{ + { + name: "matches first type", + eventType: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + want: true, + }, + { + name: "matches second type", + eventType: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + want: true, + }, + { + name: "does not match other type", + eventType: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{ + ID: TestEventID, + Type: tt.eventType, + Timestamp: time.Now(), + ResourceID: TestCID123, + } + + if got := filter(event); got != tt.want { + t.Errorf("EventTypeFilter() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCIDFilter(t *testing.T) { + filter := CIDFilter("bafytest123", "bafytest456") + + tests := []struct { + name string + resourceID string + want bool + }{ + { + name: "matches first CID", + resourceID: "bafytest123", + want: true, + }, + { + name: "matches second CID", + resourceID: "bafytest456", + want: true, + }, + { + name: "does not match other CID", + resourceID: "bafytest789", + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{ + ID: TestEventID, + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: tt.resourceID, + } + + if got := filter(event); got != tt.want { + t.Errorf("CIDFilter() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLabelFilter(t *testing.T) { + filter := LabelFilter("/skills/AI", "/domains/research") + + tests := []struct { + name string + labels []string + want bool + }{ + { + name: "matches first label substring", + labels: []string{"/skills/AI/ML"}, + want: true, + }, + { + name: "matches second label substring", + labels: []string{"/domains/research/quantum"}, + want: true, + }, + { + name: "matches exact label", + labels: []string{"/skills/AI"}, + want: true, + }, + { + name: "matches one of multiple labels", + labels: 
[]string{"/modules/tensorflow", "/skills/AI/NLP"}, + want: true, + }, + { + name: "does not match other labels", + labels: []string{"/modules/pytorch", "/domains/medical"}, + want: false, + }, + { + name: "empty labels does not match", + labels: []string{}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{ + ID: TestEventID, + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: TestCID123, + Labels: tt.labels, + } + + if got := filter(event); got != tt.want { + t.Errorf("LabelFilter() = %v, want %v", got, tt.want) + } + }) + } +} + +//nolint:dupl // Similar test structure to TestAndFilter is intentional +func TestOrFilter(t *testing.T) { + filter := Or( + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), + LabelFilter("/skills/AI"), + ) + + tests := []struct { + name string + event *Event + want bool + }{ + { + name: "matches first filter only", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: []string{"/domains/medical"}, + }, + want: true, + }, + { + name: "matches second filter only", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + Labels: []string{"/skills/AI"}, + }, + want: true, + }, + { + name: "matches both filters", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: []string{"/skills/AI"}, + }, + want: true, + }, + { + name: "matches neither filter", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + Labels: []string{"/domains/medical"}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.event.ID = TestEventID + tt.event.Timestamp = time.Now() + tt.event.ResourceID = TestCID123 + + if got := filter(tt.event); got != tt.want { + t.Errorf("Or() = %v, want %v", got, tt.want) + } + }) + } +} + +//nolint:dupl // Similar test structure to TestOrFilter is intentional +func TestAndFilter(t *testing.T) { + filter := And( + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), + LabelFilter("/skills/AI"), + ) + + tests := []struct { + name string + event *Event + want bool + }{ + { + name: "matches both filters", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: []string{"/skills/AI"}, + }, + want: true, + }, + { + name: "matches first filter only", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: []string{"/domains/medical"}, + }, + want: false, + }, + { + name: "matches second filter only", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + Labels: []string{"/skills/AI"}, + }, + want: false, + }, + { + name: "matches neither filter", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + Labels: []string{"/domains/medical"}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.event.ID = TestEventID + tt.event.Timestamp = time.Now() + tt.event.ResourceID = TestCID123 + + if got := filter(tt.event); got != tt.want { + t.Errorf("And() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNotFilter(t *testing.T) { + filter := Not(EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)) + + tests := []struct { + name string + eventType eventsv1.EventType + want bool + }{ + { + name: "negates matching type", + eventType: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + want: false, + }, + { + name: "allows non-matching type", + eventType: 
eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + event := &Event{ + ID: TestEventID, + Type: tt.eventType, + Timestamp: time.Now(), + ResourceID: TestCID123, + } + + if got := filter(event); got != tt.want { + t.Errorf("Not() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestBuildFilters(t *testing.T) { + tests := []struct { + name string + req *eventsv1.ListenRequest + wantLen int + }{ + { + name: "all filters specified", + req: &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + CidFilters: []string{"bafytest123"}, + LabelFilters: []string{"/skills/AI"}, + }, + wantLen: 3, + }, + { + name: "only event type filter", + req: &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED}, + }, + wantLen: 1, + }, + { + name: "only CID filter", + req: &eventsv1.ListenRequest{ + CidFilters: []string{"bafytest123"}, + }, + wantLen: 1, + }, + { + name: "only label filter", + req: &eventsv1.ListenRequest{ + LabelFilters: []string{"/skills/AI"}, + }, + wantLen: 1, + }, + { + name: "no filters", + req: &eventsv1.ListenRequest{}, + wantLen: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filters := BuildFilters(tt.req) + + if len(filters) != tt.wantLen { + t.Errorf("BuildFilters() returned %d filters, want %d", len(filters), tt.wantLen) + } + }) + } +} + +func TestMatches(t *testing.T) { + event := &Event{ + ID: TestEventID, + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: "bafytest123", + Labels: []string{"/skills/AI"}, + } + + tests := []struct { + name string + filters []Filter + want bool + }{ + { + name: "matches all filters", + filters: []Filter{ + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), + CIDFilter("bafytest123"), + LabelFilter("/skills/AI"), + }, + want: true, + }, + { + name: "fails one filter", + filters: []Filter{ + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), + CIDFilter("different-cid"), + }, + want: false, + }, + { + name: "empty filters matches everything", + filters: []Filter{}, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := Matches(event, tt.filters); got != tt.want { + t.Errorf("Matches() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestComplexFilterComposition(t *testing.T) { + // Complex filter: (PUSHED OR PUBLISHED) AND AI labels AND NOT deleted + filter := And( + Or( + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), + EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED), + ), + LabelFilter("/skills/AI"), + Not(EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_DELETED)), + ) + + tests := []struct { + name string + event *Event + want bool + }{ + { + name: "matches: pushed + AI", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: []string{"/skills/AI"}, + }, + want: true, + }, + { + name: "matches: published + AI", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + Labels: []string{"/skills/AI"}, + }, + want: true, + }, + { + name: "fails: deleted (even with AI)", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + Labels: []string{"/skills/AI"}, + }, + want: false, + }, + { + name: "fails: pushed but no AI", + event: &Event{ + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Labels: 
[]string{"/domains/medical"}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.event.ID = TestEventID + tt.event.Timestamp = time.Now() + tt.event.ResourceID = TestCID123 + + if got := filter(tt.event); got != tt.want { + t.Errorf("Complex filter = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/server/events/mock_interface_test.go b/server/events/mock_interface_test.go index 0309312a4..72065fe9d 100644 --- a/server/events/mock_interface_test.go +++ b/server/events/mock_interface_test.go @@ -1,323 +1,323 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "fmt" - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -func TestMockEventBusPublish(t *testing.T) { - mock := NewMockEventBus() - - // Publish some events - event1 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123") - event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "bafytest456") - - mock.Publish(event1) - mock.Publish(event2) - - // Verify events were recorded - events := mock.GetEvents() - if len(events) != 2 { - t.Errorf("Expected 2 events, got %d", len(events)) - } - - if events[0].ResourceID != "bafytest123" { - t.Errorf("Expected first event resource_id bafytest123, got %s", events[0].ResourceID) - } - - if events[1].ResourceID != "bafytest456" { - t.Errorf("Expected second event resource_id bafytest456, got %s", events[1].ResourceID) - } -} - -func TestMockEventBusGetEventsByType(t *testing.T) { - mock := NewMockEventBus() - - // Publish different event types - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "cid1")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "cid2")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "cid3")) - - // Get only PUSHED events - pushedEvents := mock.GetEventsByType(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) - if len(pushedEvents) != 2 { - t.Errorf("Expected 2 PUSHED events, got %d", len(pushedEvents)) - } - - // Get only PUBLISHED events - publishedEvents := mock.GetEventsByType(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED) - if len(publishedEvents) != 1 { - t.Errorf("Expected 1 PUBLISHED event, got %d", len(publishedEvents)) - } -} - -func TestMockEventBusGetEventByResourceID(t *testing.T) { - mock := NewMockEventBus() - - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest456")) - - // Find by resource ID - event := mock.GetEventByResourceID("bafytest456") - if event == nil { - t.Fatal("Expected to find event with resource_id bafytest456") - } - - if event.ResourceID != "bafytest456" { - t.Errorf("Expected resource_id bafytest456, got %s", event.ResourceID) - } - - // Try to find non-existent resource ID - notFound := mock.GetEventByResourceID("nonexistent") - if notFound != nil { - t.Error("Expected nil for non-existent resource ID") - } -} - -func TestMockEventBusWaitForEvent(t *testing.T) { - mock := NewMockEventBus() - - // Publish event in background - go func() { - time.Sleep(50 * time.Millisecond) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, "sync-123")) - }() - - // Wait for event - filter := EventTypeFilter(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED) - event, ok := mock.WaitForEvent(filter, time.Second) - - if !ok { - t.Fatal("Expected to find event within timeout") - } - - if 
event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { - t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) - } - - if event.ResourceID != "sync-123" { - t.Errorf("Expected sync-123, got %s", event.ResourceID) - } -} - -func TestMockEventBusWaitForEventTimeout(t *testing.T) { - mock := NewMockEventBus() - - // Publish wrong event type - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) - - // Wait for event that won't come - filter := EventTypeFilter(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED) - event, ok := mock.WaitForEvent(filter, 100*time.Millisecond) - - if ok { - t.Error("Expected timeout, but found event") - } - - if event != nil { - t.Error("Expected nil event on timeout") - } -} - -func TestMockEventBusReset(t *testing.T) { - mock := NewMockEventBus() - - // Publish events - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) - - if mock.Count() != 2 { - t.Errorf("Expected 2 events before reset, got %d", mock.Count()) - } - - // Reset - mock.Reset() - - // Should be empty - if mock.Count() != 0 { - t.Errorf("Expected 0 events after reset, got %d", mock.Count()) - } - - events := mock.GetEvents() - if len(events) != 0 { - t.Errorf("Expected empty events after reset, got %d", len(events)) - } -} - -func TestMockEventBusCount(t *testing.T) { - mock := NewMockEventBus() - - if mock.Count() != 0 { - t.Errorf("Expected initial count 0, got %d", mock.Count()) - } - - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) - - if mock.Count() != 1 { - t.Errorf("Expected count 1, got %d", mock.Count()) - } - - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) - - if mock.Count() != 2 { - t.Errorf("Expected count 2, got %d", mock.Count()) - } -} - -func TestMockEventBusAssertEventPublished(t *testing.T) { - mock := NewMockEventBus() - mockT := &mockTestingT{} - - // Publish event - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) - - // Assert existing event - should pass - if !mock.AssertEventPublished(mockT, eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) { - t.Error("Expected assertion to pass for published event") - } - - if mockT.errorCalled { - t.Error("Expected no error for existing event") - } - - // Assert non-existing event - should fail - mockT.Reset() - - if mock.AssertEventPublished(mockT, eventsv1.EventType_EVENT_TYPE_SYNC_CREATED) { - t.Error("Expected assertion to fail for non-existent event") - } - - if !mockT.errorCalled { - t.Error("Expected error to be called for non-existent event") - } -} - -func TestMockEventBusAssertEventWithResourceID(t *testing.T) { - mock := NewMockEventBus() - mockT := &mockTestingT{} - - // Publish event - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123")) - - // Assert existing resource ID - should pass - if !mock.AssertEventWithResourceID(mockT, "bafytest123") { - t.Error("Expected assertion to pass for existing resource ID") - } - - if mockT.errorCalled { - t.Error("Expected no error for existing resource ID") - } - - // Assert non-existing resource ID - should fail - mockT.Reset() - - if mock.AssertEventWithResourceID(mockT, "nonexistent") { - t.Error("Expected assertion to fail for non-existent resource ID") - } - - if !mockT.errorCalled { - t.Error("Expected error to be called for non-existent resource ID") - } -} - -func TestMockEventBusAssertEventCount(t *testing.T) { - mock := NewMockEventBus() 
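Usage note: the assertion helpers exercised in these tests suggest the intended pattern for consumers of MockEventBus. A sketch of a hypothetical test in the same package, using only identifiers visible in this diff (the test name and the inlined publish stand in for real code under test):

func TestPushEmitsEvent(t *testing.T) {
	mock := NewMockEventBus()

	// The code under test would publish through the injected mock;
	// here the publish is inlined for brevity.
	mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123"))

	// *testing.T satisfies the TestingT interface (it has Errorf),
	// so the assertion helpers keep the test terse.
	mock.AssertEventCount(t, 1)
	mock.AssertEventPublished(t, eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED)
	mock.AssertEventWithResourceID(t, "bafytest123")
}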
- mockT := &mockTestingT{} - - // Publish 3 events - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test3")) - - // Assert correct count - should pass - if !mock.AssertEventCount(mockT, 3) { - t.Error("Expected assertion to pass for correct count") - } - - if mockT.errorCalled { - t.Error("Expected no error for correct count") - } - - // Assert wrong count - should fail - mockT.Reset() - - if mock.AssertEventCount(mockT, 5) { - t.Error("Expected assertion to fail for wrong count") - } - - if !mockT.errorCalled { - t.Error("Expected error to be called for wrong count") - } -} - -func TestMockEventBusAssertNoEvents(t *testing.T) { - mock := NewMockEventBus() - mockT := &mockTestingT{} - - // Assert no events - should pass - if !mock.AssertNoEvents(mockT) { - t.Error("Expected assertion to pass when no events") - } - - // Publish event - mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) - - // Assert no events - should fail - mockT.Reset() - - if mock.AssertNoEvents(mockT) { - t.Error("Expected assertion to fail when events exist") - } - - if !mockT.errorCalled { - t.Error("Expected error to be called when events exist") - } -} - -func TestMockEventBusConcurrentAccess(t *testing.T) { - mock := NewMockEventBus() - - // Publish from multiple goroutines - done := make(chan bool) - - for i := range 10 { - go func(n int) { - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, fmt.Sprintf("cid-%d", n)) - mock.Publish(event) - - done <- true - }(i) - } - - // Wait for all to complete - for range 10 { - <-done - } - - // Should have all events - if mock.Count() != 10 { - t.Errorf("Expected 10 events, got %d", mock.Count()) - } -} - -// mockTestingT is a test helper that implements TestingT interface. -type mockTestingT struct { - errorCalled bool - lastError string -} - -func (m *mockTestingT) Errorf(format string, args ...interface{}) { - m.errorCalled = true - m.lastError = fmt.Sprintf(format, args...) 
-} - -func (m *mockTestingT) Reset() { - m.errorCalled = false - m.lastError = "" -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "fmt" + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +func TestMockEventBusPublish(t *testing.T) { + mock := NewMockEventBus() + + // Publish some events + event1 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123") + event2 := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "bafytest456") + + mock.Publish(event1) + mock.Publish(event2) + + // Verify events were recorded + events := mock.GetEvents() + if len(events) != 2 { + t.Errorf("Expected 2 events, got %d", len(events)) + } + + if events[0].ResourceID != "bafytest123" { + t.Errorf("Expected first event resource_id bafytest123, got %s", events[0].ResourceID) + } + + if events[1].ResourceID != "bafytest456" { + t.Errorf("Expected second event resource_id bafytest456, got %s", events[1].ResourceID) + } +} + +func TestMockEventBusGetEventsByType(t *testing.T) { + mock := NewMockEventBus() + + // Publish different event types + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "cid1")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, "cid2")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "cid3")) + + // Get only PUSHED events + pushedEvents := mock.GetEventsByType(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) + if len(pushedEvents) != 2 { + t.Errorf("Expected 2 PUSHED events, got %d", len(pushedEvents)) + } + + // Get only PUBLISHED events + publishedEvents := mock.GetEventsByType(eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED) + if len(publishedEvents) != 1 { + t.Errorf("Expected 1 PUBLISHED event, got %d", len(publishedEvents)) + } +} + +func TestMockEventBusGetEventByResourceID(t *testing.T) { + mock := NewMockEventBus() + + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest456")) + + // Find by resource ID + event := mock.GetEventByResourceID("bafytest456") + if event == nil { + t.Fatal("Expected to find event with resource_id bafytest456") + } + + if event.ResourceID != "bafytest456" { + t.Errorf("Expected resource_id bafytest456, got %s", event.ResourceID) + } + + // Try to find non-existent resource ID + notFound := mock.GetEventByResourceID("nonexistent") + if notFound != nil { + t.Error("Expected nil for non-existent resource ID") + } +} + +func TestMockEventBusWaitForEvent(t *testing.T) { + mock := NewMockEventBus() + + // Publish event in background + go func() { + time.Sleep(50 * time.Millisecond) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, "sync-123")) + }() + + // Wait for event + filter := EventTypeFilter(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED) + event, ok := mock.WaitForEvent(filter, time.Second) + + if !ok { + t.Fatal("Expected to find event within timeout") + } + + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { + t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) + } + + if event.ResourceID != "sync-123" { + t.Errorf("Expected sync-123, got %s", event.ResourceID) + } +} + +func TestMockEventBusWaitForEventTimeout(t *testing.T) { + mock := NewMockEventBus() + + // Publish wrong event type + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) + + // Wait for event that won't come + filter := 
EventTypeFilter(eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED) + event, ok := mock.WaitForEvent(filter, 100*time.Millisecond) + + if ok { + t.Error("Expected timeout, but found event") + } + + if event != nil { + t.Error("Expected nil event on timeout") + } +} + +func TestMockEventBusReset(t *testing.T) { + mock := NewMockEventBus() + + // Publish events + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) + + if mock.Count() != 2 { + t.Errorf("Expected 2 events before reset, got %d", mock.Count()) + } + + // Reset + mock.Reset() + + // Should be empty + if mock.Count() != 0 { + t.Errorf("Expected 0 events after reset, got %d", mock.Count()) + } + + events := mock.GetEvents() + if len(events) != 0 { + t.Errorf("Expected empty events after reset, got %d", len(events)) + } +} + +func TestMockEventBusCount(t *testing.T) { + mock := NewMockEventBus() + + if mock.Count() != 0 { + t.Errorf("Expected initial count 0, got %d", mock.Count()) + } + + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) + + if mock.Count() != 1 { + t.Errorf("Expected count 1, got %d", mock.Count()) + } + + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) + + if mock.Count() != 2 { + t.Errorf("Expected count 2, got %d", mock.Count()) + } +} + +func TestMockEventBusAssertEventPublished(t *testing.T) { + mock := NewMockEventBus() + mockT := &mockTestingT{} + + // Publish event + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) + + // Assert existing event - should pass + if !mock.AssertEventPublished(mockT, eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) { + t.Error("Expected assertion to pass for published event") + } + + if mockT.errorCalled { + t.Error("Expected no error for existing event") + } + + // Assert non-existing event - should fail + mockT.Reset() + + if mock.AssertEventPublished(mockT, eventsv1.EventType_EVENT_TYPE_SYNC_CREATED) { + t.Error("Expected assertion to fail for non-existent event") + } + + if !mockT.errorCalled { + t.Error("Expected error to be called for non-existent event") + } +} + +func TestMockEventBusAssertEventWithResourceID(t *testing.T) { + mock := NewMockEventBus() + mockT := &mockTestingT{} + + // Publish event + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest123")) + + // Assert existing resource ID - should pass + if !mock.AssertEventWithResourceID(mockT, "bafytest123") { + t.Error("Expected assertion to pass for existing resource ID") + } + + if mockT.errorCalled { + t.Error("Expected no error for existing resource ID") + } + + // Assert non-existing resource ID - should fail + mockT.Reset() + + if mock.AssertEventWithResourceID(mockT, "nonexistent") { + t.Error("Expected assertion to fail for non-existent resource ID") + } + + if !mockT.errorCalled { + t.Error("Expected error to be called for non-existent resource ID") + } +} + +func TestMockEventBusAssertEventCount(t *testing.T) { + mock := NewMockEventBus() + mockT := &mockTestingT{} + + // Publish 3 events + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test1")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test2")) + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test3")) + + // Assert correct count - should pass + if !mock.AssertEventCount(mockT, 3) { + t.Error("Expected assertion to pass for correct count") + } + + if mockT.errorCalled { + 
t.Error("Expected no error for correct count") + } + + // Assert wrong count - should fail + mockT.Reset() + + if mock.AssertEventCount(mockT, 5) { + t.Error("Expected assertion to fail for wrong count") + } + + if !mockT.errorCalled { + t.Error("Expected error to be called for wrong count") + } +} + +func TestMockEventBusAssertNoEvents(t *testing.T) { + mock := NewMockEventBus() + mockT := &mockTestingT{} + + // Assert no events - should pass + if !mock.AssertNoEvents(mockT) { + t.Error("Expected assertion to pass when no events") + } + + // Publish event + mock.Publish(NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test")) + + // Assert no events - should fail + mockT.Reset() + + if mock.AssertNoEvents(mockT) { + t.Error("Expected assertion to fail when events exist") + } + + if !mockT.errorCalled { + t.Error("Expected error to be called when events exist") + } +} + +func TestMockEventBusConcurrentAccess(t *testing.T) { + mock := NewMockEventBus() + + // Publish from multiple goroutines + done := make(chan bool) + + for i := range 10 { + go func(n int) { + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, fmt.Sprintf("cid-%d", n)) + mock.Publish(event) + + done <- true + }(i) + } + + // Wait for all to complete + for range 10 { + <-done + } + + // Should have all events + if mock.Count() != 10 { + t.Errorf("Expected 10 events, got %d", mock.Count()) + } +} + +// mockTestingT is a test helper that implements TestingT interface. +type mockTestingT struct { + errorCalled bool + lastError string +} + +func (m *mockTestingT) Errorf(format string, args ...interface{}) { + m.errorCalled = true + m.lastError = fmt.Sprintf(format, args...) +} + +func (m *mockTestingT) Reset() { + m.errorCalled = false + m.lastError = "" +} diff --git a/server/events/observability.go b/server/events/observability.go index 54a462173..9a1abdefe 100644 --- a/server/events/observability.go +++ b/server/events/observability.go @@ -1,32 +1,32 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import "sync/atomic" - -// Metrics holds simple counters for event system observability. -// All counters use atomic operations for thread-safety. -type Metrics struct { - // PublishedTotal is the total number of events published to the bus - PublishedTotal atomic.Uint64 - - // DeliveredTotal is the total number of events delivered to subscribers - DeliveredTotal atomic.Uint64 - - // DroppedTotal is the total number of events dropped due to slow consumers - DroppedTotal atomic.Uint64 - - // SubscribersTotal is the current number of active subscribers - // This can be negative temporarily during concurrent operations, but will stabilize - SubscribersTotal atomic.Int64 -} - -// MetricsSnapshot is a point-in-time snapshot of metrics values. -// Unlike Metrics, this is safe to copy and serialize. -type MetricsSnapshot struct { - PublishedTotal uint64 - DeliveredTotal uint64 - DroppedTotal uint64 - SubscribersTotal int64 -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import "sync/atomic" + +// Metrics holds simple counters for event system observability. +// All counters use atomic operations for thread-safety. 
+type Metrics struct { + // PublishedTotal is the total number of events published to the bus + PublishedTotal atomic.Uint64 + + // DeliveredTotal is the total number of events delivered to subscribers + DeliveredTotal atomic.Uint64 + + // DroppedTotal is the total number of events dropped due to slow consumers + DroppedTotal atomic.Uint64 + + // SubscribersTotal is the current number of active subscribers + // This can be negative temporarily during concurrent operations, but will stabilize + SubscribersTotal atomic.Int64 +} + +// MetricsSnapshot is a point-in-time snapshot of metrics values. +// Unlike Metrics, this is safe to copy and serialize. +type MetricsSnapshot struct { + PublishedTotal uint64 + DeliveredTotal uint64 + DroppedTotal uint64 + SubscribersTotal int64 +} diff --git a/server/events/safe.go b/server/events/safe.go index 3f9ab4d02..09d671993 100644 --- a/server/events/safe.go +++ b/server/events/safe.go @@ -1,126 +1,126 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import eventsv1 "github.com/agntcy/dir/api/events/v1" - -// SafeEventBus is a nil-safe wrapper around EventBus. -// All methods are safe to call even if the underlying bus is nil. -// When nil, all operations become no-ops, making it safe to use -// in services without checking for nil. -type SafeEventBus struct { - bus *EventBus -} - -// NewSafeEventBus creates a nil-safe wrapper around an event bus. -// If bus is nil, all operations will be no-ops. -func NewSafeEventBus(bus *EventBus) *SafeEventBus { - return &SafeEventBus{bus: bus} -} - -// Publish publishes an event. No-op if bus is nil. -func (s *SafeEventBus) Publish(event *Event) { - if s.bus != nil { - s.bus.Publish(event) - } -} - -// Subscribe creates a subscription. Returns nil channel if bus is nil. -func (s *SafeEventBus) Subscribe(req *eventsv1.ListenRequest) (string, <-chan *Event) { - if s.bus != nil { - return s.bus.Subscribe(req) - } - - return "", nil -} - -// Unsubscribe removes a subscription. No-op if bus is nil. -func (s *SafeEventBus) Unsubscribe(id string) { - if s.bus != nil { - s.bus.Unsubscribe(id) - } -} - -// Convenience methods - all nil-safe - -// RecordPushed publishes a record push event. No-op if bus is nil. -func (s *SafeEventBus) RecordPushed(cid string, labels []string) { - if s.bus != nil { - s.bus.RecordPushed(cid, labels) - } -} - -// RecordPulled publishes a record pull event. No-op if bus is nil. -func (s *SafeEventBus) RecordPulled(cid string, labels []string) { - if s.bus != nil { - s.bus.RecordPulled(cid, labels) - } -} - -// RecordDeleted publishes a record delete event. No-op if bus is nil. -func (s *SafeEventBus) RecordDeleted(cid string) { - if s.bus != nil { - s.bus.RecordDeleted(cid) - } -} - -// RecordPublished publishes a record publish event. No-op if bus is nil. -func (s *SafeEventBus) RecordPublished(cid string, labels []string) { - if s.bus != nil { - s.bus.RecordPublished(cid, labels) - } -} - -// RecordUnpublished publishes a record unpublish event. No-op if bus is nil. -func (s *SafeEventBus) RecordUnpublished(cid string) { - if s.bus != nil { - s.bus.RecordUnpublished(cid) - } -} - -// SyncCreated publishes a sync created event. No-op if bus is nil. -func (s *SafeEventBus) SyncCreated(syncID, remoteURL string) { - if s.bus != nil { - s.bus.SyncCreated(syncID, remoteURL) - } -} - -// SyncCompleted publishes a sync completed event. No-op if bus is nil. 
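Usage note: a short subscriber-side sketch of the SafeEventBus wrapper defined in this file (import paths inferred from the module layout; delivery is asynchronous, so the package's own tests call WaitForAsyncPublish rather than blocking on the channel):

package main

import (
	"fmt"

	eventsv1 "github.com/agntcy/dir/api/events/v1"
	events "github.com/agntcy/dir/server/events"
)

func main() {
	safe := events.NewSafeEventBus(events.NewEventBus())

	subID, ch := safe.Subscribe(&eventsv1.ListenRequest{
		LabelFilters: []string{"/skills/AI"},
	})
	defer safe.Unsubscribe(subID)

	safe.RecordPushed("bafytest123", []string{"/skills/AI"})

	ev := <-ch // blocking receive for brevity only
	fmt.Println(ev.ResourceID) // bafytest123
}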
-func (s *SafeEventBus) SyncCompleted(syncID, remoteURL string, recordCount int) { - if s.bus != nil { - s.bus.SyncCompleted(syncID, remoteURL, recordCount) - } -} - -// SyncFailed publishes a sync failed event. No-op if bus is nil. -func (s *SafeEventBus) SyncFailed(syncID, remoteURL, errorMsg string) { - if s.bus != nil { - s.bus.SyncFailed(syncID, remoteURL, errorMsg) - } -} - -// RecordSigned publishes a record signed event. No-op if bus is nil. -func (s *SafeEventBus) RecordSigned(cid, signer string) { - if s.bus != nil { - s.bus.RecordSigned(cid, signer) - } -} - -// SubscriberCount returns the number of active subscribers. Returns 0 if bus is nil. -func (s *SafeEventBus) SubscriberCount() int { - if s.bus != nil { - return s.bus.SubscriberCount() - } - - return 0 -} - -// GetMetrics returns a snapshot of metrics. Returns zero metrics if bus is nil. -func (s *SafeEventBus) GetMetrics() MetricsSnapshot { - if s.bus != nil { - return s.bus.GetMetrics() - } - - return MetricsSnapshot{} -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import eventsv1 "github.com/agntcy/dir/api/events/v1" + +// SafeEventBus is a nil-safe wrapper around EventBus. +// All methods are safe to call even if the underlying bus is nil. +// When nil, all operations become no-ops, making it safe to use +// in services without checking for nil. +type SafeEventBus struct { + bus *EventBus +} + +// NewSafeEventBus creates a nil-safe wrapper around an event bus. +// If bus is nil, all operations will be no-ops. +func NewSafeEventBus(bus *EventBus) *SafeEventBus { + return &SafeEventBus{bus: bus} +} + +// Publish publishes an event. No-op if bus is nil. +func (s *SafeEventBus) Publish(event *Event) { + if s.bus != nil { + s.bus.Publish(event) + } +} + +// Subscribe creates a subscription. Returns nil channel if bus is nil. +func (s *SafeEventBus) Subscribe(req *eventsv1.ListenRequest) (string, <-chan *Event) { + if s.bus != nil { + return s.bus.Subscribe(req) + } + + return "", nil +} + +// Unsubscribe removes a subscription. No-op if bus is nil. +func (s *SafeEventBus) Unsubscribe(id string) { + if s.bus != nil { + s.bus.Unsubscribe(id) + } +} + +// Convenience methods - all nil-safe + +// RecordPushed publishes a record push event. No-op if bus is nil. +func (s *SafeEventBus) RecordPushed(cid string, labels []string) { + if s.bus != nil { + s.bus.RecordPushed(cid, labels) + } +} + +// RecordPulled publishes a record pull event. No-op if bus is nil. +func (s *SafeEventBus) RecordPulled(cid string, labels []string) { + if s.bus != nil { + s.bus.RecordPulled(cid, labels) + } +} + +// RecordDeleted publishes a record delete event. No-op if bus is nil. +func (s *SafeEventBus) RecordDeleted(cid string) { + if s.bus != nil { + s.bus.RecordDeleted(cid) + } +} + +// RecordPublished publishes a record publish event. No-op if bus is nil. +func (s *SafeEventBus) RecordPublished(cid string, labels []string) { + if s.bus != nil { + s.bus.RecordPublished(cid, labels) + } +} + +// RecordUnpublished publishes a record unpublish event. No-op if bus is nil. +func (s *SafeEventBus) RecordUnpublished(cid string) { + if s.bus != nil { + s.bus.RecordUnpublished(cid) + } +} + +// SyncCreated publishes a sync created event. No-op if bus is nil. +func (s *SafeEventBus) SyncCreated(syncID, remoteURL string) { + if s.bus != nil { + s.bus.SyncCreated(syncID, remoteURL) + } +} + +// SyncCompleted publishes a sync completed event. No-op if bus is nil. 
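Usage note: publisher-side, the nil-safety contract documented above means call sites never need a nil check. A sketch under the same import-path assumption (IDs and URL are hypothetical):

package main

import events "github.com/agntcy/dir/server/events"

func main() {
	// With a nil bus every call is a silent no-op: no panic, no events.
	disabled := events.NewSafeEventBus(nil)
	disabled.RecordPushed("bafytest123", []string{"/skills/AI"})

	// With a real bus the same call sites publish normally.
	enabled := events.NewSafeEventBus(events.NewEventBus())
	enabled.SyncCompleted("sync-1", "https://directory.example", 42) // hypothetical sync ID and URL

	// Publish counters are incremented synchronously, so this is safe to check immediately.
	if enabled.GetMetrics().PublishedTotal != 1 {
		panic("expected one published event")
	}
}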
+func (s *SafeEventBus) SyncCompleted(syncID, remoteURL string, recordCount int) { + if s.bus != nil { + s.bus.SyncCompleted(syncID, remoteURL, recordCount) + } +} + +// SyncFailed publishes a sync failed event. No-op if bus is nil. +func (s *SafeEventBus) SyncFailed(syncID, remoteURL, errorMsg string) { + if s.bus != nil { + s.bus.SyncFailed(syncID, remoteURL, errorMsg) + } +} + +// RecordSigned publishes a record signed event. No-op if bus is nil. +func (s *SafeEventBus) RecordSigned(cid, signer string) { + if s.bus != nil { + s.bus.RecordSigned(cid, signer) + } +} + +// SubscriberCount returns the number of active subscribers. Returns 0 if bus is nil. +func (s *SafeEventBus) SubscriberCount() int { + if s.bus != nil { + return s.bus.SubscriberCount() + } + + return 0 +} + +// GetMetrics returns a snapshot of metrics. Returns zero metrics if bus is nil. +func (s *SafeEventBus) GetMetrics() MetricsSnapshot { + if s.bus != nil { + return s.bus.GetMetrics() + } + + return MetricsSnapshot{} +} diff --git a/server/events/safe_test.go b/server/events/safe_test.go index 5c483cdf6..d51668bf9 100644 --- a/server/events/safe_test.go +++ b/server/events/safe_test.go @@ -1,320 +1,320 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -const ( - testMetadataValue = "value" -) - -func TestSafeEventBusNilSafety(t *testing.T) { - // Create safe bus with nil underlying bus - safeBus := NewSafeEventBus(nil) - - // All operations should be no-ops and not panic - - // Test Publish - should not panic - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") - safeBus.Publish(event) // Should not panic - - // Test Subscribe - should return empty values - subID, ch := safeBus.Subscribe(&eventsv1.ListenRequest{}) - if subID != "" { - t.Error("Expected empty subscription ID for nil bus") - } - - if ch != nil { - t.Error("Expected nil channel for nil bus") - } - - // Test Unsubscribe - should not panic - safeBus.Unsubscribe("any-id") // Should not panic - - // Test builder independently (no longer coupled to bus) - builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") - if builder == nil { - t.Error("Expected builder to be created") - } - - builtEvent := builder.Build() - safeBus.Publish(builtEvent) // Should not panic with nil bus - - // Test all convenience methods - should not panic - safeBus.RecordPushed("cid", []string{"/test"}) - safeBus.RecordPulled("cid", []string{"/test"}) - safeBus.RecordDeleted("cid") - safeBus.RecordPublished("cid", []string{"/test"}) - safeBus.RecordUnpublished("cid") - safeBus.SyncCreated("sync-id", "url") - safeBus.SyncCompleted("sync-id", "url", 10) - safeBus.SyncFailed("sync-id", "url", "error") - safeBus.RecordSigned("cid", "signer") - - // Test SubscriberCount - should return 0 - count := safeBus.SubscriberCount() - if count != 0 { - t.Errorf("Expected subscriber count 0 for nil bus, got %d", count) - } - - // Test GetMetrics - should return zero metrics - metrics := safeBus.GetMetrics() - if metrics.PublishedTotal != 0 || metrics.DeliveredTotal != 0 { - t.Error("Expected zero metrics for nil bus") - } -} - -func TestSafeEventBusDelegation(t *testing.T) { - // Create safe bus with real underlying bus - bus := NewEventBus() - safeBus := NewSafeEventBus(bus) - - // Subscribe to verify events are actually published - req := &eventsv1.ListenRequest{} - - subID, eventCh := 
safeBus.Subscribe(req) - if subID == "" { - t.Error("Expected non-empty subscription ID") - } - - if eventCh == nil { - t.Error("Expected non-nil event channel") - } - - defer safeBus.Unsubscribe(subID) - - // Publish via safe bus - safeBus.RecordPushed("bafytest123", []string{"/skills/AI"}) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Verify event was received - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) - } - - if event.ResourceID != "bafytest123" { - t.Errorf("Expected bafytest123, got %s", event.ResourceID) - } - default: - t.Error("Expected to receive event") - } - - // Test SubscriberCount delegation - count := safeBus.SubscriberCount() - if count != 1 { - t.Errorf("Expected subscriber count 1, got %d", count) - } - - // Test GetMetrics delegation - metrics := safeBus.GetMetrics() - if metrics.PublishedTotal == 0 { - t.Error("Expected non-zero published count") - } -} - -func TestSafeEventBusAllConvenienceMethods(t *testing.T) { - bus := NewEventBus() - safeBus := NewSafeEventBus(bus) - - // Subscribe to all events - req := &eventsv1.ListenRequest{} - - subID, eventCh := safeBus.Subscribe(req) - defer safeBus.Unsubscribe(subID) - - tests := []struct { - name string - publish func() - expected eventsv1.EventType - }{ - { - name: "RecordPushed", - publish: func() { safeBus.RecordPushed("cid1", []string{"/test"}) }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - }, - { - name: "RecordPulled", - publish: func() { safeBus.RecordPulled("cid2", []string{"/test"}) }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, - }, - { - name: "RecordDeleted", - publish: func() { safeBus.RecordDeleted("cid3") }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, - }, - { - name: "RecordPublished", - publish: func() { safeBus.RecordPublished("cid4", []string{"/test"}) }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - }, - { - name: "RecordUnpublished", - publish: func() { safeBus.RecordUnpublished("cid5") }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, - }, - { - name: "SyncCreated", - publish: func() { safeBus.SyncCreated("sync1", "url") }, - expected: eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, - }, - { - name: "SyncCompleted", - publish: func() { safeBus.SyncCompleted("sync2", "url", 10) }, - expected: eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, - }, - { - name: "SyncFailed", - publish: func() { safeBus.SyncFailed("sync3", "url", "error") }, - expected: eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, - }, - { - name: "RecordSigned", - publish: func() { safeBus.RecordSigned("cid6", "signer") }, - expected: eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Publish event - tt.publish() - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Receive and verify - select { - case event := <-eventCh: - if event.Type != tt.expected { - t.Errorf("Expected type %v, got %v", tt.expected, event.Type) - } - default: - t.Error("Expected to receive event") - } - }) - } -} - -func TestSafeEventBusBuilderWithNilBus(t *testing.T) { - safeBus := NewSafeEventBus(nil) - - // Builder is now independent - no need for bus - builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") - if builder == nil { - t.Fatal("Expected builder to be returned") - } - - // Build should work - event := 
builder.WithLabels([]string{"/test"}).Build() - if event == nil { - t.Error("Expected event to be built") - } - - // Publish should not panic (SafeEventBus handles nil) - safeBus.Publish(event) // Should be no-op -} - -func TestSafeEventBusBuilderWithRealBus(t *testing.T) { - bus := NewEventBus() - safeBus := NewSafeEventBus(bus) - - // Subscribe - req := &eventsv1.ListenRequest{} - - subID, eventCh := safeBus.Subscribe(req) - defer safeBus.Unsubscribe(subID) - - // Use builder independently, then publish explicitly - event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest"). - WithLabels([]string{"/skills/AI"}). - WithMetadata("key", testMetadataValue). - Build() - safeBus.Publish(event) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Verify event received - select { - case receivedEvent := <-eventCh: - if receivedEvent.ResourceID != "bafytest" { - t.Errorf("Expected bafytest, got %s", receivedEvent.ResourceID) - } - - if len(receivedEvent.Labels) != 1 { - t.Errorf("Expected 1 label, got %d", len(receivedEvent.Labels)) - } - - if receivedEvent.Metadata["key"] != testMetadataValue { - t.Errorf("Expected metadata key=value, got %v", receivedEvent.Metadata) - } - default: - t.Error("Expected to receive event") - } -} - -func TestSafeEventBusUnsubscribe(t *testing.T) { - bus := NewEventBus() - safeBus := NewSafeEventBus(bus) - - // Subscribe - req := &eventsv1.ListenRequest{} - subID, eventCh := safeBus.Subscribe(req) - - // Verify subscriber exists - if safeBus.SubscriberCount() != 1 { - t.Error("Expected 1 subscriber") - } - - // Unsubscribe - safeBus.Unsubscribe(subID) - - // Verify subscriber removed - if safeBus.SubscriberCount() != 0 { - t.Error("Expected 0 subscribers") - } - - // Channel should be closed - _, ok := <-eventCh - if ok { - t.Error("Expected channel to be closed") - } -} - -func TestSafeEventBusPublishDirect(t *testing.T) { - bus := NewEventBus() - safeBus := NewSafeEventBus(bus) - - // Subscribe - req := &eventsv1.ListenRequest{} - - subID, eventCh := safeBus.Subscribe(req) - defer safeBus.Unsubscribe(subID) - - // Create and publish event directly - event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test-cid") - event.Labels = []string{"/test"} - safeBus.Publish(event) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - // Verify event received - select { - case received := <-eventCh: - if received.ResourceID != "test-cid" { - t.Errorf("Expected test-cid, got %s", received.ResourceID) - } - default: - t.Error("Expected to receive event") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +const ( + testMetadataValue = "value" +) + +func TestSafeEventBusNilSafety(t *testing.T) { + // Create safe bus with nil underlying bus + safeBus := NewSafeEventBus(nil) + + // All operations should be no-ops and not panic + + // Test Publish - should not panic + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") + safeBus.Publish(event) // Should not panic + + // Test Subscribe - should return empty values + subID, ch := safeBus.Subscribe(&eventsv1.ListenRequest{}) + if subID != "" { + t.Error("Expected empty subscription ID for nil bus") + } + + if ch != nil { + t.Error("Expected nil channel for nil bus") + } + + // Test Unsubscribe - should not panic + safeBus.Unsubscribe("any-id") // Should not panic + + // Test builder 
independently (no longer coupled to bus) + builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") + if builder == nil { + t.Error("Expected builder to be created") + } + + builtEvent := builder.Build() + safeBus.Publish(builtEvent) // Should not panic with nil bus + + // Test all convenience methods - should not panic + safeBus.RecordPushed("cid", []string{"/test"}) + safeBus.RecordPulled("cid", []string{"/test"}) + safeBus.RecordDeleted("cid") + safeBus.RecordPublished("cid", []string{"/test"}) + safeBus.RecordUnpublished("cid") + safeBus.SyncCreated("sync-id", "url") + safeBus.SyncCompleted("sync-id", "url", 10) + safeBus.SyncFailed("sync-id", "url", "error") + safeBus.RecordSigned("cid", "signer") + + // Test SubscriberCount - should return 0 + count := safeBus.SubscriberCount() + if count != 0 { + t.Errorf("Expected subscriber count 0 for nil bus, got %d", count) + } + + // Test GetMetrics - should return zero metrics + metrics := safeBus.GetMetrics() + if metrics.PublishedTotal != 0 || metrics.DeliveredTotal != 0 { + t.Error("Expected zero metrics for nil bus") + } +} + +func TestSafeEventBusDelegation(t *testing.T) { + // Create safe bus with real underlying bus + bus := NewEventBus() + safeBus := NewSafeEventBus(bus) + + // Subscribe to verify events are actually published + req := &eventsv1.ListenRequest{} + + subID, eventCh := safeBus.Subscribe(req) + if subID == "" { + t.Error("Expected non-empty subscription ID") + } + + if eventCh == nil { + t.Error("Expected non-nil event channel") + } + + defer safeBus.Unsubscribe(subID) + + // Publish via safe bus + safeBus.RecordPushed("bafytest123", []string{"/skills/AI"}) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Verify event was received + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED, got %v", event.Type) + } + + if event.ResourceID != "bafytest123" { + t.Errorf("Expected bafytest123, got %s", event.ResourceID) + } + default: + t.Error("Expected to receive event") + } + + // Test SubscriberCount delegation + count := safeBus.SubscriberCount() + if count != 1 { + t.Errorf("Expected subscriber count 1, got %d", count) + } + + // Test GetMetrics delegation + metrics := safeBus.GetMetrics() + if metrics.PublishedTotal == 0 { + t.Error("Expected non-zero published count") + } +} + +func TestSafeEventBusAllConvenienceMethods(t *testing.T) { + bus := NewEventBus() + safeBus := NewSafeEventBus(bus) + + // Subscribe to all events + req := &eventsv1.ListenRequest{} + + subID, eventCh := safeBus.Subscribe(req) + defer safeBus.Unsubscribe(subID) + + tests := []struct { + name string + publish func() + expected eventsv1.EventType + }{ + { + name: "RecordPushed", + publish: func() { safeBus.RecordPushed("cid1", []string{"/test"}) }, + expected: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + }, + { + name: "RecordPulled", + publish: func() { safeBus.RecordPulled("cid2", []string{"/test"}) }, + expected: eventsv1.EventType_EVENT_TYPE_RECORD_PULLED, + }, + { + name: "RecordDeleted", + publish: func() { safeBus.RecordDeleted("cid3") }, + expected: eventsv1.EventType_EVENT_TYPE_RECORD_DELETED, + }, + { + name: "RecordPublished", + publish: func() { safeBus.RecordPublished("cid4", []string{"/test"}) }, + expected: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + }, + { + name: "RecordUnpublished", + publish: func() { safeBus.RecordUnpublished("cid5") }, + expected: 
eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED, + }, + { + name: "SyncCreated", + publish: func() { safeBus.SyncCreated("sync1", "url") }, + expected: eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, + }, + { + name: "SyncCompleted", + publish: func() { safeBus.SyncCompleted("sync2", "url", 10) }, + expected: eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED, + }, + { + name: "SyncFailed", + publish: func() { safeBus.SyncFailed("sync3", "url", "error") }, + expected: eventsv1.EventType_EVENT_TYPE_SYNC_FAILED, + }, + { + name: "RecordSigned", + publish: func() { safeBus.RecordSigned("cid6", "signer") }, + expected: eventsv1.EventType_EVENT_TYPE_RECORD_SIGNED, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Publish event + tt.publish() + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Receive and verify + select { + case event := <-eventCh: + if event.Type != tt.expected { + t.Errorf("Expected type %v, got %v", tt.expected, event.Type) + } + default: + t.Error("Expected to receive event") + } + }) + } +} + +func TestSafeEventBusBuilderWithNilBus(t *testing.T) { + safeBus := NewSafeEventBus(nil) + + // Builder is now independent - no need for bus + builder := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test") + if builder == nil { + t.Fatal("Expected builder to be returned") + } + + // Build should work + event := builder.WithLabels([]string{"/test"}).Build() + if event == nil { + t.Error("Expected event to be built") + } + + // Publish should not panic (SafeEventBus handles nil) + safeBus.Publish(event) // Should be no-op +} + +func TestSafeEventBusBuilderWithRealBus(t *testing.T) { + bus := NewEventBus() + safeBus := NewSafeEventBus(bus) + + // Subscribe + req := &eventsv1.ListenRequest{} + + subID, eventCh := safeBus.Subscribe(req) + defer safeBus.Unsubscribe(subID) + + // Use builder independently, then publish explicitly + event := NewEventBuilder(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafytest"). + WithLabels([]string{"/skills/AI"}). + WithMetadata("key", testMetadataValue). 
+ Build() + safeBus.Publish(event) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Verify event received + select { + case receivedEvent := <-eventCh: + if receivedEvent.ResourceID != "bafytest" { + t.Errorf("Expected bafytest, got %s", receivedEvent.ResourceID) + } + + if len(receivedEvent.Labels) != 1 { + t.Errorf("Expected 1 label, got %d", len(receivedEvent.Labels)) + } + + if receivedEvent.Metadata["key"] != testMetadataValue { + t.Errorf("Expected metadata key=value, got %v", receivedEvent.Metadata) + } + default: + t.Error("Expected to receive event") + } +} + +func TestSafeEventBusUnsubscribe(t *testing.T) { + bus := NewEventBus() + safeBus := NewSafeEventBus(bus) + + // Subscribe + req := &eventsv1.ListenRequest{} + subID, eventCh := safeBus.Subscribe(req) + + // Verify subscriber exists + if safeBus.SubscriberCount() != 1 { + t.Error("Expected 1 subscriber") + } + + // Unsubscribe + safeBus.Unsubscribe(subID) + + // Verify subscriber removed + if safeBus.SubscriberCount() != 0 { + t.Error("Expected 0 subscribers") + } + + // Channel should be closed + _, ok := <-eventCh + if ok { + t.Error("Expected channel to be closed") + } +} + +func TestSafeEventBusPublishDirect(t *testing.T) { + bus := NewEventBus() + safeBus := NewSafeEventBus(bus) + + // Subscribe + req := &eventsv1.ListenRequest{} + + subID, eventCh := safeBus.Subscribe(req) + defer safeBus.Unsubscribe(subID) + + // Create and publish event directly + event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "test-cid") + event.Labels = []string{"/test"} + safeBus.Publish(event) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + // Verify event received + select { + case received := <-eventCh: + if received.ResourceID != "test-cid" { + t.Errorf("Expected test-cid, got %s", received.ResourceID) + } + default: + t.Error("Expected to receive event") + } +} diff --git a/server/events/testing.go b/server/events/testing.go index 780d8d60b..5c545f2f2 100644 --- a/server/events/testing.go +++ b/server/events/testing.go @@ -1,239 +1,239 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "fmt" - "sync" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -// Test constants used across all event test files. -const ( - // Test CIDs. - TestCID123 = "bafytest123" - TestCID456 = "bafytest456" - - // Test identifiers. - TestEventID = "test-event-id" - - // Test timing. - testWaitPollingInterval = 10 * time.Millisecond -) - -// MockEventBus records all published events for testing. -// This is useful for verifying that services emit the correct events. -// -// Example usage: -// -// mock := events.NewMockEventBus() -// service := NewMyService(mock) -// -// service.DoSomething() -// -// // Assert event was published -// mock.AssertEventPublished(t, eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) -// -// // Or check details -// events := mock.GetEvents() -// assert.Len(t, events, 1) -// assert.Equal(t, "bafyxxx", events[0].ResourceID) -type MockEventBus struct { - mu sync.Mutex - events []*Event -} - -// NewMockEventBus creates a new mock event bus. -func NewMockEventBus() *MockEventBus { - return &MockEventBus{ - events: make([]*Event, 0), - } -} - -// Publish records the event for later inspection. -func (m *MockEventBus) Publish(event *Event) { - m.mu.Lock() - defer m.mu.Unlock() - - m.events = append(m.events, event) -} - -// GetEvents returns all recorded events (creates a copy). 
-func (m *MockEventBus) GetEvents() []*Event { - m.mu.Lock() - defer m.mu.Unlock() - - events := make([]*Event, len(m.events)) - copy(events, m.events) - - return events -} - -// GetEventsByType returns events of a specific type. -func (m *MockEventBus) GetEventsByType(eventType eventsv1.EventType) []*Event { - m.mu.Lock() - defer m.mu.Unlock() - - var filtered []*Event - - for _, e := range m.events { - if e.Type == eventType { - filtered = append(filtered, e) - } - } - - return filtered -} - -// GetEventByResourceID returns the first event matching the resource ID. -// Returns nil if not found. -func (m *MockEventBus) GetEventByResourceID(resourceID string) *Event { - m.mu.Lock() - defer m.mu.Unlock() - - for _, e := range m.events { - if e.ResourceID == resourceID { - return e - } - } - - return nil -} - -// WaitForEvent waits for an event matching the filter (with timeout). -// Returns the event and true if found, nil and false if timeout. -// -// This is useful for async operations where events may be published -// with a slight delay. -// -// Example: -// -// event, ok := mock.WaitForEvent( -// events.EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), -// time.Second, -// ) -// if !ok { -// t.Fatal("Timeout waiting for event") -// } -func (m *MockEventBus) WaitForEvent(filter Filter, timeout time.Duration) (*Event, bool) { - deadline := time.Now().Add(timeout) - - for time.Now().Before(deadline) { - m.mu.Lock() - - for _, e := range m.events { - if filter(e) { - m.mu.Unlock() - - return e, true - } - } - - m.mu.Unlock() - - time.Sleep(testWaitPollingInterval) - } - - return nil, false -} - -// Reset clears all recorded events. -func (m *MockEventBus) Reset() { - m.mu.Lock() - defer m.mu.Unlock() - - m.events = make([]*Event, 0) -} - -// Count returns the number of recorded events. -func (m *MockEventBus) Count() int { - m.mu.Lock() - defer m.mu.Unlock() - - return len(m.events) -} - -// AssertEventPublished checks if an event of the given type was published. -// Returns true if found, false otherwise. Reports error via TestingT if not found. -func (m *MockEventBus) AssertEventPublished(t TestingT, eventType eventsv1.EventType) bool { - m.mu.Lock() - defer m.mu.Unlock() - - for _, e := range m.events { - if e.Type == eventType { - return true - } - } - - t.Errorf("Expected event type %v was not published. Published events: %s", eventType, m.formatEvents()) - - return false -} - -// AssertEventWithResourceID checks if an event with the given resource ID was published. -// Returns true if found, false otherwise. Reports error via TestingT if not found. -func (m *MockEventBus) AssertEventWithResourceID(t TestingT, resourceID string) bool { - m.mu.Lock() - defer m.mu.Unlock() - - for _, e := range m.events { - if e.ResourceID == resourceID { - return true - } - } - - t.Errorf("Expected event with resource_id %q was not published", resourceID) - - return false -} - -// AssertEventCount checks if the expected number of events were published. -// Reports error via TestingT if count doesn't match. -func (m *MockEventBus) AssertEventCount(t TestingT, expected int) bool { - m.mu.Lock() - defer m.mu.Unlock() - - if len(m.events) != expected { - t.Errorf("Expected %d events, got %d. Events: %s", expected, len(m.events), m.formatEvents()) - - return false - } - - return true -} - -// AssertNoEvents checks that no events were published. -// Reports error via TestingT if any events exist. 
-func (m *MockEventBus) AssertNoEvents(t TestingT) bool { - return m.AssertEventCount(t, 0) -} - -// formatEvents creates a human-readable string of all events (must hold lock). -func (m *MockEventBus) formatEvents() string { - if len(m.events) == 0 { - return "[]" - } - - result := "[" - - for i, e := range m.events { - if i > 0 { - result += ", " - } - - result += fmt.Sprintf("{Type: %v, ResourceID: %s}", e.Type, e.ResourceID) - } - - result += "]" - - return result -} - -// TestingT is a minimal testing interface for assertions. -// This allows the mock to be used with any testing framework. -type TestingT interface { - Errorf(format string, args ...interface{}) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "fmt" + "sync" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +// Test constants used across all event test files. +const ( + // Test CIDs. + TestCID123 = "bafytest123" + TestCID456 = "bafytest456" + + // Test identifiers. + TestEventID = "test-event-id" + + // Test timing. + testWaitPollingInterval = 10 * time.Millisecond +) + +// MockEventBus records all published events for testing. +// This is useful for verifying that services emit the correct events. +// +// Example usage: +// +// mock := events.NewMockEventBus() +// service := NewMyService(mock) +// +// service.DoSomething() +// +// // Assert event was published +// mock.AssertEventPublished(t, eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED) +// +// // Or check details +// events := mock.GetEvents() +// assert.Len(t, events, 1) +// assert.Equal(t, "bafyxxx", events[0].ResourceID) +type MockEventBus struct { + mu sync.Mutex + events []*Event +} + +// NewMockEventBus creates a new mock event bus. +func NewMockEventBus() *MockEventBus { + return &MockEventBus{ + events: make([]*Event, 0), + } +} + +// Publish records the event for later inspection. +func (m *MockEventBus) Publish(event *Event) { + m.mu.Lock() + defer m.mu.Unlock() + + m.events = append(m.events, event) +} + +// GetEvents returns all recorded events (creates a copy). +func (m *MockEventBus) GetEvents() []*Event { + m.mu.Lock() + defer m.mu.Unlock() + + events := make([]*Event, len(m.events)) + copy(events, m.events) + + return events +} + +// GetEventsByType returns events of a specific type. +func (m *MockEventBus) GetEventsByType(eventType eventsv1.EventType) []*Event { + m.mu.Lock() + defer m.mu.Unlock() + + var filtered []*Event + + for _, e := range m.events { + if e.Type == eventType { + filtered = append(filtered, e) + } + } + + return filtered +} + +// GetEventByResourceID returns the first event matching the resource ID. +// Returns nil if not found. +func (m *MockEventBus) GetEventByResourceID(resourceID string) *Event { + m.mu.Lock() + defer m.mu.Unlock() + + for _, e := range m.events { + if e.ResourceID == resourceID { + return e + } + } + + return nil +} + +// WaitForEvent waits for an event matching the filter (with timeout). +// Returns the event and true if found, nil and false if timeout. +// +// This is useful for async operations where events may be published +// with a slight delay. 
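+//
+// Matching is implemented by polling the recorded events every
+// testWaitPollingInterval (10ms) until the deadline, rather than
+// blocking on a channel, so detection can lag by up to one interval.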
+// +// Example: +// +// event, ok := mock.WaitForEvent( +// events.EventTypeFilter(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED), +// time.Second, +// ) +// if !ok { +// t.Fatal("Timeout waiting for event") +// } +func (m *MockEventBus) WaitForEvent(filter Filter, timeout time.Duration) (*Event, bool) { + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + m.mu.Lock() + + for _, e := range m.events { + if filter(e) { + m.mu.Unlock() + + return e, true + } + } + + m.mu.Unlock() + + time.Sleep(testWaitPollingInterval) + } + + return nil, false +} + +// Reset clears all recorded events. +func (m *MockEventBus) Reset() { + m.mu.Lock() + defer m.mu.Unlock() + + m.events = make([]*Event, 0) +} + +// Count returns the number of recorded events. +func (m *MockEventBus) Count() int { + m.mu.Lock() + defer m.mu.Unlock() + + return len(m.events) +} + +// AssertEventPublished checks if an event of the given type was published. +// Returns true if found, false otherwise. Reports error via TestingT if not found. +func (m *MockEventBus) AssertEventPublished(t TestingT, eventType eventsv1.EventType) bool { + m.mu.Lock() + defer m.mu.Unlock() + + for _, e := range m.events { + if e.Type == eventType { + return true + } + } + + t.Errorf("Expected event type %v was not published. Published events: %s", eventType, m.formatEvents()) + + return false +} + +// AssertEventWithResourceID checks if an event with the given resource ID was published. +// Returns true if found, false otherwise. Reports error via TestingT if not found. +func (m *MockEventBus) AssertEventWithResourceID(t TestingT, resourceID string) bool { + m.mu.Lock() + defer m.mu.Unlock() + + for _, e := range m.events { + if e.ResourceID == resourceID { + return true + } + } + + t.Errorf("Expected event with resource_id %q was not published", resourceID) + + return false +} + +// AssertEventCount checks if the expected number of events were published. +// Reports error via TestingT if count doesn't match. +func (m *MockEventBus) AssertEventCount(t TestingT, expected int) bool { + m.mu.Lock() + defer m.mu.Unlock() + + if len(m.events) != expected { + t.Errorf("Expected %d events, got %d. Events: %s", expected, len(m.events), m.formatEvents()) + + return false + } + + return true +} + +// AssertNoEvents checks that no events were published. +// Reports error via TestingT if any events exist. +func (m *MockEventBus) AssertNoEvents(t TestingT) bool { + return m.AssertEventCount(t, 0) +} + +// formatEvents creates a human-readable string of all events (must hold lock). +func (m *MockEventBus) formatEvents() string { + if len(m.events) == 0 { + return "[]" + } + + result := "[" + + for i, e := range m.events { + if i > 0 { + result += ", " + } + + result += fmt.Sprintf("{Type: %v, ResourceID: %s}", e.Type, e.ResourceID) + } + + result += "]" + + return result +} + +// TestingT is a minimal testing interface for assertions. +// This allows the mock to be used with any testing framework. +type TestingT interface { + Errorf(format string, args ...interface{}) +} diff --git a/server/events/types.go b/server/events/types.go index 201a7260c..e86ba29d6 100644 --- a/server/events/types.go +++ b/server/events/types.go @@ -1,124 +1,124 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package events provides a lightweight, real-time event streaming system -// that enables external clients to subscribe to system events via gRPC. 
-// -// The event system captures events from all major system operations (storage, -// routing, synchronization, signing) and delivers them to interested subscribers -// with configurable filtering. -// -// Key characteristics: -// - Simple: In-memory event bus with no external dependencies -// - Real-time: Events delivered from subscription time forward (no history/replay) -// - Filtered: Client-side control over event types, labels, and CIDs -// - Type-safe: Protocol buffer enums for all event types -// - Observable: Built-in metrics and logging for monitoring -// -// Usage: -// -// // Create event bus -// := events.NewEventBus() -// -// // Publish events -// bus.RecordPushed("bafyxxx", []string{"/skills/AI"}) -// -// // Subscribe to events -// req := &eventsv1.ListenRequest{ -// EventTypes: []eventsv1.EventType{eventsv1.EVENT_TYPE_RECORD_PUSHED}, -// } -// subID, eventCh := bus.Subscribe(req) -// defer bus.Unsubscribe(subID) -// -// // Receive events -// for event := range eventCh { -// fmt.Printf("Event: %s\n", event.Type) -// } -package events - -import ( - "errors" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/google/uuid" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// Event represents a system event that occurred. -// It is the internal representation used by the event bus. -type Event struct { - // ID is a unique identifier for this event (generated automatically) - ID string - - // Type is the kind of event that occurred - Type eventsv1.EventType - - // Timestamp is when the event occurred (generated automatically) - Timestamp time.Time - - // ResourceID is the identifier of the resource this event is about - // (e.g., CID for records, sync_id for syncs) - ResourceID string - - // Labels are optional labels associated with the record (for record events) - Labels []string - - // Metadata contains optional additional context for the event. - // This provides flexibility for event-specific data. - Metadata map[string]string -} - -// NewEvent creates a new event with auto-generated ID and timestamp. -// -// Parameters: -// - eventType: The type of event (e.g., EVENT_TYPE_RECORD_PUSHED) -// - resourceID: The resource identifier (e.g., CID, sync_id) -// -// Returns a new Event with ID and Timestamp populated. -func NewEvent(eventType eventsv1.EventType, resourceID string) *Event { - return &Event{ - ID: uuid.New().String(), - Type: eventType, - Timestamp: time.Now(), - ResourceID: resourceID, - Metadata: make(map[string]string), - } -} - -// ToProto converts the internal Event to its protobuf representation. -// This is used when streaming events to gRPC clients. -func (e *Event) ToProto() *eventsv1.Event { - return &eventsv1.Event{ - Id: e.ID, - Type: e.Type, - Timestamp: timestamppb.New(e.Timestamp), - ResourceId: e.ResourceID, - Labels: e.Labels, - Metadata: e.Metadata, - } -} - -// Validate checks if the event is well-formed and safe to process. -// This prevents malformed events from being published. -// -// Returns an error if the event is invalid, nil otherwise. 
-func (e *Event) Validate() error {
-	if e.ID == "" {
-		return errors.New("event ID is required")
-	}
-
-	if e.Type == eventsv1.EventType_EVENT_TYPE_UNSPECIFIED {
-		return errors.New("event type is required")
-	}
-
-	if e.ResourceID == "" {
-		return errors.New("resource ID is required")
-	}
-
-	if e.Timestamp.IsZero() {
-		return errors.New("timestamp is required")
-	}
-
-	return nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+// Package events provides a lightweight, real-time event streaming system
+// that enables external clients to subscribe to system events via gRPC.
+//
+// The event system captures events from all major system operations (storage,
+// routing, synchronization, signing) and delivers them to interested subscribers
+// with configurable filtering.
+//
+// Key characteristics:
+//   - Simple: In-memory event bus with no external dependencies
+//   - Real-time: Events delivered from subscription time forward (no history/replay)
+//   - Filtered: Client-side control over event types, labels, and CIDs
+//   - Type-safe: Protocol buffer enums for all event types
+//   - Observable: Built-in metrics and logging for monitoring
+//
+// Usage:
+//
+//	// Create event bus
+//	bus := events.NewEventBus()
+//
+//	// Publish events
+//	bus.RecordPushed("bafyxxx", []string{"/skills/AI"})
+//
+//	// Subscribe to events
+//	req := &eventsv1.ListenRequest{
+//		EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED},
+//	}
+//	subID, eventCh := bus.Subscribe(req)
+//	defer bus.Unsubscribe(subID)
+//
+//	// Receive events
+//	for event := range eventCh {
+//		fmt.Printf("Event: %s\n", event.Type)
+//	}
+package events
+
+import (
+	"errors"
+	"time"
+
+	eventsv1 "github.com/agntcy/dir/api/events/v1"
+	"github.com/google/uuid"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Event represents a system event that occurred.
+// It is the internal representation used by the event bus.
+type Event struct {
+	// ID is a unique identifier for this event (generated automatically)
+	ID string
+
+	// Type is the kind of event that occurred
+	Type eventsv1.EventType
+
+	// Timestamp is when the event occurred (generated automatically)
+	Timestamp time.Time
+
+	// ResourceID is the identifier of the resource this event is about
+	// (e.g., CID for records, sync_id for syncs)
+	ResourceID string
+
+	// Labels are optional labels associated with the record (for record events)
+	Labels []string
+
+	// Metadata contains optional additional context for the event.
+	// This provides flexibility for event-specific data.
+	Metadata map[string]string
+}
+
+// NewEvent creates a new event with auto-generated ID and timestamp.
+//
+// Parameters:
+//   - eventType: The type of event (e.g., EVENT_TYPE_RECORD_PUSHED)
+//   - resourceID: The resource identifier (e.g., CID, sync_id)
+//
+// Returns a new Event with ID and Timestamp populated.
+func NewEvent(eventType eventsv1.EventType, resourceID string) *Event {
+	return &Event{
+		ID:         uuid.New().String(),
+		Type:       eventType,
+		Timestamp:  time.Now(),
+		ResourceID: resourceID,
+		Metadata:   make(map[string]string),
+	}
+}
+
+// ToProto converts the internal Event to its protobuf representation.
+// This is used when streaming events to gRPC clients.
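+//
+// A minimal usage sketch (illustrative only; the CID below is a
+// placeholder):
+//
+//	event := NewEvent(eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, "bafyxxx")
+//	pb := event.ToProto()
+//	fmt.Printf("event %s: %s\n", pb.GetId(), pb.GetType())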
+func (e *Event) ToProto() *eventsv1.Event { + return &eventsv1.Event{ + Id: e.ID, + Type: e.Type, + Timestamp: timestamppb.New(e.Timestamp), + ResourceId: e.ResourceID, + Labels: e.Labels, + Metadata: e.Metadata, + } +} + +// Validate checks if the event is well-formed and safe to process. +// This prevents malformed events from being published. +// +// Returns an error if the event is invalid, nil otherwise. +func (e *Event) Validate() error { + if e.ID == "" { + return errors.New("event ID is required") + } + + if e.Type == eventsv1.EventType_EVENT_TYPE_UNSPECIFIED { + return errors.New("event type is required") + } + + if e.ResourceID == "" { + return errors.New("resource ID is required") + } + + if e.Timestamp.IsZero() { + return errors.New("timestamp is required") + } + + return nil +} diff --git a/server/events/types_test.go b/server/events/types_test.go index af53bf10a..e48b8caee 100644 --- a/server/events/types_test.go +++ b/server/events/types_test.go @@ -1,186 +1,186 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package events - -import ( - "testing" - "time" - - eventsv1 "github.com/agntcy/dir/api/events/v1" -) - -func TestNewEvent(t *testing.T) { - eventType := eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED - resourceID := "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" - - event := NewEvent(eventType, resourceID) - - // Check that ID was generated - if event.ID == "" { - t.Error("Expected event ID to be generated, got empty string") - } - - // Check that type was set - if event.Type != eventType { - t.Errorf("Expected event type %v, got %v", eventType, event.Type) - } - - // Check that resource ID was set - if event.ResourceID != resourceID { - t.Errorf("Expected resource ID %s, got %s", resourceID, event.ResourceID) - } - - // Check that timestamp was set - if event.Timestamp.IsZero() { - t.Error("Expected timestamp to be set, got zero time") - } - - // Check that timestamp is recent (within 1 second) - now := time.Now() - - diff := now.Sub(event.Timestamp) - if diff < 0 || diff > time.Second { - t.Errorf("Expected timestamp to be recent, got %v (diff: %v)", event.Timestamp, diff) - } - - // Check that metadata map was initialized - if event.Metadata == nil { - t.Error("Expected metadata map to be initialized, got nil") - } -} - -func TestEventToProto(t *testing.T) { - event := &Event{ - ID: "test-id-123", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, - Timestamp: time.Date(2025, 1, 15, 12, 30, 0, 0, time.UTC), - ResourceID: "bafytest123", - Labels: []string{"/skills/AI", "/domains/research"}, - Metadata: map[string]string{ - "key1": "value1", - "key2": "value2", - }, - } - - protoEvent := event.ToProto() - - // Check all fields were converted correctly - if protoEvent.GetId() != event.ID { - t.Errorf("Expected ID %s, got %s", event.ID, protoEvent.GetId()) - } - - if protoEvent.GetType() != event.Type { - t.Errorf("Expected type %v, got %v", event.Type, protoEvent.GetType()) - } - - if protoEvent.GetResourceId() != event.ResourceID { - t.Errorf("Expected resource ID %s, got %s", event.ResourceID, protoEvent.GetResourceId()) - } - - if len(protoEvent.GetLabels()) != len(event.Labels) { - t.Errorf("Expected %d labels, got %d", len(event.Labels), len(protoEvent.GetLabels())) - } - - if len(protoEvent.GetMetadata()) != len(event.Metadata) { - t.Errorf("Expected %d metadata entries, got %d", len(event.Metadata), len(protoEvent.GetMetadata())) - } - - // Check timestamp conversion - if 
protoEvent.GetTimestamp().AsTime().Unix() != event.Timestamp.Unix() { - t.Errorf("Expected timestamp %v, got %v", event.Timestamp, protoEvent.GetTimestamp().AsTime()) - } -} - -func TestEventValidate(t *testing.T) { - tests := []struct { - name string - event *Event - wantError bool - errorMsg string - }{ - { - name: "valid event", - event: &Event{ - ID: "valid-id", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: "bafytest123", - }, - wantError: false, - }, - { - name: "missing ID", - event: &Event{ - ID: "", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: "bafytest123", - }, - wantError: true, - errorMsg: "event ID is required", - }, - { - name: "unspecified type", - event: &Event{ - ID: "test-id", - Type: eventsv1.EventType_EVENT_TYPE_UNSPECIFIED, - Timestamp: time.Now(), - ResourceID: "bafytest123", - }, - wantError: true, - errorMsg: "event type is required", - }, - { - name: "missing resource ID", - event: &Event{ - ID: "test-id", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Now(), - ResourceID: "", - }, - wantError: true, - errorMsg: "resource ID is required", - }, - { - name: "zero timestamp", - event: &Event{ - ID: "test-id", - Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, - Timestamp: time.Time{}, - ResourceID: "bafytest123", - }, - wantError: true, - errorMsg: "timestamp is required", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.event.Validate() - - if tt.wantError { - if err == nil { - t.Errorf("Expected error, got nil") - } else if err.Error() != tt.errorMsg { - t.Errorf("Expected error message %q, got %q", tt.errorMsg, err.Error()) - } - } else { - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - } - }) - } -} - -func TestEventMetadataInitialized(t *testing.T) { - event := NewEvent(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "sync-123") - - // Should be able to add metadata without panic - event.Metadata["key"] = "value" - - if event.Metadata["key"] != "value" { - t.Errorf("Expected metadata key to be set, got %v", event.Metadata) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package events + +import ( + "testing" + "time" + + eventsv1 "github.com/agntcy/dir/api/events/v1" +) + +func TestNewEvent(t *testing.T) { + eventType := eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED + resourceID := "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" + + event := NewEvent(eventType, resourceID) + + // Check that ID was generated + if event.ID == "" { + t.Error("Expected event ID to be generated, got empty string") + } + + // Check that type was set + if event.Type != eventType { + t.Errorf("Expected event type %v, got %v", eventType, event.Type) + } + + // Check that resource ID was set + if event.ResourceID != resourceID { + t.Errorf("Expected resource ID %s, got %s", resourceID, event.ResourceID) + } + + // Check that timestamp was set + if event.Timestamp.IsZero() { + t.Error("Expected timestamp to be set, got zero time") + } + + // Check that timestamp is recent (within 1 second) + now := time.Now() + + diff := now.Sub(event.Timestamp) + if diff < 0 || diff > time.Second { + t.Errorf("Expected timestamp to be recent, got %v (diff: %v)", event.Timestamp, diff) + } + + // Check that metadata map was initialized + if event.Metadata == nil { + t.Error("Expected metadata map to be initialized, got nil") + } +} + +func TestEventToProto(t 
*testing.T) { + event := &Event{ + ID: "test-id-123", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED, + Timestamp: time.Date(2025, 1, 15, 12, 30, 0, 0, time.UTC), + ResourceID: "bafytest123", + Labels: []string{"/skills/AI", "/domains/research"}, + Metadata: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + } + + protoEvent := event.ToProto() + + // Check all fields were converted correctly + if protoEvent.GetId() != event.ID { + t.Errorf("Expected ID %s, got %s", event.ID, protoEvent.GetId()) + } + + if protoEvent.GetType() != event.Type { + t.Errorf("Expected type %v, got %v", event.Type, protoEvent.GetType()) + } + + if protoEvent.GetResourceId() != event.ResourceID { + t.Errorf("Expected resource ID %s, got %s", event.ResourceID, protoEvent.GetResourceId()) + } + + if len(protoEvent.GetLabels()) != len(event.Labels) { + t.Errorf("Expected %d labels, got %d", len(event.Labels), len(protoEvent.GetLabels())) + } + + if len(protoEvent.GetMetadata()) != len(event.Metadata) { + t.Errorf("Expected %d metadata entries, got %d", len(event.Metadata), len(protoEvent.GetMetadata())) + } + + // Check timestamp conversion + if protoEvent.GetTimestamp().AsTime().Unix() != event.Timestamp.Unix() { + t.Errorf("Expected timestamp %v, got %v", event.Timestamp, protoEvent.GetTimestamp().AsTime()) + } +} + +func TestEventValidate(t *testing.T) { + tests := []struct { + name string + event *Event + wantError bool + errorMsg string + }{ + { + name: "valid event", + event: &Event{ + ID: "valid-id", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: "bafytest123", + }, + wantError: false, + }, + { + name: "missing ID", + event: &Event{ + ID: "", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: "bafytest123", + }, + wantError: true, + errorMsg: "event ID is required", + }, + { + name: "unspecified type", + event: &Event{ + ID: "test-id", + Type: eventsv1.EventType_EVENT_TYPE_UNSPECIFIED, + Timestamp: time.Now(), + ResourceID: "bafytest123", + }, + wantError: true, + errorMsg: "event type is required", + }, + { + name: "missing resource ID", + event: &Event{ + ID: "test-id", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Now(), + ResourceID: "", + }, + wantError: true, + errorMsg: "resource ID is required", + }, + { + name: "zero timestamp", + event: &Event{ + ID: "test-id", + Type: eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED, + Timestamp: time.Time{}, + ResourceID: "bafytest123", + }, + wantError: true, + errorMsg: "timestamp is required", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.event.Validate() + + if tt.wantError { + if err == nil { + t.Errorf("Expected error, got nil") + } else if err.Error() != tt.errorMsg { + t.Errorf("Expected error message %q, got %q", tt.errorMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + } + }) + } +} + +func TestEventMetadataInitialized(t *testing.T) { + event := NewEvent(eventsv1.EventType_EVENT_TYPE_SYNC_CREATED, "sync-123") + + // Should be able to add metadata without panic + event.Metadata["key"] = "value" + + if event.Metadata["key"] != "value" { + t.Errorf("Expected metadata key to be set, got %v", event.Metadata) + } +} diff --git a/server/go.mod b/server/go.mod index f36461410..1adf512e3 100644 --- a/server/go.mod +++ b/server/go.mod @@ -1,271 +1,271 @@ -module github.com/agntcy/dir/server - -go 1.25.2 - -replace ( - github.com/agntcy/dir/api 
=> ../api - github.com/agntcy/dir/utils => ../utils -) - -require ( - buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 - github.com/agntcy/dir/api v0.6.0 - github.com/agntcy/dir/utils v0.6.0 - github.com/agntcy/oasf-sdk/pkg v0.0.14 - github.com/casbin/casbin/v2 v2.120.0 - github.com/glebarez/sqlite v1.11.0 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/ipfs/go-datastore v0.8.2 - github.com/libp2p/go-libp2p v0.44.0 - github.com/libp2p/go-libp2p-gorpc v0.6.0 - github.com/libp2p/go-libp2p-kad-dht v0.30.2 - github.com/libp2p/go-libp2p-pubsub v0.15.0 - github.com/libp2p/go-libp2p-record v0.3.1 - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c - github.com/opencontainers/image-spec v1.1.1 - github.com/spf13/cobra v1.10.2 - github.com/spf13/viper v1.21.0 - github.com/spiffe/go-spiffe/v2 v2.6.0 - github.com/stretchr/testify v1.11.1 - google.golang.org/grpc v1.77.0 - google.golang.org/protobuf v1.36.10 - gorm.io/gorm v1.30.0 - oras.land/oras-go/v2 v2.6.0 - zotregistry.dev/zot/v2 v2.1.11 -) - -require ( - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/moby/term v0.5.2 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/sigstore/cosign/v3 v3.0.3 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/tiendc/go-deepcopy v1.7.1 // indirect - go.yaml.in/yaml/v2 v2.4.3 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect -) - -require ( - buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect - github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/benbjohnson/clock v1.3.5 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect - github.com/casbin/govaluate v1.3.0 // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // 
indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect - github.com/dgraph-io/badger v1.6.2 // indirect - github.com/dgraph-io/ristretto v0.0.2 // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/distribution/distribution/v3 v3.0.0 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/flynn/noise v1.1.0 // indirect - github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/glebarez/go-sqlite v1.21.2 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect - github.com/google/uuid v1.6.0 - github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/huin/goupnp v1.3.0 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/ipfs/boxo v0.29.1 // indirect - github.com/ipfs/go-cid v0.5.0 - github.com/ipfs/go-ds-badger v0.3.4 - github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipld/go-ipld-prime v0.21.0 // indirect - github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/jinzhu/inflection v1.0.0 // indirect - github.com/jinzhu/now v1.1.5 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect - github.com/koron/go-ssdp v0.0.6 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.2.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.4.1 
// indirect - github.com/libp2p/go-libp2p-kbucket v0.6.5 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect - github.com/libp2p/go-msgio v0.3.0 // indirect - github.com/libp2p/go-netroute v0.3.0 // indirect - github.com/libp2p/go-reuseport v0.4.0 // indirect - github.com/libp2p/go-yamux/v5 v5.0.1 // indirect - github.com/libp2p/zeroconf/v2 v2.2.0 // indirect - github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.66 // indirect - github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect - github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.16.0 - github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect - github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multicodec v0.9.1 // indirect - github.com/multiformats/go-multihash v0.2.3 - github.com/multiformats/go-multistream v0.6.1 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/ncruces/go-strftime v1.0.0 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pion/datachannel v1.5.10 // indirect - github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/dtls/v3 v3.0.6 // indirect - github.com/pion/ice/v4 v4.0.10 // indirect - github.com/pion/interceptor v0.1.40 // indirect - github.com/pion/logging v0.2.3 // indirect - github.com/pion/mdns/v2 v2.0.7 // indirect - github.com/pion/randutil v0.1.0 // indirect - github.com/pion/rtcp v1.2.15 // indirect - github.com/pion/rtp v1.8.19 // indirect - github.com/pion/sctp v1.8.39 // indirect - github.com/pion/sdp/v3 v3.0.13 // indirect - github.com/pion/srtp/v3 v3.0.6 // indirect - github.com/pion/stun v0.6.1 // indirect - github.com/pion/stun/v3 v3.0.0 // indirect - github.com/pion/transport/v2 v2.2.10 // indirect - github.com/pion/transport/v3 v3.0.7 // indirect - github.com/pion/turn/v4 v4.0.2 // indirect - github.com/pion/webrtc/v4 v4.1.2 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.23.2 - github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/common v0.67.4 // indirect - github.com/prometheus/procfs v0.19.2 // indirect - github.com/quic-go/qpack v0.5.1 // indirect - github.com/quic-go/quic-go v0.55.0 // indirect - github.com/quic-go/webtransport-go v0.9.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - 
github.com/sassoftware/relic v7.2.1+incompatible // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
-	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/sigstore/protobuf-specs v0.5.0 // indirect
-	github.com/sigstore/rekor v1.4.3 // indirect
-	github.com/sigstore/sigstore v1.10.0 // indirect
-	github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
-	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
-	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
-	github.com/spaolacci/murmur3 v1.1.0 // indirect
-	github.com/spf13/afero v1.15.0 // indirect
-	github.com/spf13/cast v1.10.0 // indirect
-	github.com/spf13/pflag v1.0.10 // indirect
-	github.com/stretchr/objx v0.5.2 // indirect
-	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
-	github.com/theupdateframework/go-tuf v0.7.0 // indirect
-	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
-	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
-	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
-	github.com/transparency-dev/merkle v0.0.2 // indirect
-	github.com/ugorji/go/codec v1.2.6 // indirect
-	github.com/vbatts/tar-split v0.12.2 // indirect
-	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
-	github.com/wlynxg/anet v0.0.5 // indirect
-	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
-	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
-	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.mongodb.org/mongo-driver v1.17.6 // indirect
-	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
-	go.opentelemetry.io/otel v1.38.0 // indirect
-	go.opentelemetry.io/otel/metric v1.38.0 // indirect
-	go.opentelemetry.io/otel/trace v1.38.0 // indirect
-	go.uber.org/dig v1.19.0 // indirect
-	go.uber.org/fx v1.24.0 // indirect
-	go.uber.org/mock v0.5.2 // indirect
-	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.27.1 // indirect
-	golang.org/x/crypto v0.45.0
-	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
-	golang.org/x/mod v0.30.0 // indirect
-	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/oauth2 v0.33.0 // indirect
-	golang.org/x/sync v0.18.0 // indirect
-	golang.org/x/sys v0.38.0 // indirect
-	golang.org/x/term v0.37.0 // indirect
-	golang.org/x/text v0.31.0 // indirect
-	golang.org/x/time v0.14.0
-	golang.org/x/tools v0.39.0 // indirect
-	gonum.org/v1/gonum v0.16.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-	lukechampine.com/blake3 v1.4.1 // indirect
-	modernc.org/libc v1.66.10 // indirect
-	modernc.org/mathutil v1.7.1 // indirect
-	modernc.org/memory v1.11.0 // indirect
-	modernc.org/sqlite v1.40.0 // indirect
-)
+module github.com/agntcy/dir/server
+
+go 1.25.2
+
+replace (
+	github.com/agntcy/dir/api => ../api
+	github.com/agntcy/dir/utils => ../utils
+)
+
+require (
+	buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1
+	github.com/agntcy/dir/api v0.6.0
+	github.com/agntcy/dir/utils v0.6.0
+	github.com/agntcy/oasf-sdk/pkg v0.0.14
+	github.com/casbin/casbin/v2 v2.120.0
+	github.com/glebarez/sqlite v1.11.0
+	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3
+	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
+	github.com/ipfs/go-datastore v0.8.2
+	github.com/libp2p/go-libp2p v0.44.0
+	github.com/libp2p/go-libp2p-gorpc v0.6.0
+	github.com/libp2p/go-libp2p-kad-dht v0.30.2
+	github.com/libp2p/go-libp2p-pubsub v0.15.0
+	github.com/libp2p/go-libp2p-record v0.3.1
+	github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
+	github.com/opencontainers/image-spec v1.1.1
+	github.com/spf13/cobra v1.10.2
+	github.com/spf13/viper v1.21.0
+	github.com/spiffe/go-spiffe/v2 v2.6.0
+	github.com/stretchr/testify v1.11.1
+	google.golang.org/grpc v1.77.0
+	google.golang.org/protobuf v1.36.10
+	gorm.io/gorm v1.30.0
+	oras.land/oras-go/v2 v2.6.0
+	zotregistry.dev/zot/v2 v2.1.11
+)
+
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
+	github.com/go-chi/chi/v5 v5.2.3 // indirect
+	github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
+	github.com/go-openapi/swag/conv v0.25.4 // indirect
+	github.com/go-openapi/swag/fileutils v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+	github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
+	github.com/go-openapi/swag/loading v0.25.4 // indirect
+	github.com/go-openapi/swag/mangling v0.25.4 // indirect
+	github.com/go-openapi/swag/netutils v0.25.4 // indirect
+	github.com/go-openapi/swag/stringutils v0.25.4 // indirect
+	github.com/go-openapi/swag/typeutils v0.25.4 // indirect
+	github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
+	github.com/moby/term v0.5.2 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+	github.com/sigstore/cosign/v3 v3.0.3 // indirect
+	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
+	github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect
+	github.com/tiendc/go-deepcopy v1.7.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.3 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
+	golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
+)
+
+require (
+	buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 // indirect
+	github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/OneOfOne/xxhash v1.2.8 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+	github.com/benbjohnson/clock v1.3.5 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/blang/semver v3.5.1+incompatible // indirect
+	github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
+	github.com/casbin/govaluate v1.3.0 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+	github.com/cespare/xxhash v1.1.0 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
+	github.com/coreos/go-oidc/v3 v3.17.0 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
+	github.com/dgraph-io/badger v1.6.2 // indirect
+	github.com/dgraph-io/ristretto v0.0.2 // indirect
+	github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+	github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
+	github.com/distribution/distribution/v3 v3.0.0 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/docker/cli v29.0.3+incompatible // indirect
+	github.com/docker/distribution v2.8.3+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.9.4 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/flynn/noise v1.1.0 // indirect
+	github.com/francoispqt/gojay v1.2.13 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/glebarez/go-sqlite v1.21.2 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.3 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/analysis v0.24.1 // indirect
+	github.com/go-openapi/errors v0.22.4 // indirect
+	github.com/go-openapi/jsonpointer v0.22.1 // indirect
+	github.com/go-openapi/jsonreference v0.21.3 // indirect
+	github.com/go-openapi/loads v0.23.2 // indirect
+	github.com/go-openapi/runtime v0.29.2 // indirect
+	github.com/go-openapi/spec v0.22.1 // indirect
+	github.com/go-openapi/strfmt v0.25.0 // indirect
+	github.com/go-openapi/swag v0.25.4 // indirect
+	github.com/go-openapi/validate v0.25.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/google/certificate-transparency-go v1.3.2 // indirect
+	github.com/google/go-containerregistry v0.20.7 // indirect
+	github.com/google/gopacket v1.1.19 // indirect
+	github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
+	github.com/google/uuid v1.6.0
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
+	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+	github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/huin/goupnp v1.3.0 // indirect
+	github.com/in-toto/attestation v1.1.2 // indirect
+	github.com/in-toto/in-toto-golang v0.9.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/ipfs/boxo v0.29.1 // indirect
+	github.com/ipfs/go-cid v0.5.0
+	github.com/ipfs/go-ds-badger v0.3.4
+	github.com/ipfs/go-log v1.0.5 // indirect
+	github.com/ipfs/go-log/v2 v2.5.1 // indirect
+	github.com/ipld/go-ipld-prime v0.21.0 // indirect
+	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+	github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/klauspost/compress v1.18.1 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+	github.com/koron/go-ssdp v0.0.6 // indirect
+	github.com/letsencrypt/boulder v0.20251110.0 // indirect
+	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+	github.com/libp2p/go-cidranger v1.1.0 // indirect
+	github.com/libp2p/go-flow-metrics v0.2.0 // indirect
+	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+	github.com/libp2p/go-libp2p-kbucket v0.6.5 // indirect
+	github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
+	github.com/libp2p/go-msgio v0.3.0 // indirect
+	github.com/libp2p/go-netroute v0.3.0 // indirect
+	github.com/libp2p/go-reuseport v0.4.0 // indirect
+	github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
+	github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
+	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/miekg/dns v1.1.66 // indirect
+	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/mr-tron/base58 v1.2.0 // indirect
+	github.com/multiformats/go-base32 v0.1.0 // indirect
+	github.com/multiformats/go-base36 v0.2.0 // indirect
+	github.com/multiformats/go-multiaddr v0.16.0
+	github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
+	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+	github.com/multiformats/go-multibase v0.2.0 // indirect
+	github.com/multiformats/go-multicodec v0.9.1 // indirect
+	github.com/multiformats/go-multihash v0.2.3
+	github.com/multiformats/go-multistream v0.6.1 // indirect
+	github.com/multiformats/go-varint v0.0.7 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/ncruces/go-strftime v1.0.0 // indirect
+	github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
+	github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+	github.com/pion/datachannel v1.5.10 // indirect
+	github.com/pion/dtls/v2 v2.2.12 // indirect
+	github.com/pion/dtls/v3 v3.0.6 // indirect
+	github.com/pion/ice/v4 v4.0.10 // indirect
+	github.com/pion/interceptor v0.1.40 // indirect
+	github.com/pion/logging v0.2.3 // indirect
+	github.com/pion/mdns/v2 v2.0.7 // indirect
+	github.com/pion/randutil v0.1.0 // indirect
+	github.com/pion/rtcp v1.2.15 // indirect
+	github.com/pion/rtp v1.8.19 // indirect
+	github.com/pion/sctp v1.8.39 // indirect
+	github.com/pion/sdp/v3 v3.0.13 // indirect
+	github.com/pion/srtp/v3 v3.0.6 // indirect
+	github.com/pion/stun v0.6.1 // indirect
+	github.com/pion/stun/v3 v3.0.0 // indirect
+	github.com/pion/transport/v2 v2.2.10 // indirect
+	github.com/pion/transport/v3 v3.0.7 // indirect
+	github.com/pion/turn/v4 v4.0.2 // indirect
+	github.com/pion/webrtc/v4 v4.1.2 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/polydawn/refmt v0.89.0 // indirect
+	github.com/prometheus/client_golang v1.23.2
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.67.4 // indirect
+	github.com/prometheus/procfs v0.19.2 // indirect
+	github.com/quic-go/qpack v0.5.1 // indirect
+	github.com/quic-go/quic-go v0.55.0 // indirect
+	github.com/quic-go/webtransport-go v0.9.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	github.com/sagikazarmark/locafero v0.11.0 // indirect
+	github.com/sassoftware/relic v7.2.1+incompatible // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
+	github.com/sigstore/protobuf-specs v0.5.0 // indirect
+	github.com/sigstore/rekor v1.4.3 // indirect
+	github.com/sigstore/sigstore v1.10.0 // indirect
+	github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 // indirect
+	github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect
+	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/spf13/afero v1.15.0 // indirect
+	github.com/spf13/cast v1.10.0 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
+	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
+	github.com/theupdateframework/go-tuf v0.7.0 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect
+	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
+	github.com/transparency-dev/merkle v0.0.2 // indirect
+	github.com/ugorji/go/codec v1.2.6 // indirect
+	github.com/vbatts/tar-split v0.12.2 // indirect
+	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
+	github.com/wlynxg/anet v0.0.5 // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
+	go.mongodb.org/mongo-driver v1.17.6 // indirect
+	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
+	go.opentelemetry.io/otel/trace v1.38.0 // indirect
+	go.uber.org/dig v1.19.0 // indirect
+	go.uber.org/fx v1.24.0 // indirect
+	go.uber.org/mock v0.5.2 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.27.1 // indirect
+	golang.org/x/crypto v0.45.0
+	golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect
+	golang.org/x/mod v0.30.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
+	golang.org/x/oauth2 v0.33.0 // indirect
+	golang.org/x/sync v0.18.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.37.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
+	golang.org/x/time v0.14.0
+	golang.org/x/tools v0.39.0 // indirect
+	gonum.org/v1/gonum v0.16.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	lukechampine.com/blake3 v1.4.1 // indirect
+	modernc.org/libc v1.66.10 // indirect
+	modernc.org/mathutil v1.7.1 // indirect
+	modernc.org/memory v1.11.0 // indirect
+	modernc.org/sqlite v1.40.0 // indirect
+)
diff --git a/server/go.sum b/server/go.sum
index 117b41ebf..90dfe2ee1 100644
--- a/server/go.sum
+++ b/server/go.sum
@@ -1,1179 +1,1179 @@
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE=
-buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI=
-buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
-cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=
-cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=
-cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4=
-cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ=
-cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
-cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
-cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
-cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
-cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=
-cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=
-cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k=
-cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g=
-cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
-cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
-dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
-dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
-dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
-dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
-filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
-filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg=
-github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM=
-github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
-github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
-github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=
-github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
-github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
-github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
-github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I=
-github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM=
-github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
-github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
-github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
-github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc=
-github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
-github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk=
-github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw=
-github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
-github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
-github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
-github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
-github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/casbin/casbin/v2 v2.120.0 h1:Mo9R/EKZk9aoagFs0OmuCmBYjWJfvbWJiX4aenIJOKY=
-github.com/casbin/casbin/v2 v2.120.0/go.mod h1:Ee33aqGrmES+GNL17L0h9X28wXuo829wnNUnS0edAco=
-github.com/casbin/govaluate v1.3.0 h1:VA0eSY0M2lA86dYd5kPPuNZMUD9QkWnOCnavGrw9myc=
-github.com/casbin/govaluate v1.3.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A=
-github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
-github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
-github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
-github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
-github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
-github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
-github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ=
-github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
-github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
-github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
-github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
-github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
-github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
-github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
-github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
-github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
-github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
-github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM=
-github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU=
-github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
-github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
-github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
-github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
-github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
-github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
-github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
-github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
-github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
-github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
-github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
-github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
-github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
-github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo=
-github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k=
-github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
-github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
-github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
-github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
-github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM=
-github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84=
-github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM=
-github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk=
-github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
-github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
-github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4=
-github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY=
-github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0=
-github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0=
-github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k=
-github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA=
-github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ=
-github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8=
-github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
-github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
-github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
-github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
-github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
-github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
-github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
-github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
-github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
-github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
-github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
-github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
-github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
-github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
-github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
-github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
-github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
-github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
-github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
-github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
-github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
-github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
-github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
-github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
-github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
-github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
-github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
-github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
-github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw=
-github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
-github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
-github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
-github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
-github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
-github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
-github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
-github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U=
-github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A=
-github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
-github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
-github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
-github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
-github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
-github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js=
-github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s=
-github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
-github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=
-github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
-github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
-github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
-github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
-github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
-github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
-github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
-github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
-github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
-github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
-github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
-github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
-github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
-github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
-github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
-github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
-github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
-github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
-github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
-github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
-github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
-github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
-github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0=
-github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM=
-github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
-github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=
-github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=
-github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
-github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
-github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc=
-github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU=
-github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
-github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
-github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
-github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
-github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U=
-github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0=
-github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
-github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
-github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo=
-github.com/ipfs/go-ds-badger v0.3.4/go.mod h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8=
-github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0=
-github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs=
-github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
-github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
-github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
-github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
-github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
-github.com/ipfs/go-test v0.2.1 h1:/D/a8xZ2JzkYqcVcV/7HYlCnc7bv/pKHQiX5TdClkPE=
-github.com/ipfs/go-test v0.2.1/go.mod h1:dzu+KB9cmWjuJnXFDYJwC25T3j1GcN57byN+ixmK39M=
-github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
-github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
-github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
-github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
-github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
-github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
-github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
-github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
-github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
-github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
-github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
-github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E=
-github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
-github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
-github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
-github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
-github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
-github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
-github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
-github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
-github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
-github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
-github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
-github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
-github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
-github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc=
-github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8=
-github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
-github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
-github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
-github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
-github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
-github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
-github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs=
-github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc=
-github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
-github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
-github.com/libp2p/go-libp2p-gorpc v0.6.0 h1:Z3ODCzbKe+2lUtEjRc+W+l8Olj63r68G5w1wrQ9ZsOw=
-github.com/libp2p/go-libp2p-gorpc v0.6.0/go.mod h1:jGTsI/yn1xL/9VupJ+DIXo8ExobWDKjwVdjNAfhFKxk=
-github.com/libp2p/go-libp2p-kad-dht v0.30.2 h1:K0LJPdXynQ+u3rx6uFlrfNy0i11LE6SOCDzwAAaahys=
-github.com/libp2p/go-libp2p-kad-dht v0.30.2/go.mod h1:UV0mxF4ufh/ht05jNg5mcjOMrjK82uecgANa+GKi4y0=
-github.com/libp2p/go-libp2p-kbucket v0.6.5 h1:Fsl1YvZcMwqrR4DYrTO02yo9PGYs2HBQIT3lGXFMTxg=
-github.com/libp2p/go-libp2p-kbucket v0.6.5/go.mod h1:U6WOd0BvnSp03IQSrjgM54tg7zh1UUNsXLJqAQzClTA=
-github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o=
-github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4=
-github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg=
-github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E=
-github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI=
-github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98=
-github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
-github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
-github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
-github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
-github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc=
-github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
-github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
-github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
-github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
-github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
-github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
-github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
-github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
-github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
-github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
-github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
-github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
-github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
-github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
-github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
-github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
-github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
-github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
-github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
-github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
-github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
-github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
-github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
-github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
-github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
-github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
-github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
-github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
-github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
-github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
-github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
-github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
-github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
-github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
-github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
-github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
-github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
-github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
-github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
-github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
-github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
-github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
-github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
-github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
-github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
-github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
-github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
-github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
-github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
-github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
-github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A=
-github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM=
-github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
-github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
-github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
-github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE=
-github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
-github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
-github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
-github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
-github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
-github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 h1:4fMydcL7sQjWQPMmzTLpRtsKl5KQdZVNcvPoYwpr4G4=
-github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ=
-github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
-github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
-github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= -github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= -github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= -github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= -github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= -github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= -github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= -github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= -github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= -github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= -github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= -github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= -github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= -github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= -github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= -github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= -github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= -github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= -github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= -github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= -github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= -github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= -github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= -github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= -github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= -github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= -github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= -github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= -github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= -github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= -github.com/pion/transport/v2 v2.2.10/go.mod 
h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= -github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= -github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= -github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= -github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= -github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= -github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= -github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= -github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= -github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= -github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= -github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= -github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= -github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= -github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal 
v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= -github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip 
v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= 
-github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= -github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= -github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tiendc/go-deepcopy v1.7.1 h1:LnubftI6nYaaMOcaz0LphzwraqN8jiWTwm416sitff4= -github.com/tiendc/go-deepcopy v1.7.1/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= 
-github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= -github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= -github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= -github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= -github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= -github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= -github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= -github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= -go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= -go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= -go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.11.0 
h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= 
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term 
v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= -google.golang.org/grpc/examples 
v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs= -gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= -lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= -modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4= -modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= -modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A= -modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q= -modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= -modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= -modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= -modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= -modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= -modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= -modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A= -modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I= -modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= -modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= -modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= -modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= -modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= -modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= -modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= -modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.40.0 h1:bNWEDlYhNPAUdUdBzjAvn8icAs/2gaKlj4vM+tQ6KdQ= -modernc.org/sqlite v1.40.0/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE= -modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= -modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= -modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= -oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -zotregistry.dev/zot/v2 v2.1.11 h1:hxeE4ilLcmCeF035hs1lRtoyFXm6rJ0rearKgTXPbq8= -zotregistry.dev/zot/v2 v2.1.11/go.mod 
h1:EYqgYSnmOBPQ9OwD5ntuYoLY/qbuzVfpDllomKCa3NI= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1 h1:THc6uLCGTpU393vVD5Eu5JHUdikvaP1+dqAclQe8pOE= +buf.build/gen/go/agntcy/oasf-sdk/protocolbuffers/go v1.36.10-20251029125108-823ea6fabc82.1/go.mod h1:xkbAJMbZuuebIblSFnLrfTpvmfjarhKsIid+Q9snDQ0= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1 h1:ZObM/Cdu5dZO4ibBXNRSy+rFwG4oV86mYfKbI0Z7AAI= +buf.build/gen/go/agntcy/oasf/protocolbuffers/go v1.36.10-20251022143645-07a420b66e81.1/go.mod h1:yJHswa2p3J+WxGLpgzuWNWn3I1CIkxdOu80Y/vN5lbE= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible 
h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/agntcy/oasf-sdk/pkg v0.0.14 h1:DNKQNf4R4SMDbnaawoSl6FVOBvkSy4O9MyqKd7iHE8I= +github.com/agntcy/oasf-sdk/pkg v0.0.14/go.mod h1:FvcEB49gsvK+JO5i6l/pt5QgTK0LZeR7KYKsdcI6ZIM= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= +github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod 
h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= +github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= 
+github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= +github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.120.0 h1:Mo9R/EKZk9aoagFs0OmuCmBYjWJfvbWJiX4aenIJOKY= +github.com/casbin/casbin/v2 v2.120.0/go.mod h1:Ee33aqGrmES+GNL17L0h9X28wXuo829wnNUnS0edAco= +github.com/casbin/govaluate v1.3.0 h1:VA0eSY0M2lA86dYd5kPPuNZMUD9QkWnOCnavGrw9myc= +github.com/casbin/govaluate v1.3.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= 
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= +github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod 
h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= +github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= +github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= +github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= +github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= +github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= +github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 
h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= +github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= +github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 
h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod 
h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= +github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= +github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ipfs/boxo v0.29.1 h1:z61ZT4YDfTHLjXTsu/+3wvJ8aJlExthDSOCpx6Nh8xc= +github.com/ipfs/boxo v0.29.1/go.mod h1:MkDJStXiJS9U99cbAijHdcmwNfVn5DKYBmQCOgjY2NU= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= +github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg= +github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk= +github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U= +github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0= +github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= +github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.3.4 h1:MmqFicftE0KrwMC77WjXTrPuoUxhwyFsjKONSeWrlOo= +github.com/ipfs/go-ds-badger v0.3.4/go.mod 
h1:HfqsKJcNnIr9ZhZ+rkwS1J5PpaWjJjg6Ipmxd7KPfZ8= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= +github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= +github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-test v0.2.1 h1:/D/a8xZ2JzkYqcVcV/7HYlCnc7bv/pKHQiX5TdClkPE= +github.com/ipfs/go-test v0.2.1/go.mod h1:dzu+KB9cmWjuJnXFDYJwC25T3j1GcN57byN+ixmK39M= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= +github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU= +github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= +github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= +github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= +github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= +github.com/libp2p/go-libp2p v0.44.0 h1:5Gtt8OrF8yiXmH+Mx4+/iBeFRMK1TY3a8OrEBDEqAvs= +github.com/libp2p/go-libp2p v0.44.0/go.mod h1:NovCojezAt4dnDd4fH048K7PKEqH0UFYYqJRjIIu8zc= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= +github.com/libp2p/go-libp2p-gorpc v0.6.0 h1:Z3ODCzbKe+2lUtEjRc+W+l8Olj63r68G5w1wrQ9ZsOw= +github.com/libp2p/go-libp2p-gorpc v0.6.0/go.mod h1:jGTsI/yn1xL/9VupJ+DIXo8ExobWDKjwVdjNAfhFKxk= +github.com/libp2p/go-libp2p-kad-dht v0.30.2 h1:K0LJPdXynQ+u3rx6uFlrfNy0i11LE6SOCDzwAAaahys= +github.com/libp2p/go-libp2p-kad-dht v0.30.2/go.mod h1:UV0mxF4ufh/ht05jNg5mcjOMrjK82uecgANa+GKi4y0= 
+github.com/libp2p/go-libp2p-kbucket v0.6.5 h1:Fsl1YvZcMwqrR4DYrTO02yo9PGYs2HBQIT3lGXFMTxg= +github.com/libp2p/go-libp2p-kbucket v0.6.5/go.mod h1:U6WOd0BvnSp03IQSrjgM54tg7zh1UUNsXLJqAQzClTA= +github.com/libp2p/go-libp2p-pubsub v0.15.0 h1:cG7Cng2BT82WttmPFMi50gDNV+58K626m/wR00vGL1o= +github.com/libp2p/go-libp2p-pubsub v0.15.0/go.mod h1:lr4oE8bFgQaifRcoc2uWhWWiK6tPdOEKpUuR408GFN4= +github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOUZ8kUl+Fg= +github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI= +github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= +github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= +github.com/libp2p/go-netroute v0.3.0 h1:nqPCXHmeNmgTJnktosJ/sIef9hvwYCrsLxXmfNks/oc= +github.com/libp2p/go-netroute v0.3.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= +github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg= +github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU= +github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg= +github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= +github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c 
h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc= +github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo= 
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= +github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= +github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ= +github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 h1:4fMydcL7sQjWQPMmzTLpRtsKl5KQdZVNcvPoYwpr4G4= 
+github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o= +github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M= +github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E= +github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU= +github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4= +github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw= +github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4= +github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI= +github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90= +github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM= +github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA= +github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo= +github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0= +github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c= +github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk= +github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE= +github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE= +github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4= +github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E= +github.com/pion/srtp/v3 v3.0.6 
h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4= +github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY= +github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw= +github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU= +github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= +github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps= +github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs= +github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54= +github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= +github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/quic-go/qpack v0.5.1 
h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.55.0 h1:zccPQIqYCXDt5NmcEabyYvOnomjs8Tlwl7tISjJh9Mk= +github.com/quic-go/quic-go v0.55.0/go.mod h1:DR51ilwU1uE164KuWXhinFcKWGlEjzys2l8zUl5Ss1U= +github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70= +github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod 
h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= +github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod 
h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= +github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= +github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod 
h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= +github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tiendc/go-deepcopy v1.7.1 h1:LnubftI6nYaaMOcaz0LphzwraqN8jiWTwm416sitff4= +github.com/tiendc/go-deepcopy v1.7.1/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 
v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= +github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= +github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod 
h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod 
h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg= +go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod 
h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry 
v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= +google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs= +gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg= +lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo= +modernc.org/cc/v4 v4.26.5 h1:xM3bX7Mve6G8K8b+T11ReenJOT+BmVqQj0FY5T4+5Y4= +modernc.org/cc/v4 v4.26.5/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A= +modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A= +modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.40.0 h1:bNWEDlYhNPAUdUdBzjAvn8icAs/2gaKlj4vM+tQ6KdQ= +modernc.org/sqlite v1.40.0/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +oras.land/oras-go/v2 v2.6.0 
h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +zotregistry.dev/zot/v2 v2.1.11 h1:hxeE4ilLcmCeF035hs1lRtoyFXm6rJ0rearKgTXPbq8= +zotregistry.dev/zot/v2 v2.1.11/go.mod h1:EYqgYSnmOBPQ9OwD5ntuYoLY/qbuzVfpDllomKCa3NI= diff --git a/server/healthcheck/healthcheck.go b/server/healthcheck/healthcheck.go index 881c00bb8..b8d44dffd 100644 --- a/server/healthcheck/healthcheck.go +++ b/server/healthcheck/healthcheck.go @@ -1,160 +1,160 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package healthcheck provides gRPC health check service. -package healthcheck - -import ( - "context" - "sync" - "time" - - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -var logger = logging.Logger("healthcheck") - -const ( - // MonitorInterval is the interval at which health checks are monitored. - MonitorInterval = 5 * time.Second - // CheckTimeout is the timeout for individual health checks. - CheckTimeout = 3 * time.Second -) - -// CheckFunc is a function that performs a health check. -// Return true if healthy, false otherwise. -type CheckFunc func(ctx context.Context) bool - -// IsHealthCheckEndpoint returns true if the given method is a gRPC health check endpoint. -func IsHealthCheckEndpoint(method string) bool { - return method == "/grpc.health.v1.Health/Check" || method == "/grpc.health.v1.Health/Watch" -} - -// Checker manages health checks using gRPC health checking protocol. -type Checker struct { - mu sync.RWMutex - readinessChecks map[string]CheckFunc - healthServer *health.Server - stopChan chan struct{} - wg sync.WaitGroup -} - -// New creates a new health checker. -func New() *Checker { - return &Checker{ - readinessChecks: make(map[string]CheckFunc), - healthServer: health.NewServer(), - stopChan: make(chan struct{}), - } -} - -// AddReadinessCheck adds a readiness check. -func (c *Checker) AddReadinessCheck(name string, check CheckFunc) { - c.mu.Lock() - defer c.mu.Unlock() - - c.readinessChecks[name] = check -} - -// Register registers the health service with the gRPC server. -func (c *Checker) Register(grpcServer *grpc.Server) { - grpc_health_v1.RegisterHealthServer(grpcServer, c.healthServer) - logger.Info("Registered gRPC health service") -} - -// Start starts the health check monitoring. -// It periodically checks all registered readiness checks and updates the health status. 
-func (c *Checker) Start(ctx context.Context) error { - c.mu.RLock() - hasChecks := len(c.readinessChecks) > 0 - c.mu.RUnlock() - - // If no readiness checks are registered, immediately set status to SERVING - // Otherwise, start as NOT_SERVING and wait for first health check to run - if !hasChecks { - c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) - } else { - c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) - } - - // Start background goroutine to monitor health checks - c.wg.Add(1) - - go func() { - defer c.wg.Done() - - c.monitorHealth(ctx) - }() - - logger.Info("Health check monitoring started") - - return nil -} - -// Stop gracefully stops the health check monitoring. -func (c *Checker) Stop(ctx context.Context) error { - logger.Info("Stopping health check monitoring") - - // Signal stop and wait for goroutine to finish - close(c.stopChan) - c.wg.Wait() - - // Set status as not serving - c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) - - return nil -} - -// monitorHealth continuously monitors health checks and updates the health status. -func (c *Checker) monitorHealth(ctx context.Context) { - ticker := time.NewTicker(MonitorInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-c.stopChan: - return - case <-ticker.C: - c.updateHealthStatus(ctx) - } - } -} - -// updateHealthStatus runs all readiness checks and updates the health status. -func (c *Checker) updateHealthStatus(ctx context.Context) { - c.mu.RLock() - - checks := make(map[string]CheckFunc, len(c.readinessChecks)) - for name, check := range c.readinessChecks { - checks[name] = check - } - - c.mu.RUnlock() - - // Run all checks with timeout - checkCtx, cancel := context.WithTimeout(ctx, CheckTimeout) - defer cancel() - - allHealthy := true - failedChecks := []string{} - - for name, check := range checks { - if !check(checkCtx) { - allHealthy = false - - failedChecks = append(failedChecks, name) - } - } - - if allHealthy { - c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) - } else { - logger.Warn("Health checks failed", "failed_checks", failedChecks) - c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package healthcheck provides gRPC health check service. +package healthcheck + +import ( + "context" + "sync" + "time" + + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +var logger = logging.Logger("healthcheck") + +const ( + // MonitorInterval is the interval at which health checks are monitored. + MonitorInterval = 5 * time.Second + // CheckTimeout is the timeout for individual health checks. + CheckTimeout = 3 * time.Second +) + +// CheckFunc is a function that performs a health check. +// Return true if healthy, false otherwise. +type CheckFunc func(ctx context.Context) bool + +// IsHealthCheckEndpoint returns true if the given method is a gRPC health check endpoint. +func IsHealthCheckEndpoint(method string) bool { + return method == "/grpc.health.v1.Health/Check" || method == "/grpc.health.v1.Health/Watch" +} + +// Checker manages health checks using gRPC health checking protocol. 
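The lifecycle described above (construct, add checks, register, start, stop) can be wired up at server startup roughly as follows. A sketch only: the listen address and the `storage` check are illustrative assumptions, while `New`, `AddReadinessCheck`, `Register`, `Start`, and `Stop` come from this file:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/agntcy/dir/server/healthcheck"
	"google.golang.org/grpc"
)

func main() {
	checker := healthcheck.New()

	// Illustrative readiness check; a real one would ping a dependency
	// such as a database or an upstream service.
	checker.AddReadinessCheck("storage", func(ctx context.Context) bool {
		return true
	})

	grpcServer := grpc.NewServer()
	checker.Register(grpcServer) // exposes grpc.health.v1.Health

	if err := checker.Start(context.Background()); err != nil {
		log.Fatalf("start health monitoring: %v", err)
	}

	lis, err := net.Listen("tcp", ":8888") // assumed address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	go func() {
		if err := grpcServer.Serve(lis); err != nil {
			log.Printf("serve: %v", err)
		}
	}()

	// ... later, on shutdown:
	stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = checker.Stop(stopCtx)
	grpcServer.GracefulStop()
}
```

Because the status starts as NOT_SERVING whenever checks are registered, probes only pass once the first monitoring tick has run all checks successfully.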
+type Checker struct { + mu sync.RWMutex + readinessChecks map[string]CheckFunc + healthServer *health.Server + stopChan chan struct{} + wg sync.WaitGroup +} + +// New creates a new health checker. +func New() *Checker { + return &Checker{ + readinessChecks: make(map[string]CheckFunc), + healthServer: health.NewServer(), + stopChan: make(chan struct{}), + } +} + +// AddReadinessCheck adds a readiness check. +func (c *Checker) AddReadinessCheck(name string, check CheckFunc) { + c.mu.Lock() + defer c.mu.Unlock() + + c.readinessChecks[name] = check +} + +// Register registers the health service with the gRPC server. +func (c *Checker) Register(grpcServer *grpc.Server) { + grpc_health_v1.RegisterHealthServer(grpcServer, c.healthServer) + logger.Info("Registered gRPC health service") +} + +// Start starts the health check monitoring. +// It periodically checks all registered readiness checks and updates the health status. +func (c *Checker) Start(ctx context.Context) error { + c.mu.RLock() + hasChecks := len(c.readinessChecks) > 0 + c.mu.RUnlock() + + // If no readiness checks are registered, immediately set status to SERVING + // Otherwise, start as NOT_SERVING and wait for first health check to run + if !hasChecks { + c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) + } else { + c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + } + + // Start background goroutine to monitor health checks + c.wg.Add(1) + + go func() { + defer c.wg.Done() + + c.monitorHealth(ctx) + }() + + logger.Info("Health check monitoring started") + + return nil +} + +// Stop gracefully stops the health check monitoring. +func (c *Checker) Stop(ctx context.Context) error { + logger.Info("Stopping health check monitoring") + + // Signal stop and wait for goroutine to finish + close(c.stopChan) + c.wg.Wait() + + // Set status as not serving + c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + + return nil +} + +// monitorHealth continuously monitors health checks and updates the health status. +func (c *Checker) monitorHealth(ctx context.Context) { + ticker := time.NewTicker(MonitorInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-c.stopChan: + return + case <-ticker.C: + c.updateHealthStatus(ctx) + } + } +} + +// updateHealthStatus runs all readiness checks and updates the health status. 
+func (c *Checker) updateHealthStatus(ctx context.Context) { + c.mu.RLock() + + checks := make(map[string]CheckFunc, len(c.readinessChecks)) + for name, check := range c.readinessChecks { + checks[name] = check + } + + c.mu.RUnlock() + + // Run all checks with timeout + checkCtx, cancel := context.WithTimeout(ctx, CheckTimeout) + defer cancel() + + allHealthy := true + failedChecks := []string{} + + for name, check := range checks { + if !check(checkCtx) { + allHealthy = false + + failedChecks = append(failedChecks, name) + } + } + + if allHealthy { + c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) + } else { + logger.Warn("Health checks failed", "failed_checks", failedChecks) + c.healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) + } +} diff --git a/server/healthcheck/healthcheck_test.go b/server/healthcheck/healthcheck_test.go index 200bc4604..7fbfbf004 100644 --- a/server/healthcheck/healthcheck_test.go +++ b/server/healthcheck/healthcheck_test.go @@ -1,374 +1,374 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:noctx,goconst -package healthcheck - -import ( - "context" - "net" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/health/grpc_health_v1" -) - -func TestNew(t *testing.T) { - checker := New() - if checker == nil { - t.Fatal("Expected non-nil checker") - } - - if checker.readinessChecks == nil { - t.Fatal("Expected readinessChecks map to be initialized") - } - - if checker.healthServer == nil { - t.Fatal("Expected healthServer to be initialized") - } -} - -func TestAddReadinessCheck(t *testing.T) { - checker := New() - - checkCalled := false - testCheck := func(ctx context.Context) bool { - checkCalled = true - - return true - } - - checker.AddReadinessCheck("test", testCheck) - - // Verify check was added - checker.mu.RLock() - - if _, exists := checker.readinessChecks["test"]; !exists { - t.Error("Expected readiness check to be added") - } - - checker.mu.RUnlock() - - // Call the check function - ctx := context.Background() - - result := checker.readinessChecks["test"](ctx) - if !result { - t.Error("Expected check to return true") - } - - if !checkCalled { - t.Error("Expected check function to be called") - } -} - -func TestStartAndStop(t *testing.T) { - checker := New() - - // Create a gRPC server and register the health service - grpcServer := grpc.NewServer() - checker.Register(grpcServer) - - // Start the gRPC server in the background - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - - go func() { - _ = grpcServer.Serve(listener) - }() - - defer grpcServer.Stop() - - ctx := context.Background() - - err = checker.Start(ctx) - if err != nil { - t.Fatalf("Failed to start health check monitoring: %v", err) - } - - // Give monitoring time to start - time.Sleep(100 * time.Millisecond) - - // Stop monitoring - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - err = checker.Stop(stopCtx) - if err != nil { - t.Errorf("Failed to stop health check monitoring: %v", err) - } -} - -func TestStopWithoutStart(t *testing.T) { - checker := New() - - // Stop without starting should not error - ctx := context.Background() - - err := checker.Stop(ctx) - if err != nil { - t.Errorf("Expected no error when stopping without start, got: %v", err) - } -} - -func 
TestHealthCheckServing(t *testing.T) { - checker := New() - - // Create a gRPC server and register the health service - grpcServer := grpc.NewServer() - checker.Register(grpcServer) - - // Start the gRPC server in the background - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - - go func() { - _ = grpcServer.Serve(listener) - }() - - defer grpcServer.Stop() - - // Connect to the server - conn, err := grpc.NewClient( - listener.Addr().String(), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to connect to server: %v", err) - } - defer conn.Close() - - client := grpc_health_v1.NewHealthClient(conn) - - ctx := context.Background() - - // Start health check monitoring - err = checker.Start(ctx) - if err != nil { - t.Fatalf("Failed to start health check monitoring: %v", err) - } - - defer func() { - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _ = checker.Stop(stopCtx) - }() - - // Give monitoring time to start - time.Sleep(100 * time.Millisecond) - - // Check health status - should be SERVING - resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) - if err != nil { - t.Fatalf("Failed to check health: %v", err) - } - - if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { - t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) - } -} - -func TestHealthCheckWithFailingCheck(t *testing.T) { - checker := New() - - // Add a failing check - checker.AddReadinessCheck("failing", func(ctx context.Context) bool { - return false - }) - - // Create a gRPC server and register the health service - grpcServer := grpc.NewServer() - checker.Register(grpcServer) - - // Start the gRPC server in the background - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - - go func() { - _ = grpcServer.Serve(listener) - }() - - defer grpcServer.Stop() - - // Connect to the server - conn, err := grpc.NewClient( - listener.Addr().String(), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to connect to server: %v", err) - } - defer conn.Close() - - client := grpc_health_v1.NewHealthClient(conn) - - ctx := context.Background() - - // Start health check monitoring - err = checker.Start(ctx) - if err != nil { - t.Fatalf("Failed to start health check monitoring: %v", err) - } - - defer func() { - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _ = checker.Stop(stopCtx) - }() - - // Wait for health check to run (checks run every 5 seconds) - time.Sleep(6 * time.Second) - - // Check health status - should be NOT_SERVING - resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) - if err != nil { - t.Fatalf("Failed to check health: %v", err) - } - - if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_NOT_SERVING { - t.Errorf("Expected status NOT_SERVING, got %v", resp.GetStatus()) - } -} - -func TestHealthCheckWithPassingChecks(t *testing.T) { - checker := New() - - // Add passing checks - checker.AddReadinessCheck("check1", func(ctx context.Context) bool { - return true - }) - - checker.AddReadinessCheck("check2", func(ctx context.Context) bool { - return true - }) - - // Create a gRPC server and register the health service - grpcServer := grpc.NewServer() - checker.Register(grpcServer) - - // Start the gRPC 
server in the background - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - - go func() { - _ = grpcServer.Serve(listener) - }() - - defer grpcServer.Stop() - - // Connect to the server - conn, err := grpc.NewClient( - listener.Addr().String(), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to connect to server: %v", err) - } - defer conn.Close() - - client := grpc_health_v1.NewHealthClient(conn) - - ctx := context.Background() - - // Start health check monitoring - err = checker.Start(ctx) - if err != nil { - t.Fatalf("Failed to start health check monitoring: %v", err) - } - - defer func() { - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _ = checker.Stop(stopCtx) - }() - - // Wait for health check to run - time.Sleep(6 * time.Second) - - // Check health status - should be SERVING - resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) - if err != nil { - t.Fatalf("Failed to check health: %v", err) - } - - if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { - t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) - } -} - -func TestHealthWatch(t *testing.T) { - checker := New() - - // Create a gRPC server and register the health service - grpcServer := grpc.NewServer() - checker.Register(grpcServer) - - // Start the gRPC server in the background - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Failed to create listener: %v", err) - } - - go func() { - _ = grpcServer.Serve(listener) - }() - - defer grpcServer.Stop() - - // Connect to the server - conn, err := grpc.NewClient( - listener.Addr().String(), - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - t.Fatalf("Failed to connect to server: %v", err) - } - defer conn.Close() - - client := grpc_health_v1.NewHealthClient(conn) - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Start health check monitoring - err = checker.Start(ctx) - if err != nil { - t.Fatalf("Failed to start health check monitoring: %v", err) - } - - defer func() { - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - _ = checker.Stop(stopCtx) - }() - - // Watch health status - stream, err := client.Watch(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) - if err != nil { - t.Fatalf("Failed to watch health: %v", err) - } - - // Should receive at least one status update - resp, err := stream.Recv() - if err != nil { - t.Fatalf("Failed to receive health status: %v", err) - } - - if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { - t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:noctx,goconst +package healthcheck + +import ( + "context" + "net" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func TestNew(t *testing.T) { + checker := New() + if checker == nil { + t.Fatal("Expected non-nil checker") + } + + if checker.readinessChecks == nil { + t.Fatal("Expected readinessChecks map to be initialized") + } + + if checker.healthServer == nil { + t.Fatal("Expected healthServer to be initialized") + } +} + +func TestAddReadinessCheck(t 
*testing.T) { + checker := New() + + checkCalled := false + testCheck := func(ctx context.Context) bool { + checkCalled = true + + return true + } + + checker.AddReadinessCheck("test", testCheck) + + // Verify check was added + checker.mu.RLock() + + if _, exists := checker.readinessChecks["test"]; !exists { + t.Error("Expected readiness check to be added") + } + + checker.mu.RUnlock() + + // Call the check function + ctx := context.Background() + + result := checker.readinessChecks["test"](ctx) + if !result { + t.Error("Expected check to return true") + } + + if !checkCalled { + t.Error("Expected check function to be called") + } +} + +func TestStartAndStop(t *testing.T) { + checker := New() + + // Create a gRPC server and register the health service + grpcServer := grpc.NewServer() + checker.Register(grpcServer) + + // Start the gRPC server in the background + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + + go func() { + _ = grpcServer.Serve(listener) + }() + + defer grpcServer.Stop() + + ctx := context.Background() + + err = checker.Start(ctx) + if err != nil { + t.Fatalf("Failed to start health check monitoring: %v", err) + } + + // Give monitoring time to start + time.Sleep(100 * time.Millisecond) + + // Stop monitoring + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + err = checker.Stop(stopCtx) + if err != nil { + t.Errorf("Failed to stop health check monitoring: %v", err) + } +} + +func TestStopWithoutStart(t *testing.T) { + checker := New() + + // Stop without starting should not error + ctx := context.Background() + + err := checker.Stop(ctx) + if err != nil { + t.Errorf("Expected no error when stopping without start, got: %v", err) + } +} + +func TestHealthCheckServing(t *testing.T) { + checker := New() + + // Create a gRPC server and register the health service + grpcServer := grpc.NewServer() + checker.Register(grpcServer) + + // Start the gRPC server in the background + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + + go func() { + _ = grpcServer.Serve(listener) + }() + + defer grpcServer.Stop() + + // Connect to the server + conn, err := grpc.NewClient( + listener.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to connect to server: %v", err) + } + defer conn.Close() + + client := grpc_health_v1.NewHealthClient(conn) + + ctx := context.Background() + + // Start health check monitoring + err = checker.Start(ctx) + if err != nil { + t.Fatalf("Failed to start health check monitoring: %v", err) + } + + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _ = checker.Stop(stopCtx) + }() + + // Give monitoring time to start + time.Sleep(100 * time.Millisecond) + + // Check health status - should be SERVING + resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) + if err != nil { + t.Fatalf("Failed to check health: %v", err) + } + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) + } +} + +func TestHealthCheckWithFailingCheck(t *testing.T) { + checker := New() + + // Add a failing check + checker.AddReadinessCheck("failing", func(ctx context.Context) bool { + return false + }) + + // Create a gRPC server and register the health service + 
grpcServer := grpc.NewServer() + checker.Register(grpcServer) + + // Start the gRPC server in the background + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + + go func() { + _ = grpcServer.Serve(listener) + }() + + defer grpcServer.Stop() + + // Connect to the server + conn, err := grpc.NewClient( + listener.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to connect to server: %v", err) + } + defer conn.Close() + + client := grpc_health_v1.NewHealthClient(conn) + + ctx := context.Background() + + // Start health check monitoring + err = checker.Start(ctx) + if err != nil { + t.Fatalf("Failed to start health check monitoring: %v", err) + } + + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _ = checker.Stop(stopCtx) + }() + + // Wait for health check to run (checks run every 5 seconds) + time.Sleep(6 * time.Second) + + // Check health status - should be NOT_SERVING + resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) + if err != nil { + t.Fatalf("Failed to check health: %v", err) + } + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_NOT_SERVING { + t.Errorf("Expected status NOT_SERVING, got %v", resp.GetStatus()) + } +} + +func TestHealthCheckWithPassingChecks(t *testing.T) { + checker := New() + + // Add passing checks + checker.AddReadinessCheck("check1", func(ctx context.Context) bool { + return true + }) + + checker.AddReadinessCheck("check2", func(ctx context.Context) bool { + return true + }) + + // Create a gRPC server and register the health service + grpcServer := grpc.NewServer() + checker.Register(grpcServer) + + // Start the gRPC server in the background + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + + go func() { + _ = grpcServer.Serve(listener) + }() + + defer grpcServer.Stop() + + // Connect to the server + conn, err := grpc.NewClient( + listener.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to connect to server: %v", err) + } + defer conn.Close() + + client := grpc_health_v1.NewHealthClient(conn) + + ctx := context.Background() + + // Start health check monitoring + err = checker.Start(ctx) + if err != nil { + t.Fatalf("Failed to start health check monitoring: %v", err) + } + + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _ = checker.Stop(stopCtx) + }() + + // Wait for health check to run + time.Sleep(6 * time.Second) + + // Check health status - should be SERVING + resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) + if err != nil { + t.Fatalf("Failed to check health: %v", err) + } + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) + } +} + +func TestHealthWatch(t *testing.T) { + checker := New() + + // Create a gRPC server and register the health service + grpcServer := grpc.NewServer() + checker.Register(grpcServer) + + // Start the gRPC server in the background + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Failed to create listener: %v", err) + } + + go func() { + _ = grpcServer.Serve(listener) + }() + + defer grpcServer.Stop() + + // Connect to the server + 
conn, err := grpc.NewClient( + listener.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + t.Fatalf("Failed to connect to server: %v", err) + } + defer conn.Close() + + client := grpc_health_v1.NewHealthClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Start health check monitoring + err = checker.Start(ctx) + if err != nil { + t.Fatalf("Failed to start health check monitoring: %v", err) + } + + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _ = checker.Stop(stopCtx) + }() + + // Watch health status + stream, err := client.Watch(ctx, &grpc_health_v1.HealthCheckRequest{Service: ""}) + if err != nil { + t.Fatalf("Failed to watch health: %v", err) + } + + // Should receive at least one status update + resp, err := stream.Recv() + if err != nil { + t.Fatalf("Failed to receive health status: %v", err) + } + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + t.Errorf("Expected status SERVING, got %v", resp.GetStatus()) + } +} diff --git a/server/metrics/interceptors.go b/server/metrics/interceptors.go index 99f12f61c..74bba614f 100644 --- a/server/metrics/interceptors.go +++ b/server/metrics/interceptors.go @@ -1,28 +1,28 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -import ( - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "google.golang.org/grpc" -) - -// ServerOptions returns gRPC server options for Prometheus metrics collection. -// Interceptors are chained and collect metrics for all gRPC methods automatically. -func ServerOptions() []grpc.ServerOption { - return []grpc.ServerOption{ - grpc.ChainUnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), - grpc.ChainStreamInterceptor(grpc_prometheus.StreamServerInterceptor), - } -} - -// InitializeMetrics registers gRPC metrics with the Prometheus registry. -// Must be called after all gRPC services are registered on the server. -func InitializeMetrics(grpcServer *grpc.Server, metricsServer *Server) { - // Initialize gRPC metrics with all registered services - grpc_prometheus.Register(grpcServer) - - // Register grpc_prometheus metrics with our custom registry - metricsServer.Registry().MustRegister(grpc_prometheus.DefaultServerMetrics) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package metrics + +import ( + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "google.golang.org/grpc" +) + +// ServerOptions returns gRPC server options for Prometheus metrics collection. +// Interceptors are chained and collect metrics for all gRPC methods automatically. +func ServerOptions() []grpc.ServerOption { + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + grpc.ChainStreamInterceptor(grpc_prometheus.StreamServerInterceptor), + } +} + +// InitializeMetrics registers gRPC metrics with the Prometheus registry. +// Must be called after all gRPC services are registered on the server. 
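The ordering requirement in the comment above matters in practice: the interceptors must be attached when the server is constructed, while metric registration must wait until every service is registered (grpc_prometheus inspects the server's registered services). A sketch of that order, with the listen address and the omitted service registration as assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/agntcy/dir/server/metrics"
	"google.golang.org/grpc"
)

func main() {
	// 1. Interceptors must be attached at construction time.
	grpcServer := grpc.NewServer(metrics.ServerOptions()...)

	// 2. Register application services here (omitted in this sketch).

	// 3. Only after all services are registered, initialize gRPC metrics
	//    and expose them on the metrics HTTP server.
	metricsServer := metrics.New(":9090") // address is an assumption
	metrics.InitializeMetrics(grpcServer, metricsServer)

	if err := metricsServer.Start(); err != nil {
		log.Fatalf("start metrics server: %v", err)
	}

	// ... run grpcServer, then on shutdown:
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = metricsServer.Stop(ctx)
}
```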
+func InitializeMetrics(grpcServer *grpc.Server, metricsServer *Server) { + // Initialize gRPC metrics with all registered services + grpc_prometheus.Register(grpcServer) + + // Register grpc_prometheus metrics with our custom registry + metricsServer.Registry().MustRegister(grpc_prometheus.DefaultServerMetrics) +} diff --git a/server/metrics/server.go b/server/metrics/server.go index 47317e97e..18a5859b3 100644 --- a/server/metrics/server.go +++ b/server/metrics/server.go @@ -1,133 +1,133 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package metrics provides Prometheus metrics collection infrastructure. -// It manages a separate HTTP server for exposing metrics on the /metrics endpoint. -package metrics - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/agntcy/dir/utils/logging" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -var logger = logging.Logger("metrics") - -const ( - // metricsCollectionTimeout is the timeout for collecting metrics. - // Prevents the /metrics endpoint from hanging indefinitely. - metricsCollectionTimeout = 10 * time.Second - - // httpReadTimeout is the maximum duration for reading the entire request. - httpReadTimeout = 10 * time.Second - - // httpReadHeaderTimeout is the maximum duration for reading request headers. - httpReadHeaderTimeout = 5 * time.Second - - // httpWriteTimeout is the maximum duration for writing the response. - httpWriteTimeout = 30 * time.Second - - // httpIdleTimeout is the maximum duration to wait for the next request. - httpIdleTimeout = 60 * time.Second - - // serverStartupDelay is the delay after starting the server to check for errors. - serverStartupDelay = 100 * time.Millisecond -) - -// Server manages the Prometheus metrics HTTP server. -// It provides a separate HTTP endpoint for metrics collection, -// independent of the main gRPC server. -type Server struct { - registry *prometheus.Registry - server *http.Server - address string -} - -// New creates a new metrics server with a custom Prometheus registry. -// The server listens on the specified address (e.g., ":9090"). -// Call Start() to begin serving metrics. -func New(address string) *Server { - // Create custom registry to avoid conflicts with global registry - registry := prometheus.NewRegistry() - - // Create HTTP handler for Prometheus metrics - // Using promhttp.HandlerFor allows us to use our custom registry - metricsHandler := promhttp.HandlerFor( - registry, - promhttp.HandlerOpts{ - // Enable OpenMetrics format for better compatibility - EnableOpenMetrics: true, - // Timeout for collecting metrics (prevent hanging) - Timeout: metricsCollectionTimeout, - }, - ) - - // Create HTTP mux for routing - mux := http.NewServeMux() - mux.Handle("/metrics", metricsHandler) - - // Create HTTP server - httpServer := &http.Server{ - Addr: address, - Handler: mux, - // Reasonable timeouts to prevent resource exhaustion - ReadTimeout: httpReadTimeout, - ReadHeaderTimeout: httpReadHeaderTimeout, - WriteTimeout: httpWriteTimeout, - IdleTimeout: httpIdleTimeout, - } - - return &Server{ - registry: registry, - server: httpServer, - address: address, - } -} - -// Registry returns the Prometheus registry for registering custom metrics. -func (s *Server) Registry() *prometheus.Registry { - return s.registry -} - -// Start starts the HTTP server in the background. -// Returns immediately after starting the server goroutine. 
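`Registry()` exists so callers can register their own collectors alongside the gRPC metrics on the same `/metrics` endpoint. A minimal sketch; the counter name and label are invented for illustration:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/agntcy/dir/server/metrics"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	metricsServer := metrics.New(":9090") // listen address is an assumption

	// Hypothetical application counter, registered on the same custom
	// registry that backs the /metrics endpoint.
	opsTotal := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "dir_example_operations_total",
			Help: "Example operation counter (illustrative only).",
		},
		[]string{"operation"},
	)
	metricsServer.Registry().MustRegister(opsTotal)

	if err := metricsServer.Start(); err != nil {
		log.Fatalf("start metrics server: %v", err)
	}
	opsTotal.WithLabelValues("push").Inc()

	// ... later, on shutdown:
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = metricsServer.Stop(ctx)
}
```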
-func (s *Server) Start() error { - // Start HTTP server in background goroutine - go func() { - logger.Info("Metrics server starting", "address", s.address) - - // ListenAndServe blocks until server is shut down - if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - // ErrServerClosed is expected during graceful shutdown - logger.Error("Metrics server error", "error", err) - } - }() - - // Give the server a moment to start and check for immediate errors - // (e.g., port already in use) - time.Sleep(serverStartupDelay) - - logger.Info("Metrics server started successfully", "address", s.address, "endpoint", "/metrics") - - return nil -} - -// Stop gracefully shuts down the HTTP server. -// Waits for in-flight requests to complete up to the context timeout. -func (s *Server) Stop(ctx context.Context) error { - logger.Info("Stopping metrics server", "address", s.address) - - // Gracefully shutdown the server, waiting for in-flight requests to complete - if err := s.server.Shutdown(ctx); err != nil { - return fmt.Errorf("failed to shutdown metrics server: %w", err) - } - - logger.Info("Metrics server stopped successfully") - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package metrics provides Prometheus metrics collection infrastructure. +// It manages a separate HTTP server for exposing metrics on the /metrics endpoint. +package metrics + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/agntcy/dir/utils/logging" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var logger = logging.Logger("metrics") + +const ( + // metricsCollectionTimeout is the timeout for collecting metrics. + // Prevents the /metrics endpoint from hanging indefinitely. + metricsCollectionTimeout = 10 * time.Second + + // httpReadTimeout is the maximum duration for reading the entire request. + httpReadTimeout = 10 * time.Second + + // httpReadHeaderTimeout is the maximum duration for reading request headers. + httpReadHeaderTimeout = 5 * time.Second + + // httpWriteTimeout is the maximum duration for writing the response. + httpWriteTimeout = 30 * time.Second + + // httpIdleTimeout is the maximum duration to wait for the next request. + httpIdleTimeout = 60 * time.Second + + // serverStartupDelay is the delay after starting the server to check for errors. + serverStartupDelay = 100 * time.Millisecond +) + +// Server manages the Prometheus metrics HTTP server. +// It provides a separate HTTP endpoint for metrics collection, +// independent of the main gRPC server. +type Server struct { + registry *prometheus.Registry + server *http.Server + address string +} + +// New creates a new metrics server with a custom Prometheus registry. +// The server listens on the specified address (e.g., ":9090"). +// Call Start() to begin serving metrics. 
+func New(address string) *Server { + // Create custom registry to avoid conflicts with global registry + registry := prometheus.NewRegistry() + + // Create HTTP handler for Prometheus metrics + // Using promhttp.HandlerFor allows us to use our custom registry + metricsHandler := promhttp.HandlerFor( + registry, + promhttp.HandlerOpts{ + // Enable OpenMetrics format for better compatibility + EnableOpenMetrics: true, + // Timeout for collecting metrics (prevent hanging) + Timeout: metricsCollectionTimeout, + }, + ) + + // Create HTTP mux for routing + mux := http.NewServeMux() + mux.Handle("/metrics", metricsHandler) + + // Create HTTP server + httpServer := &http.Server{ + Addr: address, + Handler: mux, + // Reasonable timeouts to prevent resource exhaustion + ReadTimeout: httpReadTimeout, + ReadHeaderTimeout: httpReadHeaderTimeout, + WriteTimeout: httpWriteTimeout, + IdleTimeout: httpIdleTimeout, + } + + return &Server{ + registry: registry, + server: httpServer, + address: address, + } +} + +// Registry returns the Prometheus registry for registering custom metrics. +func (s *Server) Registry() *prometheus.Registry { + return s.registry +} + +// Start starts the HTTP server in the background. +// Returns immediately after starting the server goroutine. +func (s *Server) Start() error { + // Start HTTP server in background goroutine + go func() { + logger.Info("Metrics server starting", "address", s.address) + + // ListenAndServe blocks until server is shut down + if err := s.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + // ErrServerClosed is expected during graceful shutdown + logger.Error("Metrics server error", "error", err) + } + }() + + // Give the server a moment to start and check for immediate errors + // (e.g., port already in use) + time.Sleep(serverStartupDelay) + + logger.Info("Metrics server started successfully", "address", s.address, "endpoint", "/metrics") + + return nil +} + +// Stop gracefully shuts down the HTTP server. +// Waits for in-flight requests to complete up to the context timeout. +func (s *Server) Stop(ctx context.Context) error { + logger.Info("Stopping metrics server", "address", s.address) + + // Gracefully shutdown the server, waiting for in-flight requests to complete + if err := s.server.Shutdown(ctx); err != nil { + return fmt.Errorf("failed to shutdown metrics server: %w", err) + } + + logger.Info("Metrics server stopped successfully") + + return nil +} diff --git a/server/middleware/logging/interceptor.go b/server/middleware/logging/interceptor.go index 8cd95aea0..3fdcb21c9 100644 --- a/server/middleware/logging/interceptor.go +++ b/server/middleware/logging/interceptor.go @@ -1,20 +1,20 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package logging provides gRPC interceptors for structured request/response logging. -package logging - -import ( - "context" - "log/slog" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" -) - -// InterceptorLogger adapts slog.Logger to the grpc-middleware Logger interface. -// This allows go-grpc-middleware to use our existing slog-based logging infrastructure. -func InterceptorLogger(l *slog.Logger) grpc_logging.Logger { - return grpc_logging.LoggerFunc(func(ctx context.Context, lvl grpc_logging.Level, msg string, fields ...any) { - l.Log(ctx, slog.Level(lvl), msg, fields...) 
- }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package logging provides gRPC interceptors for structured request/response logging. +package logging + +import ( + "context" + "log/slog" + + grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" +) + +// InterceptorLogger adapts slog.Logger to the grpc-middleware Logger interface. +// This allows go-grpc-middleware to use our existing slog-based logging infrastructure. +func InterceptorLogger(l *slog.Logger) grpc_logging.Logger { + return grpc_logging.LoggerFunc(func(ctx context.Context, lvl grpc_logging.Level, msg string, fields ...any) { + l.Log(ctx, slog.Level(lvl), msg, fields...) + }) +} diff --git a/server/middleware/logging/interceptor_test.go b/server/middleware/logging/interceptor_test.go index 569a4a4fb..cb3c09145 100644 --- a/server/middleware/logging/interceptor_test.go +++ b/server/middleware/logging/interceptor_test.go @@ -1,208 +1,208 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "bytes" - "context" - "encoding/json" - "log/slog" - "strings" - "testing" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" -) - -// TestInterceptorLogger verifies the adapter creates a valid logger. -func TestInterceptorLogger(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) - - interceptorLogger := InterceptorLogger(logger) - if interceptorLogger == nil { - t.Fatal("InterceptorLogger returned nil") - } -} - -// TestInterceptorLoggerLogsMessage verifies the adapter logs messages correctly. -func TestInterceptorLoggerLogsMessage(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) - - interceptorLogger := InterceptorLogger(logger) - ctx := context.Background() - - // Log a test message - interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "test message", "key", "value") - - output := buf.String() - - // Verify it's valid JSON - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Output is not valid JSON: %v\nOutput: %s", err, output) - } - - // Verify expected fields - if parsed["msg"] != "test message" { - t.Errorf("Expected msg='test message', got: %v", parsed["msg"]) - } - - if parsed["key"] != "value" { - t.Errorf("Expected key='value', got: %v", parsed["key"]) - } - - if parsed["level"] != "INFO" { - t.Errorf("Expected level='INFO', got: %v", parsed["level"]) - } -} - -// TestInterceptorLoggerLevels verifies logging at different levels. 
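The adapter above becomes useful once it is passed to the go-grpc-middleware interceptors. A plausible wiring, combined with the option helpers this package defines elsewhere in this diff; nothing here is prescribed by the change itself:

```go
package main

import (
	"log/slog"
	"os"

	dirlogging "github.com/agntcy/dir/server/middleware/logging"
	grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging"
	"google.golang.org/grpc"
)

func main() {
	slogger := slog.New(slog.NewJSONHandler(os.Stdout, nil))

	// Adapt slog to the go-grpc-middleware logger interface and attach
	// the logging interceptors for both unary and streaming RPCs.
	grpcServer := grpc.NewServer(
		grpc.ChainUnaryInterceptor(
			grpc_logging.UnaryServerInterceptor(
				dirlogging.InterceptorLogger(slogger),
				dirlogging.DefaultOptions()...,
			),
		),
		grpc.ChainStreamInterceptor(
			grpc_logging.StreamServerInterceptor(
				dirlogging.InterceptorLogger(slogger),
				dirlogging.DefaultOptions()...,
			),
		),
	)
	_ = grpcServer // register services and serve as usual
}
```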
-func TestInterceptorLoggerLevels(t *testing.T) { - tests := []struct { - name string - level grpc_logging.Level - expectedLevel string - }{ - {"DEBUG", grpc_logging.LevelDebug, "DEBUG"}, - {"INFO", grpc_logging.LevelInfo, "INFO"}, - {"WARN", grpc_logging.LevelWarn, "WARN"}, - {"ERROR", grpc_logging.LevelError, "ERROR"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) - - interceptorLogger := InterceptorLogger(logger) - ctx := context.Background() - - interceptorLogger.Log(ctx, tt.level, "test", "level", tt.name) - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - if parsed["level"] != tt.expectedLevel { - t.Errorf("Expected level=%s, got: %v", tt.expectedLevel, parsed["level"]) - } - }) - } -} - -// testContextKey is a custom type for context keys to avoid collisions. -type testContextKey string - -const requestIDContextKey testContextKey = "request_id" - -// TestInterceptorLoggerWithContext verifies context is passed through. -func TestInterceptorLoggerWithContext(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - interceptorLogger := InterceptorLogger(logger) - - // Create context with a value (simulating request context) - ctx := context.WithValue(context.Background(), requestIDContextKey, "test-123") - - interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "context test", "test", "value") - - // Verify log was created (context is passed but not automatically logged) - output := buf.String() - if !strings.Contains(output, "context test") { - t.Error("Expected 'context test' to be logged") - } -} - -// TestInterceptorLoggerMultipleFields verifies multiple structured fields. -func TestInterceptorLoggerMultipleFields(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - interceptorLogger := InterceptorLogger(logger) - ctx := context.Background() - - // Log with multiple fields - interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "multi-field test", - "string_field", "test", - "int_field", 42, - "bool_field", true, - ) - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - // Verify all fields are present - if parsed["string_field"] != "test" { - t.Errorf("Expected string_field='test', got: %v", parsed["string_field"]) - } - - if parsed["int_field"] != float64(42) { // JSON numbers are float64 - t.Errorf("Expected int_field=42, got: %v", parsed["int_field"]) - } - - if parsed["bool_field"] != true { - t.Errorf("Expected bool_field=true, got: %v", parsed["bool_field"]) - } -} - -// TestInterceptorLoggerEmptyFields verifies handling of empty/nil fields. 
-func TestInterceptorLoggerEmptyFields(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - interceptorLogger := InterceptorLogger(logger) - ctx := context.Background() - - // Log with empty fields - interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "empty test") - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - if parsed["msg"] != "empty test" { - t.Errorf("Expected msg='empty test', got: %v", parsed["msg"]) - } -} - -// TestInterceptorLoggerTextFormat verifies adapter works with text format too. -func TestInterceptorLoggerTextFormat(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - interceptorLogger := InterceptorLogger(logger) - ctx := context.Background() - - interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "text format test", "key", "value") - - output := buf.String() - - // Verify text format output - if !strings.Contains(output, "text format test") { - t.Error("Expected 'text format test' in output") - } - - if !strings.Contains(output, "key=value") { - t.Error("Expected 'key=value' in output") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "bytes" + "context" + "encoding/json" + "log/slog" + "strings" + "testing" + + grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" +) + +// TestInterceptorLogger verifies the adapter creates a valid logger. +func TestInterceptorLogger(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + + interceptorLogger := InterceptorLogger(logger) + if interceptorLogger == nil { + t.Fatal("InterceptorLogger returned nil") + } +} + +// TestInterceptorLoggerLogsMessage verifies the adapter logs messages correctly. +func TestInterceptorLoggerLogsMessage(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + + interceptorLogger := InterceptorLogger(logger) + ctx := context.Background() + + // Log a test message + interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "test message", "key", "value") + + output := buf.String() + + // Verify it's valid JSON + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Output is not valid JSON: %v\nOutput: %s", err, output) + } + + // Verify expected fields + if parsed["msg"] != "test message" { + t.Errorf("Expected msg='test message', got: %v", parsed["msg"]) + } + + if parsed["key"] != "value" { + t.Errorf("Expected key='value', got: %v", parsed["key"]) + } + + if parsed["level"] != "INFO" { + t.Errorf("Expected level='INFO', got: %v", parsed["level"]) + } +} + +// TestInterceptorLoggerLevels verifies logging at different levels. 
+func TestInterceptorLoggerLevels(t *testing.T) { + tests := []struct { + name string + level grpc_logging.Level + expectedLevel string + }{ + {"DEBUG", grpc_logging.LevelDebug, "DEBUG"}, + {"INFO", grpc_logging.LevelInfo, "INFO"}, + {"WARN", grpc_logging.LevelWarn, "WARN"}, + {"ERROR", grpc_logging.LevelError, "ERROR"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + + interceptorLogger := InterceptorLogger(logger) + ctx := context.Background() + + interceptorLogger.Log(ctx, tt.level, "test", "level", tt.name) + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + if parsed["level"] != tt.expectedLevel { + t.Errorf("Expected level=%s, got: %v", tt.expectedLevel, parsed["level"]) + } + }) + } +} + +// testContextKey is a custom type for context keys to avoid collisions. +type testContextKey string + +const requestIDContextKey testContextKey = "request_id" + +// TestInterceptorLoggerWithContext verifies context is passed through. +func TestInterceptorLoggerWithContext(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + interceptorLogger := InterceptorLogger(logger) + + // Create context with a value (simulating request context) + ctx := context.WithValue(context.Background(), requestIDContextKey, "test-123") + + interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "context test", "test", "value") + + // Verify log was created (context is passed but not automatically logged) + output := buf.String() + if !strings.Contains(output, "context test") { + t.Error("Expected 'context test' to be logged") + } +} + +// TestInterceptorLoggerMultipleFields verifies multiple structured fields. +func TestInterceptorLoggerMultipleFields(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + interceptorLogger := InterceptorLogger(logger) + ctx := context.Background() + + // Log with multiple fields + interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "multi-field test", + "string_field", "test", + "int_field", 42, + "bool_field", true, + ) + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + // Verify all fields are present + if parsed["string_field"] != "test" { + t.Errorf("Expected string_field='test', got: %v", parsed["string_field"]) + } + + if parsed["int_field"] != float64(42) { // JSON numbers are float64 + t.Errorf("Expected int_field=42, got: %v", parsed["int_field"]) + } + + if parsed["bool_field"] != true { + t.Errorf("Expected bool_field=true, got: %v", parsed["bool_field"]) + } +} + +// TestInterceptorLoggerEmptyFields verifies handling of empty/nil fields. 
+func TestInterceptorLoggerEmptyFields(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + interceptorLogger := InterceptorLogger(logger) + ctx := context.Background() + + // Log with empty fields + interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "empty test") + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + if parsed["msg"] != "empty test" { + t.Errorf("Expected msg='empty test', got: %v", parsed["msg"]) + } +} + +// TestInterceptorLoggerTextFormat verifies adapter works with text format too. +func TestInterceptorLoggerTextFormat(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + interceptorLogger := InterceptorLogger(logger) + ctx := context.Background() + + interceptorLogger.Log(ctx, grpc_logging.LevelInfo, "text format test", "key", "value") + + output := buf.String() + + // Verify text format output + if !strings.Contains(output, "text format test") { + t.Error("Expected 'text format test' in output") + } + + if !strings.Contains(output, "key=value") { + t.Error("Expected 'key=value' in output") + } +} diff --git a/server/middleware/logging/options.go b/server/middleware/logging/options.go index 6037be416..fdb140774 100644 --- a/server/middleware/logging/options.go +++ b/server/middleware/logging/options.go @@ -1,172 +1,172 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "context" - - "github.com/agntcy/dir/server/authn" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -// Common metadata keys for request tracking. -const ( - RequestIDKey = "x-request-id" - CorrelationIDKey = "x-correlation-id" - UserAgentKey = "user-agent" -) - -// Typical field count for pre-allocating fields slice. -// Includes: spiffe_id, request_id, correlation_id, user_agent (4 keys + 4 values = 8 items). -const typicalFieldCount = 8 - -// Noisy endpoints that should be excluded from logging by default. -var noisyEndpoints = map[string]bool{ - "/grpc.health.v1.Health/Check": true, - "/grpc.health.v1.Health/Watch": true, -} - -// extractFieldsFromContext extracts fields from context and metadata for logging. -// This is the core field extraction logic shared by both default and verbose modes. 
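For the metadata keys above to show up in server logs, clients have to send them. A minimal client-side sketch; the ID values are illustrative:

```go
package main

import (
	"context"

	"google.golang.org/grpc/metadata"
)

func main() {
	// A client that wants its calls correlated in the server logs attaches
	// the metadata keys the field extractor above looks for.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"x-request-id", "req-123",
		"x-correlation-id", "corr-456",
	)
	_ = ctx // pass ctx to any stub call, e.g. client.Check(ctx, req)
}
```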
-func extractFieldsFromContext(ctx context.Context) grpc_logging.Fields { - fields := make(grpc_logging.Fields, 0, typicalFieldCount) // Pre-allocate for typical field count - - // Extract SPIFFE ID from authenticated context - if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { - fields = append(fields, "spiffe_id", spiffeID.String()) - } - - // Extract metadata fields - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return fields - } - - // Extract Request ID - if requestID := md.Get(RequestIDKey); len(requestID) > 0 { - fields = append(fields, "request_id", requestID[0]) - } - - // Extract Correlation ID - if correlationID := md.Get(CorrelationIDKey); len(correlationID) > 0 { - fields = append(fields, "correlation_id", correlationID[0]) - } - - // Extract User Agent - if userAgent := md.Get(UserAgentKey); len(userAgent) > 0 { - fields = append(fields, "user_agent", userAgent[0]) - } - - return fields -} - -// ExtractFields extracts custom fields from the gRPC context and call metadata for structured logging. -// This function extracts: -// - SPIFFE ID from authenticated context -// - Request ID from metadata -// - Correlation ID from metadata -// - User Agent from metadata -// - Filters out noisy endpoints (health checks, probes). -func ExtractFields(ctx context.Context, c interceptors.CallMeta) grpc_logging.Fields { - // Filter out noisy endpoints by returning nil fields - if noisyEndpoints[c.FullMethod()] { - return nil - } - - return extractFieldsFromContext(ctx) -} - -// ServerCodeToLevel maps gRPC status codes to appropriate log levels. -// This helps reduce noise in logs by treating expected errors appropriately. -func ServerCodeToLevel(code codes.Code) grpc_logging.Level { - switch code { - // Successful or client-controlled outcomes - INFO level - case codes.OK, - codes.Canceled, - codes.DeadlineExceeded: - return grpc_logging.LevelInfo - - // Expected business logic outcomes - INFO level - case codes.NotFound, - codes.AlreadyExists, - codes.Aborted: - return grpc_logging.LevelInfo - - // Client errors that might need attention - WARN level - case codes.InvalidArgument, - codes.Unauthenticated, - codes.PermissionDenied, - codes.ResourceExhausted, - codes.FailedPrecondition, - codes.OutOfRange: - return grpc_logging.LevelWarn - - // Server errors that require investigation - ERROR level - case codes.Internal, - codes.DataLoss, - codes.Unknown, - codes.Unimplemented, - codes.Unavailable: - return grpc_logging.LevelError - - // Default to WARN for any unhandled codes - default: - return grpc_logging.LevelWarn - } -} - -// ShouldLog determines whether a gRPC call should be logged. -// It filters out noisy endpoints like health checks and readiness probes. -func ShouldLog(fullMethodName string) bool { - return !noisyEndpoints[fullMethodName] -} - -// DefaultOptions returns the recommended logging options for production use. -// These options provide comprehensive logging without excessive verbosity. -// Noisy endpoints (health checks, probes) are filtered out in ExtractFields. 
-func DefaultOptions() []grpc_logging.Option { - return []grpc_logging.Option{ - // Log both the start and finish of RPCs - grpc_logging.WithLogOnEvents( - grpc_logging.StartCall, - grpc_logging.FinishCall, - ), - - // Extract custom fields for better observability - // This also handles filtering of noisy endpoints - grpc_logging.WithFieldsFromContextAndCallMeta(ExtractFields), - - // Map status codes to appropriate log levels - grpc_logging.WithLevels(ServerCodeToLevel), - } -} - -// extractFieldsVerbose extracts fields for verbose logging mode (no filtering). -// Unlike ExtractFields, this does NOT filter health checks - it logs everything. -func extractFieldsVerbose(ctx context.Context, _ interceptors.CallMeta) grpc_logging.Fields { - return extractFieldsFromContext(ctx) -} - -// VerboseOptions returns logging options for development and debugging. -// These options include request/response payloads and don't filter any endpoints. -func VerboseOptions() []grpc_logging.Option { - return []grpc_logging.Option{ - // Log all events including payloads - grpc_logging.WithLogOnEvents( - grpc_logging.StartCall, - grpc_logging.FinishCall, - grpc_logging.PayloadReceived, - grpc_logging.PayloadSent, - ), - - // Extract custom fields, but don't filter anything in verbose mode - grpc_logging.WithFieldsFromContextAndCallMeta(extractFieldsVerbose), - - // Map status codes to appropriate log levels - grpc_logging.WithLevels(ServerCodeToLevel), - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "context" + + "github.com/agntcy/dir/server/authn" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Common metadata keys for request tracking. +const ( + RequestIDKey = "x-request-id" + CorrelationIDKey = "x-correlation-id" + UserAgentKey = "user-agent" +) + +// Typical field count for pre-allocating fields slice. +// Includes: spiffe_id, request_id, correlation_id, user_agent (4 keys + 4 values = 8 items). +const typicalFieldCount = 8 + +// Noisy endpoints that should be excluded from logging by default. +var noisyEndpoints = map[string]bool{ + "/grpc.health.v1.Health/Check": true, + "/grpc.health.v1.Health/Watch": true, +} + +// extractFieldsFromContext extracts fields from context and metadata for logging. +// This is the core field extraction logic shared by both default and verbose modes. 
+func extractFieldsFromContext(ctx context.Context) grpc_logging.Fields { + fields := make(grpc_logging.Fields, 0, typicalFieldCount) // Pre-allocate for typical field count + + // Extract SPIFFE ID from authenticated context + if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { + fields = append(fields, "spiffe_id", spiffeID.String()) + } + + // Extract metadata fields + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return fields + } + + // Extract Request ID + if requestID := md.Get(RequestIDKey); len(requestID) > 0 { + fields = append(fields, "request_id", requestID[0]) + } + + // Extract Correlation ID + if correlationID := md.Get(CorrelationIDKey); len(correlationID) > 0 { + fields = append(fields, "correlation_id", correlationID[0]) + } + + // Extract User Agent + if userAgent := md.Get(UserAgentKey); len(userAgent) > 0 { + fields = append(fields, "user_agent", userAgent[0]) + } + + return fields +} + +// ExtractFields extracts custom fields from the gRPC context and call metadata for structured logging. +// This function extracts: +// - SPIFFE ID from authenticated context +// - Request ID from metadata +// - Correlation ID from metadata +// - User Agent from metadata +// - Filters out noisy endpoints (health checks, probes). +func ExtractFields(ctx context.Context, c interceptors.CallMeta) grpc_logging.Fields { + // Filter out noisy endpoints by returning nil fields + if noisyEndpoints[c.FullMethod()] { + return nil + } + + return extractFieldsFromContext(ctx) +} + +// ServerCodeToLevel maps gRPC status codes to appropriate log levels. +// This helps reduce noise in logs by treating expected errors appropriately. +func ServerCodeToLevel(code codes.Code) grpc_logging.Level { + switch code { + // Successful or client-controlled outcomes - INFO level + case codes.OK, + codes.Canceled, + codes.DeadlineExceeded: + return grpc_logging.LevelInfo + + // Expected business logic outcomes - INFO level + case codes.NotFound, + codes.AlreadyExists, + codes.Aborted: + return grpc_logging.LevelInfo + + // Client errors that might need attention - WARN level + case codes.InvalidArgument, + codes.Unauthenticated, + codes.PermissionDenied, + codes.ResourceExhausted, + codes.FailedPrecondition, + codes.OutOfRange: + return grpc_logging.LevelWarn + + // Server errors that require investigation - ERROR level + case codes.Internal, + codes.DataLoss, + codes.Unknown, + codes.Unimplemented, + codes.Unavailable: + return grpc_logging.LevelError + + // Default to WARN for any unhandled codes + default: + return grpc_logging.LevelWarn + } +} + +// ShouldLog determines whether a gRPC call should be logged. +// It filters out noisy endpoints like health checks and readiness probes. +func ShouldLog(fullMethodName string) bool { + return !noisyEndpoints[fullMethodName] +} + +// DefaultOptions returns the recommended logging options for production use. +// These options provide comprehensive logging without excessive verbosity. +// Noisy endpoints (health checks, probes) are filtered out in ExtractFields. 
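+//
+// A minimal wiring sketch (assumes a grpc_logging.Logger adapter such as the
+// InterceptorLogger helper used by ServerOptions in server.go):
+//
+//	srv := grpc.NewServer(
+//		grpc.ChainUnaryInterceptor(
+//			grpc_logging.UnaryServerInterceptor(InterceptorLogger(logger), DefaultOptions()...),
+//		),
+//	)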
+func DefaultOptions() []grpc_logging.Option { + return []grpc_logging.Option{ + // Log both the start and finish of RPCs + grpc_logging.WithLogOnEvents( + grpc_logging.StartCall, + grpc_logging.FinishCall, + ), + + // Extract custom fields for better observability + // This also handles filtering of noisy endpoints + grpc_logging.WithFieldsFromContextAndCallMeta(ExtractFields), + + // Map status codes to appropriate log levels + grpc_logging.WithLevels(ServerCodeToLevel), + } +} + +// extractFieldsVerbose extracts fields for verbose logging mode (no filtering). +// Unlike ExtractFields, this does NOT filter health checks - it logs everything. +func extractFieldsVerbose(ctx context.Context, _ interceptors.CallMeta) grpc_logging.Fields { + return extractFieldsFromContext(ctx) +} + +// VerboseOptions returns logging options for development and debugging. +// These options include request/response payloads and don't filter any endpoints. +func VerboseOptions() []grpc_logging.Option { + return []grpc_logging.Option{ + // Log all events including payloads + grpc_logging.WithLogOnEvents( + grpc_logging.StartCall, + grpc_logging.FinishCall, + grpc_logging.PayloadReceived, + grpc_logging.PayloadSent, + ), + + // Extract custom fields, but don't filter anything in verbose mode + grpc_logging.WithFieldsFromContextAndCallMeta(extractFieldsVerbose), + + // Map status codes to appropriate log levels + grpc_logging.WithLevels(ServerCodeToLevel), + } +} diff --git a/server/middleware/logging/options_test.go b/server/middleware/logging/options_test.go index f51b3f56c..9f11f0132 100644 --- a/server/middleware/logging/options_test.go +++ b/server/middleware/logging/options_test.go @@ -1,472 +1,472 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "context" - "testing" - - "github.com/agntcy/dir/server/authn" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" - "github.com/spiffe/go-spiffe/v2/spiffeid" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" -) - -// fieldsToMap converts a Fields slice to a map for easier testing. -func fieldsToMap(t *testing.T, fields grpc_logging.Fields) map[string]string { - t.Helper() - - fieldsMap := make(map[string]string) - for i := 0; i < len(fields); i += 2 { - key, ok := fields[i].(string) - if !ok { - t.Fatalf("expected field key at index %d to be string, got %T", i, fields[i]) - } - - value, ok := fields[i+1].(string) - if !ok { - t.Fatalf("expected field value at index %d to be string, got %T", i+1, fields[i+1]) - } - - fieldsMap[key] = value - } - - return fieldsMap -} - -// assertFieldsMatch verifies that the actual fields match the expected fields. -func assertFieldsMatch(t *testing.T, actual map[string]string, expected map[string]string) { - t.Helper() - - // Check that all expected fields are present - for key, expectedValue := range expected { - if actualValue, ok := actual[key]; !ok { - t.Errorf("expected field %q not found", key) - } else if actualValue != expectedValue { - t.Errorf("field %q = %q, want %q", key, actualValue, expectedValue) - } - } - - // Check that no unexpected fields are present - for key := range actual { - if _, ok := expected[key]; !ok { - t.Errorf("unexpected field %q found", key) - } - } -} - -// TestExtractFields tests the extraction of custom fields from gRPC context. 
-func TestExtractFields(t *testing.T) { - t.Parallel() - - //nolint:containedctx // Context in test table struct is acceptable for test organization - tests := []struct { - name string - ctx context.Context - callMeta interceptors.CallMeta - expectedFields map[string]string - expectNil bool - }{ - { - name: "empty context", - ctx: context.Background(), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{}, - }, - { - name: "context with SPIFFE ID", - ctx: func() context.Context { - spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/test") - - return context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - }(), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{ - "spiffe_id": "spiffe://example.org/agent/test", - }, - }, - { - name: "context with metadata", - ctx: metadata.NewIncomingContext(context.Background(), metadata.Pairs( - RequestIDKey, "req-123", - CorrelationIDKey, "corr-456", - UserAgentKey, "grpc-go/1.0.0", - )), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{ - "request_id": "req-123", - "correlation_id": "corr-456", - "user_agent": "grpc-go/1.0.0", - }, - }, - { - name: "context with SPIFFE ID and metadata", - ctx: func() context.Context { - spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/test") - ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - - return metadata.NewIncomingContext(ctx, metadata.Pairs( - RequestIDKey, "req-789", - UserAgentKey, "custom-client/2.0", - )) - }(), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{ - "spiffe_id": "spiffe://example.org/agent/test", - "request_id": "req-789", - "user_agent": "custom-client/2.0", - }, - }, - { - name: "context with partial metadata", - ctx: metadata.NewIncomingContext(context.Background(), metadata.Pairs( - RequestIDKey, "req-only", - )), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{ - "request_id": "req-only", - }, - }, - { - name: "noisy endpoint - health check", - ctx: context.Background(), - callMeta: interceptors.NewServerCallMeta("/grpc.health.v1.Health/Check", nil, nil), - expectNil: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - fields := ExtractFields(tt.ctx, tt.callMeta) - - if tt.expectNil { - if fields != nil { - t.Errorf("expected nil fields for noisy endpoint, got %v", fields) - } - - return - } - - fieldsMap := fieldsToMap(t, fields) - assertFieldsMatch(t, fieldsMap, tt.expectedFields) - }) - } -} - -// TestServerCodeToLevel tests the mapping of gRPC status codes to log levels. 
-func TestServerCodeToLevel(t *testing.T) { - t.Parallel() - - tests := []struct { - code codes.Code - expectedLevel grpc_logging.Level - }{ - // INFO level codes - {codes.OK, grpc_logging.LevelInfo}, - {codes.Canceled, grpc_logging.LevelInfo}, - {codes.DeadlineExceeded, grpc_logging.LevelInfo}, - {codes.NotFound, grpc_logging.LevelInfo}, - {codes.AlreadyExists, grpc_logging.LevelInfo}, - {codes.Aborted, grpc_logging.LevelInfo}, - - // WARN level codes - {codes.InvalidArgument, grpc_logging.LevelWarn}, - {codes.Unauthenticated, grpc_logging.LevelWarn}, - {codes.PermissionDenied, grpc_logging.LevelWarn}, - {codes.ResourceExhausted, grpc_logging.LevelWarn}, - {codes.FailedPrecondition, grpc_logging.LevelWarn}, - {codes.OutOfRange, grpc_logging.LevelWarn}, - - // ERROR level codes - {codes.Internal, grpc_logging.LevelError}, - {codes.DataLoss, grpc_logging.LevelError}, - {codes.Unknown, grpc_logging.LevelError}, - {codes.Unimplemented, grpc_logging.LevelError}, - {codes.Unavailable, grpc_logging.LevelError}, - } - - for _, tt := range tests { - t.Run(tt.code.String(), func(t *testing.T) { - t.Parallel() - - level := ServerCodeToLevel(tt.code) - if level != tt.expectedLevel { - t.Errorf("ServerCodeToLevel(%v) = %v, want %v", tt.code, level, tt.expectedLevel) - } - }) - } -} - -// TestShouldLog tests the filtering of noisy endpoints. -func TestShouldLog(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - fullMethodName string - shouldLog bool - }{ - { - name: "regular method should log", - fullMethodName: "/dir.core.v1.CoreService/GetAgent", - shouldLog: true, - }, - { - name: "health check should not log", - fullMethodName: "/grpc.health.v1.Health/Check", - shouldLog: false, - }, - { - name: "health watch should not log", - fullMethodName: "/grpc.health.v1.Health/Watch", - shouldLog: false, - }, - { - name: "another regular method should log", - fullMethodName: "/dir.routing.v1.RoutingService/Query", - shouldLog: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := ShouldLog(tt.fullMethodName) - if result != tt.shouldLog { - t.Errorf("ShouldLog(%q) = %v, want %v", tt.fullMethodName, result, tt.shouldLog) - } - }) - } -} - -// TestDefaultOptions tests that DefaultOptions returns proper configuration. -func TestDefaultOptions(t *testing.T) { - t.Parallel() - - opts := DefaultOptions() - - // Verify we got options (non-empty) - if len(opts) == 0 { - t.Error("DefaultOptions() returned empty slice, want non-empty options") - } - - // The options should include: - // 1. LogOnEvents (StartCall, FinishCall) - // 2. FieldsFromContextAndCallMeta - // 3. Levels - const expectedOptions = 3 - if len(opts) != expectedOptions { - t.Errorf("DefaultOptions() returned %d options, want %d", len(opts), expectedOptions) - } -} - -// TestVerboseOptions tests that VerboseOptions returns proper configuration. -func TestVerboseOptions(t *testing.T) { - t.Parallel() - - opts := VerboseOptions() - - // Verify we got options (non-empty) - if len(opts) == 0 { - t.Error("VerboseOptions() returned empty slice, want non-empty options") - } - - // The options should include: - // 1. LogOnEvents (StartCall, FinishCall, PayloadReceived, PayloadSent) - // 2. FieldsFromContextAndCallMeta - // 3. Levels - const expectedOptions = 3 - if len(opts) != expectedOptions { - t.Errorf("VerboseOptions() returned %d options, want %d", len(opts), expectedOptions) - } -} - -// TestExtractFieldsVerbose tests the field extraction for verbose mode. 
-func TestExtractFieldsVerbose(t *testing.T) { - t.Parallel() - - //nolint:containedctx // Context in test table struct is acceptable for test organization - tests := []struct { - name string - ctx context.Context - callMeta interceptors.CallMeta - expectedFields map[string]string - }{ - { - name: "verbose mode with empty context", - ctx: context.Background(), - callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), - expectedFields: map[string]string{}, - }, - { - name: "verbose mode with SPIFFE ID", - ctx: func() context.Context { - spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/verbose") - - return context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - }(), - callMeta: interceptors.NewServerCallMeta("/test.Service/VerboseMethod", nil, nil), - expectedFields: map[string]string{ - "spiffe_id": "spiffe://example.org/agent/verbose", - }, - }, - { - name: "verbose mode with all metadata", - ctx: func() context.Context { - spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/full") - ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - - return metadata.NewIncomingContext(ctx, metadata.Pairs( - RequestIDKey, "verbose-req-123", - CorrelationIDKey, "verbose-corr-456", - UserAgentKey, "verbose-agent/1.0", - )) - }(), - callMeta: interceptors.NewServerCallMeta("/test.Service/FullMethod", nil, nil), - expectedFields: map[string]string{ - "spiffe_id": "spiffe://example.org/agent/full", - "request_id": "verbose-req-123", - "correlation_id": "verbose-corr-456", - "user_agent": "verbose-agent/1.0", - }, - }, - { - name: "verbose mode should NOT filter health checks", - ctx: context.Background(), - callMeta: interceptors.NewServerCallMeta("/grpc.health.v1.Health/Check", nil, nil), - expectedFields: map[string]string{}, // Should return empty fields, NOT nil - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - fields := extractFieldsVerbose(tt.ctx, tt.callMeta) - - // Verbose mode should NEVER return nil (unlike DefaultOptions which filters) - if fields == nil { - t.Error("extractFieldsVerbose() returned nil, verbose mode should not filter") - } - - fieldsMap := fieldsToMap(t, fields) - assertFieldsMatch(t, fieldsMap, tt.expectedFields) - }) - } -} - -// TestVerboseOptionsDoesNotFilterHealthChecks verifies verbose mode logs everything. -func TestVerboseOptionsDoesNotFilterHealthChecks(t *testing.T) { - t.Parallel() - - // VerboseOptions should NOT filter health checks (returns fields, not nil) - // This is different from DefaultOptions which filters them - opts := VerboseOptions() - if len(opts) != 3 { - t.Errorf("VerboseOptions() returned %d options, want 3", len(opts)) - } - - // Verify that it's configured for verbose logging (4 events vs 2 in default) - // This is implicitly tested by the options count and structure -} - -// TestExtractFieldsNilSafety tests that ExtractFields handles nil/empty contexts gracefully. -func TestExtractFieldsNilSafety(t *testing.T) { - t.Parallel() - - // Test with nil context (shouldn't panic) - defer func() { - if r := recover(); r != nil { - t.Errorf("ExtractFields panicked with nil context: %v", r) - } - }() - - fields := ExtractFields(context.Background(), interceptors.NewServerCallMeta("/test.Service/Method", nil, nil)) - if fields == nil { - t.Error("ExtractFields returned nil, want empty slice") - } -} - -// TestServerCodeToLevelUnknownCode tests handling of unknown gRPC codes. 
-func TestServerCodeToLevelUnknownCode(t *testing.T) { - t.Parallel() - - // Test with an unknown/future code (should default to WARN) - unknownCode := codes.Code(999) - level := ServerCodeToLevel(unknownCode) - - if level != grpc_logging.LevelWarn { - t.Errorf("ServerCodeToLevel(unknown) = %v, want %v", level, grpc_logging.LevelWarn) - } -} - -// TestExtractFieldsWithMultipleMetadataValues tests extraction when metadata has multiple values. -func TestExtractFieldsWithMultipleMetadataValues(t *testing.T) { - t.Parallel() - - // Create context with multiple values for the same key (gRPC allows this) - md := metadata.Pairs( - RequestIDKey, "first-id", - RequestIDKey, "second-id", - ) - ctx := metadata.NewIncomingContext(context.Background(), md) - - fields := ExtractFields(ctx, interceptors.NewServerCallMeta("/test.Service/Method", nil, nil)) - - fieldsMap := fieldsToMap(t, fields) - - // Should extract the first value - if requestID, ok := fieldsMap["request_id"]; !ok { - t.Error("expected request_id field not found") - } else if requestID != "first-id" { - t.Errorf("request_id = %q, want %q", requestID, "first-id") - } -} - -// TestNoisyEndpoints tests that all expected noisy endpoints are filtered. -func TestNoisyEndpoints(t *testing.T) { - t.Parallel() - - expectedNoisyEndpoints := []string{ - "/grpc.health.v1.Health/Check", - "/grpc.health.v1.Health/Watch", - } - - for _, endpoint := range expectedNoisyEndpoints { - t.Run(endpoint, func(t *testing.T) { - t.Parallel() - - if ShouldLog(endpoint) { - t.Errorf("expected %q to be filtered (noisy), but ShouldLog returned true", endpoint) - } - }) - } -} - -// TestExtractFieldsEmptyMetadataValues tests extraction with empty metadata values. -func TestExtractFieldsEmptyMetadataValues(t *testing.T) { - t.Parallel() - - // Create metadata with empty values - md := metadata.Pairs( - RequestIDKey, "", - ) - ctx := metadata.NewIncomingContext(context.Background(), md) - - fields := ExtractFields(ctx, interceptors.NewServerCallMeta("/test.Service/Method", nil, nil)) - - fieldsMap := fieldsToMap(t, fields) - - // Empty values should still be extracted - if requestID, ok := fieldsMap["request_id"]; !ok { - t.Error("expected request_id field not found") - } else if requestID != "" { - t.Errorf("request_id = %q, want empty string", requestID) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "context" + "testing" + + "github.com/agntcy/dir/server/authn" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// fieldsToMap converts a Fields slice to a map for easier testing. +func fieldsToMap(t *testing.T, fields grpc_logging.Fields) map[string]string { + t.Helper() + + fieldsMap := make(map[string]string) + for i := 0; i < len(fields); i += 2 { + key, ok := fields[i].(string) + if !ok { + t.Fatalf("expected field key at index %d to be string, got %T", i, fields[i]) + } + + value, ok := fields[i+1].(string) + if !ok { + t.Fatalf("expected field value at index %d to be string, got %T", i+1, fields[i+1]) + } + + fieldsMap[key] = value + } + + return fieldsMap +} + +// assertFieldsMatch verifies that the actual fields match the expected fields. 
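+// It reports a failure both when an expected key is missing or carries the
+// wrong value and when an unexpected extra key is present.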
+func assertFieldsMatch(t *testing.T, actual map[string]string, expected map[string]string) { + t.Helper() + + // Check that all expected fields are present + for key, expectedValue := range expected { + if actualValue, ok := actual[key]; !ok { + t.Errorf("expected field %q not found", key) + } else if actualValue != expectedValue { + t.Errorf("field %q = %q, want %q", key, actualValue, expectedValue) + } + } + + // Check that no unexpected fields are present + for key := range actual { + if _, ok := expected[key]; !ok { + t.Errorf("unexpected field %q found", key) + } + } +} + +// TestExtractFields tests the extraction of custom fields from gRPC context. +func TestExtractFields(t *testing.T) { + t.Parallel() + + //nolint:containedctx // Context in test table struct is acceptable for test organization + tests := []struct { + name string + ctx context.Context + callMeta interceptors.CallMeta + expectedFields map[string]string + expectNil bool + }{ + { + name: "empty context", + ctx: context.Background(), + callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), + expectedFields: map[string]string{}, + }, + { + name: "context with SPIFFE ID", + ctx: func() context.Context { + spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/test") + + return context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) + }(), + callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), + expectedFields: map[string]string{ + "spiffe_id": "spiffe://example.org/agent/test", + }, + }, + { + name: "context with metadata", + ctx: metadata.NewIncomingContext(context.Background(), metadata.Pairs( + RequestIDKey, "req-123", + CorrelationIDKey, "corr-456", + UserAgentKey, "grpc-go/1.0.0", + )), + callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), + expectedFields: map[string]string{ + "request_id": "req-123", + "correlation_id": "corr-456", + "user_agent": "grpc-go/1.0.0", + }, + }, + { + name: "context with SPIFFE ID and metadata", + ctx: func() context.Context { + spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/test") + ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) + + return metadata.NewIncomingContext(ctx, metadata.Pairs( + RequestIDKey, "req-789", + UserAgentKey, "custom-client/2.0", + )) + }(), + callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), + expectedFields: map[string]string{ + "spiffe_id": "spiffe://example.org/agent/test", + "request_id": "req-789", + "user_agent": "custom-client/2.0", + }, + }, + { + name: "context with partial metadata", + ctx: metadata.NewIncomingContext(context.Background(), metadata.Pairs( + RequestIDKey, "req-only", + )), + callMeta: interceptors.NewServerCallMeta("/test.Service/Method", nil, nil), + expectedFields: map[string]string{ + "request_id": "req-only", + }, + }, + { + name: "noisy endpoint - health check", + ctx: context.Background(), + callMeta: interceptors.NewServerCallMeta("/grpc.health.v1.Health/Check", nil, nil), + expectNil: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + fields := ExtractFields(tt.ctx, tt.callMeta) + + if tt.expectNil { + if fields != nil { + t.Errorf("expected nil fields for noisy endpoint, got %v", fields) + } + + return + } + + fieldsMap := fieldsToMap(t, fields) + assertFieldsMatch(t, fieldsMap, tt.expectedFields) + }) + } +} + +// TestServerCodeToLevel tests the mapping of gRPC status codes to log levels. 
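+// For example, per the mapping under test, ServerCodeToLevel(codes.NotFound)
+// must yield LevelInfo while ServerCodeToLevel(codes.Internal) must yield LevelError.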
+func TestServerCodeToLevel(t *testing.T) { + t.Parallel() + + tests := []struct { + code codes.Code + expectedLevel grpc_logging.Level + }{ + // INFO level codes + {codes.OK, grpc_logging.LevelInfo}, + {codes.Canceled, grpc_logging.LevelInfo}, + {codes.DeadlineExceeded, grpc_logging.LevelInfo}, + {codes.NotFound, grpc_logging.LevelInfo}, + {codes.AlreadyExists, grpc_logging.LevelInfo}, + {codes.Aborted, grpc_logging.LevelInfo}, + + // WARN level codes + {codes.InvalidArgument, grpc_logging.LevelWarn}, + {codes.Unauthenticated, grpc_logging.LevelWarn}, + {codes.PermissionDenied, grpc_logging.LevelWarn}, + {codes.ResourceExhausted, grpc_logging.LevelWarn}, + {codes.FailedPrecondition, grpc_logging.LevelWarn}, + {codes.OutOfRange, grpc_logging.LevelWarn}, + + // ERROR level codes + {codes.Internal, grpc_logging.LevelError}, + {codes.DataLoss, grpc_logging.LevelError}, + {codes.Unknown, grpc_logging.LevelError}, + {codes.Unimplemented, grpc_logging.LevelError}, + {codes.Unavailable, grpc_logging.LevelError}, + } + + for _, tt := range tests { + t.Run(tt.code.String(), func(t *testing.T) { + t.Parallel() + + level := ServerCodeToLevel(tt.code) + if level != tt.expectedLevel { + t.Errorf("ServerCodeToLevel(%v) = %v, want %v", tt.code, level, tt.expectedLevel) + } + }) + } +} + +// TestShouldLog tests the filtering of noisy endpoints. +func TestShouldLog(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + fullMethodName string + shouldLog bool + }{ + { + name: "regular method should log", + fullMethodName: "/dir.core.v1.CoreService/GetAgent", + shouldLog: true, + }, + { + name: "health check should not log", + fullMethodName: "/grpc.health.v1.Health/Check", + shouldLog: false, + }, + { + name: "health watch should not log", + fullMethodName: "/grpc.health.v1.Health/Watch", + shouldLog: false, + }, + { + name: "another regular method should log", + fullMethodName: "/dir.routing.v1.RoutingService/Query", + shouldLog: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := ShouldLog(tt.fullMethodName) + if result != tt.shouldLog { + t.Errorf("ShouldLog(%q) = %v, want %v", tt.fullMethodName, result, tt.shouldLog) + } + }) + } +} + +// TestDefaultOptions tests that DefaultOptions returns proper configuration. +func TestDefaultOptions(t *testing.T) { + t.Parallel() + + opts := DefaultOptions() + + // Verify we got options (non-empty) + if len(opts) == 0 { + t.Error("DefaultOptions() returned empty slice, want non-empty options") + } + + // The options should include: + // 1. LogOnEvents (StartCall, FinishCall) + // 2. FieldsFromContextAndCallMeta + // 3. Levels + const expectedOptions = 3 + if len(opts) != expectedOptions { + t.Errorf("DefaultOptions() returned %d options, want %d", len(opts), expectedOptions) + } +} + +// TestVerboseOptions tests that VerboseOptions returns proper configuration. +func TestVerboseOptions(t *testing.T) { + t.Parallel() + + opts := VerboseOptions() + + // Verify we got options (non-empty) + if len(opts) == 0 { + t.Error("VerboseOptions() returned empty slice, want non-empty options") + } + + // The options should include: + // 1. LogOnEvents (StartCall, FinishCall, PayloadReceived, PayloadSent) + // 2. FieldsFromContextAndCallMeta + // 3. Levels + const expectedOptions = 3 + if len(opts) != expectedOptions { + t.Errorf("VerboseOptions() returned %d options, want %d", len(opts), expectedOptions) + } +} + +// TestExtractFieldsVerbose tests the field extraction for verbose mode. 
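+// Unlike ExtractFields, extractFieldsVerbose must return (possibly empty)
+// fields even for health-check methods; it never returns nil.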
+func TestExtractFieldsVerbose(t *testing.T) {
+	t.Parallel()
+
+	//nolint:containedctx // Context in test table struct is acceptable for test organization
+	tests := []struct {
+		name           string
+		ctx            context.Context
+		callMeta       interceptors.CallMeta
+		expectedFields map[string]string
+	}{
+		{
+			name:           "verbose mode with empty context",
+			ctx:            context.Background(),
+			callMeta:       interceptors.NewServerCallMeta("/test.Service/Method", nil, nil),
+			expectedFields: map[string]string{},
+		},
+		{
+			name: "verbose mode with SPIFFE ID",
+			ctx: func() context.Context {
+				spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/verbose")
+
+				return context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID)
+			}(),
+			callMeta: interceptors.NewServerCallMeta("/test.Service/VerboseMethod", nil, nil),
+			expectedFields: map[string]string{
+				"spiffe_id": "spiffe://example.org/agent/verbose",
+			},
+		},
+		{
+			name: "verbose mode with all metadata",
+			ctx: func() context.Context {
+				spiffeID := spiffeid.RequireFromString("spiffe://example.org/agent/full")
+				ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID)
+
+				return metadata.NewIncomingContext(ctx, metadata.Pairs(
+					RequestIDKey, "verbose-req-123",
+					CorrelationIDKey, "verbose-corr-456",
+					UserAgentKey, "verbose-agent/1.0",
+				))
+			}(),
+			callMeta: interceptors.NewServerCallMeta("/test.Service/FullMethod", nil, nil),
+			expectedFields: map[string]string{
+				"spiffe_id":      "spiffe://example.org/agent/full",
+				"request_id":     "verbose-req-123",
+				"correlation_id": "verbose-corr-456",
+				"user_agent":     "verbose-agent/1.0",
+			},
+		},
+		{
+			name:           "verbose mode should NOT filter health checks",
+			ctx:            context.Background(),
+			callMeta:       interceptors.NewServerCallMeta("/grpc.health.v1.Health/Check", nil, nil),
+			expectedFields: map[string]string{}, // Should return empty fields, NOT nil
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			fields := extractFieldsVerbose(tt.ctx, tt.callMeta)
+
+			// Verbose mode should NEVER return nil (unlike DefaultOptions which filters)
+			if fields == nil {
+				t.Error("extractFieldsVerbose() returned nil, verbose mode should not filter")
+			}
+
+			fieldsMap := fieldsToMap(t, fields)
+			assertFieldsMatch(t, fieldsMap, tt.expectedFields)
+		})
+	}
+}
+
+// TestVerboseOptionsDoesNotFilterHealthChecks verifies verbose mode logs everything.
+func TestVerboseOptionsDoesNotFilterHealthChecks(t *testing.T) {
+	t.Parallel()
+
+	// VerboseOptions should NOT filter health checks (returns fields, not nil)
+	// This is different from DefaultOptions which filters them
+	opts := VerboseOptions()
+	if len(opts) != 3 {
+		t.Errorf("VerboseOptions() returned %d options, want 3", len(opts))
+	}
+
+	// Verify that it's configured for verbose logging (4 events vs 2 in default)
+	// This is implicitly tested by the options count and structure
+}
+
+// TestExtractFieldsNilSafety tests that ExtractFields handles empty, metadata-free contexts gracefully.
+func TestExtractFieldsNilSafety(t *testing.T) {
+	t.Parallel()
+
+	// Use an empty background context; extraction must not panic
+	defer func() {
+		if r := recover(); r != nil {
+			t.Errorf("ExtractFields panicked with empty context: %v", r)
+		}
+	}()
+
+	fields := ExtractFields(context.Background(), interceptors.NewServerCallMeta("/test.Service/Method", nil, nil))
+	if fields == nil {
+		t.Error("ExtractFields returned nil, want empty slice")
+	}
+}
+
+// TestServerCodeToLevelUnknownCode tests handling of unknown gRPC codes.
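+// codes.Code(999) is not a defined gRPC status code, so no switch case in
+// ServerCodeToLevel matches and the default WARN branch must apply.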
+func TestServerCodeToLevelUnknownCode(t *testing.T) { + t.Parallel() + + // Test with an unknown/future code (should default to WARN) + unknownCode := codes.Code(999) + level := ServerCodeToLevel(unknownCode) + + if level != grpc_logging.LevelWarn { + t.Errorf("ServerCodeToLevel(unknown) = %v, want %v", level, grpc_logging.LevelWarn) + } +} + +// TestExtractFieldsWithMultipleMetadataValues tests extraction when metadata has multiple values. +func TestExtractFieldsWithMultipleMetadataValues(t *testing.T) { + t.Parallel() + + // Create context with multiple values for the same key (gRPC allows this) + md := metadata.Pairs( + RequestIDKey, "first-id", + RequestIDKey, "second-id", + ) + ctx := metadata.NewIncomingContext(context.Background(), md) + + fields := ExtractFields(ctx, interceptors.NewServerCallMeta("/test.Service/Method", nil, nil)) + + fieldsMap := fieldsToMap(t, fields) + + // Should extract the first value + if requestID, ok := fieldsMap["request_id"]; !ok { + t.Error("expected request_id field not found") + } else if requestID != "first-id" { + t.Errorf("request_id = %q, want %q", requestID, "first-id") + } +} + +// TestNoisyEndpoints tests that all expected noisy endpoints are filtered. +func TestNoisyEndpoints(t *testing.T) { + t.Parallel() + + expectedNoisyEndpoints := []string{ + "/grpc.health.v1.Health/Check", + "/grpc.health.v1.Health/Watch", + } + + for _, endpoint := range expectedNoisyEndpoints { + t.Run(endpoint, func(t *testing.T) { + t.Parallel() + + if ShouldLog(endpoint) { + t.Errorf("expected %q to be filtered (noisy), but ShouldLog returned true", endpoint) + } + }) + } +} + +// TestExtractFieldsEmptyMetadataValues tests extraction with empty metadata values. +func TestExtractFieldsEmptyMetadataValues(t *testing.T) { + t.Parallel() + + // Create metadata with empty values + md := metadata.Pairs( + RequestIDKey, "", + ) + ctx := metadata.NewIncomingContext(context.Background(), md) + + fields := ExtractFields(ctx, interceptors.NewServerCallMeta("/test.Service/Method", nil, nil)) + + fieldsMap := fieldsToMap(t, fields) + + // Empty values should still be extracted + if requestID, ok := fieldsMap["request_id"]; !ok { + t.Error("expected request_id field not found") + } else if requestID != "" { + t.Errorf("request_id = %q, want empty string", requestID) + } +} diff --git a/server/middleware/logging/server.go b/server/middleware/logging/server.go index 789040c49..83241454c 100644 --- a/server/middleware/logging/server.go +++ b/server/middleware/logging/server.go @@ -1,35 +1,35 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "log/slog" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" - "google.golang.org/grpc" -) - -// ServerOptions creates unary and stream interceptors for gRPC server logging. -// If verbose is true, uses VerboseOptions (includes payloads), otherwise uses DefaultOptions. 
-func ServerOptions(logger *slog.Logger, verbose bool) []grpc.ServerOption { - // Create the interceptor logger adapter - interceptorLogger := InterceptorLogger(logger) - - // Choose options based on verbose mode - var opts []grpc_logging.Option - if verbose { - opts = VerboseOptions() - } else { - opts = DefaultOptions() - } - - return []grpc.ServerOption{ - grpc.ChainUnaryInterceptor( - grpc_logging.UnaryServerInterceptor(interceptorLogger, opts...), - ), - grpc.ChainStreamInterceptor( - grpc_logging.StreamServerInterceptor(interceptorLogger, opts...), - ), - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "log/slog" + + grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "google.golang.org/grpc" +) + +// ServerOptions creates unary and stream interceptors for gRPC server logging. +// If verbose is true, uses VerboseOptions (includes payloads), otherwise uses DefaultOptions. +func ServerOptions(logger *slog.Logger, verbose bool) []grpc.ServerOption { + // Create the interceptor logger adapter + interceptorLogger := InterceptorLogger(logger) + + // Choose options based on verbose mode + var opts []grpc_logging.Option + if verbose { + opts = VerboseOptions() + } else { + opts = DefaultOptions() + } + + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor( + grpc_logging.UnaryServerInterceptor(interceptorLogger, opts...), + ), + grpc.ChainStreamInterceptor( + grpc_logging.StreamServerInterceptor(interceptorLogger, opts...), + ), + } +} diff --git a/server/middleware/logging/server_test.go b/server/middleware/logging/server_test.go index cfd49c6c9..7254d1678 100644 --- a/server/middleware/logging/server_test.go +++ b/server/middleware/logging/server_test.go @@ -1,84 +1,84 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "bytes" - "log/slog" - "testing" -) - -// TestServerOptions tests the ServerOptions factory function. -func TestServerOptions(t *testing.T) { - t.Parallel() - - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - tests := []struct { - name string - verbose bool - expectedOpts int - }{ - { - name: "default mode", - verbose: false, - expectedOpts: 2, // unary + stream interceptors - }, - { - name: "verbose mode", - verbose: true, - expectedOpts: 2, // unary + stream interceptors - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - opts := ServerOptions(logger, tt.verbose) - - if len(opts) != tt.expectedOpts { - t.Errorf("ServerOptions() returned %d options, want %d", len(opts), tt.expectedOpts) - } - }) - } -} - -// TestServerOptionsNonNil tests that ServerOptions never returns nil. -func TestServerOptionsNonNil(t *testing.T) { - t.Parallel() - - var buf bytes.Buffer - - logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - - opts := ServerOptions(logger, false) - if opts == nil { - t.Error("ServerOptions() returned nil, want non-nil slice") - } - - optsVerbose := ServerOptions(logger, true) - if optsVerbose == nil { - t.Error("ServerOptions(verbose=true) returned nil, want non-nil slice") - } -} - -// TestServerOptionsWithNilLogger tests that ServerOptions doesn't panic with nil logger. 
-func TestServerOptionsWithNilLogger(t *testing.T) { - t.Parallel() - - // This should not panic - defer func() { - if r := recover(); r != nil { - t.Errorf("ServerOptions panicked with nil logger: %v", r) - } - }() - - opts := ServerOptions(nil, false) - if opts == nil { - t.Error("ServerOptions() returned nil, want non-nil slice") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "bytes" + "log/slog" + "testing" +) + +// TestServerOptions tests the ServerOptions factory function. +func TestServerOptions(t *testing.T) { + t.Parallel() + + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + tests := []struct { + name string + verbose bool + expectedOpts int + }{ + { + name: "default mode", + verbose: false, + expectedOpts: 2, // unary + stream interceptors + }, + { + name: "verbose mode", + verbose: true, + expectedOpts: 2, // unary + stream interceptors + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + opts := ServerOptions(logger, tt.verbose) + + if len(opts) != tt.expectedOpts { + t.Errorf("ServerOptions() returned %d options, want %d", len(opts), tt.expectedOpts) + } + }) + } +} + +// TestServerOptionsNonNil tests that ServerOptions never returns nil. +func TestServerOptionsNonNil(t *testing.T) { + t.Parallel() + + var buf bytes.Buffer + + logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + + opts := ServerOptions(logger, false) + if opts == nil { + t.Error("ServerOptions() returned nil, want non-nil slice") + } + + optsVerbose := ServerOptions(logger, true) + if optsVerbose == nil { + t.Error("ServerOptions(verbose=true) returned nil, want non-nil slice") + } +} + +// TestServerOptionsWithNilLogger tests that ServerOptions doesn't panic with nil logger. +func TestServerOptionsWithNilLogger(t *testing.T) { + t.Parallel() + + // This should not panic + defer func() { + if r := recover(); r != nil { + t.Errorf("ServerOptions panicked with nil logger: %v", r) + } + }() + + opts := ServerOptions(nil, false) + if opts == nil { + t.Error("ServerOptions() returned nil, want non-nil slice") + } +} diff --git a/server/middleware/ratelimit/config/config.go b/server/middleware/ratelimit/config/config.go index 8bc13bd8d..aa63c3545 100644 --- a/server/middleware/ratelimit/config/config.go +++ b/server/middleware/ratelimit/config/config.go @@ -1,176 +1,176 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "errors" - "fmt" -) - -// Default rate limiting configuration values. -const ( - // DefaultGlobalRPS is the default global rate limit in requests per second - // for unauthenticated clients. - DefaultGlobalRPS = 100.0 - - // DefaultGlobalBurst is the default burst capacity for the global rate limiter. - DefaultGlobalBurst = 200 - - // DefaultPerClientRPS is the default rate limit in requests per second - // for each authenticated client. - DefaultPerClientRPS = 1000.0 - - // DefaultPerClientBurst is the default burst capacity for per-client rate limiters. - DefaultPerClientBurst = 1500 -) - -// Config defines rate limiting configuration for the gRPC server. -// It supports global rate limiting for unauthenticated clients, -// per-client rate limiting for authenticated clients (identified by SPIFFE ID), -// and optional per-method overrides for fine-grained control. 
-type Config struct { - // Enabled determines if rate limiting is active. - // When false, all rate limiting checks are bypassed. - Enabled bool `json:"enabled" mapstructure:"enabled"` - - // GlobalRPS defines the global rate limit in requests per second - // for unauthenticated clients (no SPIFFE ID in context). - // This is a fallback limit to prevent abuse from anonymous clients. - // Default: 100.0 - GlobalRPS float64 `json:"global_rps" mapstructure:"global_rps"` - - // GlobalBurst defines the burst capacity for the global rate limiter. - // This allows temporary traffic spikes above the sustained rate. - // Default: 200 - GlobalBurst int `json:"global_burst" mapstructure:"global_burst"` - - // PerClientRPS defines the rate limit in requests per second - // for each authenticated client (identified by SPIFFE ID). - // Each unique client gets their own rate limiter with this configuration. - // Default: 1000.0 - PerClientRPS float64 `json:"per_client_rps" mapstructure:"per_client_rps"` - - // PerClientBurst defines the burst capacity for per-client rate limiters. - // This allows clients to handle temporary traffic spikes. - // Default: 1500 - PerClientBurst int `json:"per_client_burst" mapstructure:"per_client_burst"` - - // MethodLimits defines optional per-method rate limit overrides. - // Keys are full gRPC method paths (e.g., "/agntcy.dir.store.v1.StoreService/CreateRecord"). - // These limits override the per-client limits for specific methods. - // This allows protecting expensive operations with stricter limits. - MethodLimits map[string]MethodLimit `json:"method_limits,omitempty" mapstructure:"method_limits"` -} - -// MethodLimit defines rate limiting parameters for a specific gRPC method. -type MethodLimit struct { - // RPS defines the requests per second limit for this method. - RPS float64 `json:"rps" mapstructure:"rps"` - - // Burst defines the burst capacity for this method. - Burst int `json:"burst" mapstructure:"burst"` -} - -// Validate checks if the configuration is valid and returns an error if not. -// It performs comprehensive validation of all rate limiting parameters. -func (c *Config) Validate() error { - // If rate limiting is disabled, no validation needed - if !c.Enabled { - return nil - } - - // Validate global rate limiting configuration - if err := c.validateGlobalLimits(); err != nil { - return err - } - - // Validate per-client rate limiting configuration - if err := c.validatePerClientLimits(); err != nil { - return err - } - - // Validate method-specific rate limiting configuration - if err := c.validateMethodLimits(); err != nil { - return err - } - - return nil -} - -// validateGlobalLimits validates the global rate limiting configuration. -// It checks that global RPS and burst values are non-negative and properly configured. -func (c *Config) validateGlobalLimits() error { - if c.GlobalRPS < 0 { - return fmt.Errorf("global_rps must be non-negative, got: %f", c.GlobalRPS) - } - - if c.GlobalBurst < 0 { - return fmt.Errorf("global_burst must be non-negative, got: %d", c.GlobalBurst) - } - - // Validate burst capacity relative to rate - // Burst should be at least equal to rate to allow sustained throughput - if c.GlobalRPS > 0 && c.GlobalBurst > 0 && float64(c.GlobalBurst) < c.GlobalRPS { - return fmt.Errorf("global_burst (%d) should be >= global_rps (%f) for optimal performance", c.GlobalBurst, c.GlobalRPS) - } - - return nil -} - -// validatePerClientLimits validates the per-client rate limiting configuration. 
-// It checks that per-client RPS and burst values are non-negative and properly configured. -func (c *Config) validatePerClientLimits() error { - if c.PerClientRPS < 0 { - return fmt.Errorf("per_client_rps must be non-negative, got: %f", c.PerClientRPS) - } - - if c.PerClientBurst < 0 { - return fmt.Errorf("per_client_burst must be non-negative, got: %d", c.PerClientBurst) - } - - // Validate burst capacity relative to rate - if c.PerClientRPS > 0 && c.PerClientBurst > 0 && float64(c.PerClientBurst) < c.PerClientRPS { - return fmt.Errorf("per_client_burst (%d) should be >= per_client_rps (%f) for optimal performance", c.PerClientBurst, c.PerClientRPS) - } - - return nil -} - -// validateMethodLimits validates the method-specific rate limiting configuration. -// It checks that all method limits have valid keys and non-negative RPS and burst values. -func (c *Config) validateMethodLimits() error { - for method, limit := range c.MethodLimits { - if method == "" { - return errors.New("method limit key cannot be empty") - } - - if limit.RPS < 0 { - return fmt.Errorf("method %s: rps must be non-negative, got: %f", method, limit.RPS) - } - - if limit.Burst < 0 { - return fmt.Errorf("method %s: burst must be non-negative, got: %d", method, limit.Burst) - } - - // Validate burst capacity relative to rate - if limit.RPS > 0 && limit.Burst > 0 && float64(limit.Burst) < limit.RPS { - return fmt.Errorf("method %s: burst (%d) should be >= rps (%f) for optimal performance", method, limit.Burst, limit.RPS) - } - } - - return nil -} - -// DefaultConfig returns a configuration with sensible default values. -// Rate limiting is disabled by default for backward compatibility. -func DefaultConfig() *Config { - return &Config{ - Enabled: false, - GlobalRPS: DefaultGlobalRPS, - GlobalBurst: DefaultGlobalBurst, - PerClientRPS: DefaultPerClientRPS, - PerClientBurst: DefaultPerClientBurst, - MethodLimits: make(map[string]MethodLimit), - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "errors" + "fmt" +) + +// Default rate limiting configuration values. +const ( + // DefaultGlobalRPS is the default global rate limit in requests per second + // for unauthenticated clients. + DefaultGlobalRPS = 100.0 + + // DefaultGlobalBurst is the default burst capacity for the global rate limiter. + DefaultGlobalBurst = 200 + + // DefaultPerClientRPS is the default rate limit in requests per second + // for each authenticated client. + DefaultPerClientRPS = 1000.0 + + // DefaultPerClientBurst is the default burst capacity for per-client rate limiters. + DefaultPerClientBurst = 1500 +) + +// Config defines rate limiting configuration for the gRPC server. +// It supports global rate limiting for unauthenticated clients, +// per-client rate limiting for authenticated clients (identified by SPIFFE ID), +// and optional per-method overrides for fine-grained control. +type Config struct { + // Enabled determines if rate limiting is active. + // When false, all rate limiting checks are bypassed. + Enabled bool `json:"enabled" mapstructure:"enabled"` + + // GlobalRPS defines the global rate limit in requests per second + // for unauthenticated clients (no SPIFFE ID in context). + // This is a fallback limit to prevent abuse from anonymous clients. + // Default: 100.0 + GlobalRPS float64 `json:"global_rps" mapstructure:"global_rps"` + + // GlobalBurst defines the burst capacity for the global rate limiter. 
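+	// In token-bucket terms (for example, golang.org/x/time/rate) this is the
+	// bucket size; the concrete limiter backing it is an implementation detail.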
+ // This allows temporary traffic spikes above the sustained rate. + // Default: 200 + GlobalBurst int `json:"global_burst" mapstructure:"global_burst"` + + // PerClientRPS defines the rate limit in requests per second + // for each authenticated client (identified by SPIFFE ID). + // Each unique client gets their own rate limiter with this configuration. + // Default: 1000.0 + PerClientRPS float64 `json:"per_client_rps" mapstructure:"per_client_rps"` + + // PerClientBurst defines the burst capacity for per-client rate limiters. + // This allows clients to handle temporary traffic spikes. + // Default: 1500 + PerClientBurst int `json:"per_client_burst" mapstructure:"per_client_burst"` + + // MethodLimits defines optional per-method rate limit overrides. + // Keys are full gRPC method paths (e.g., "/agntcy.dir.store.v1.StoreService/CreateRecord"). + // These limits override the per-client limits for specific methods. + // This allows protecting expensive operations with stricter limits. + MethodLimits map[string]MethodLimit `json:"method_limits,omitempty" mapstructure:"method_limits"` +} + +// MethodLimit defines rate limiting parameters for a specific gRPC method. +type MethodLimit struct { + // RPS defines the requests per second limit for this method. + RPS float64 `json:"rps" mapstructure:"rps"` + + // Burst defines the burst capacity for this method. + Burst int `json:"burst" mapstructure:"burst"` +} + +// Validate checks if the configuration is valid and returns an error if not. +// It performs comprehensive validation of all rate limiting parameters. +func (c *Config) Validate() error { + // If rate limiting is disabled, no validation needed + if !c.Enabled { + return nil + } + + // Validate global rate limiting configuration + if err := c.validateGlobalLimits(); err != nil { + return err + } + + // Validate per-client rate limiting configuration + if err := c.validatePerClientLimits(); err != nil { + return err + } + + // Validate method-specific rate limiting configuration + if err := c.validateMethodLimits(); err != nil { + return err + } + + return nil +} + +// validateGlobalLimits validates the global rate limiting configuration. +// It checks that global RPS and burst values are non-negative and properly configured. +func (c *Config) validateGlobalLimits() error { + if c.GlobalRPS < 0 { + return fmt.Errorf("global_rps must be non-negative, got: %f", c.GlobalRPS) + } + + if c.GlobalBurst < 0 { + return fmt.Errorf("global_burst must be non-negative, got: %d", c.GlobalBurst) + } + + // Validate burst capacity relative to rate + // Burst should be at least equal to rate to allow sustained throughput + if c.GlobalRPS > 0 && c.GlobalBurst > 0 && float64(c.GlobalBurst) < c.GlobalRPS { + return fmt.Errorf("global_burst (%d) should be >= global_rps (%f) for optimal performance", c.GlobalBurst, c.GlobalRPS) + } + + return nil +} + +// validatePerClientLimits validates the per-client rate limiting configuration. +// It checks that per-client RPS and burst values are non-negative and properly configured. 
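+// For example, PerClientRPS: 1000 with PerClientBurst: 500 is rejected, since
+// a burst smaller than the sustained rate throttles steady-state traffic.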
+func (c *Config) validatePerClientLimits() error { + if c.PerClientRPS < 0 { + return fmt.Errorf("per_client_rps must be non-negative, got: %f", c.PerClientRPS) + } + + if c.PerClientBurst < 0 { + return fmt.Errorf("per_client_burst must be non-negative, got: %d", c.PerClientBurst) + } + + // Validate burst capacity relative to rate + if c.PerClientRPS > 0 && c.PerClientBurst > 0 && float64(c.PerClientBurst) < c.PerClientRPS { + return fmt.Errorf("per_client_burst (%d) should be >= per_client_rps (%f) for optimal performance", c.PerClientBurst, c.PerClientRPS) + } + + return nil +} + +// validateMethodLimits validates the method-specific rate limiting configuration. +// It checks that all method limits have valid keys and non-negative RPS and burst values. +func (c *Config) validateMethodLimits() error { + for method, limit := range c.MethodLimits { + if method == "" { + return errors.New("method limit key cannot be empty") + } + + if limit.RPS < 0 { + return fmt.Errorf("method %s: rps must be non-negative, got: %f", method, limit.RPS) + } + + if limit.Burst < 0 { + return fmt.Errorf("method %s: burst must be non-negative, got: %d", method, limit.Burst) + } + + // Validate burst capacity relative to rate + if limit.RPS > 0 && limit.Burst > 0 && float64(limit.Burst) < limit.RPS { + return fmt.Errorf("method %s: burst (%d) should be >= rps (%f) for optimal performance", method, limit.Burst, limit.RPS) + } + } + + return nil +} + +// DefaultConfig returns a configuration with sensible default values. +// Rate limiting is disabled by default for backward compatibility. +func DefaultConfig() *Config { + return &Config{ + Enabled: false, + GlobalRPS: DefaultGlobalRPS, + GlobalBurst: DefaultGlobalBurst, + PerClientRPS: DefaultPerClientRPS, + PerClientBurst: DefaultPerClientBurst, + MethodLimits: make(map[string]MethodLimit), + } +} diff --git a/server/middleware/ratelimit/config/config_test.go b/server/middleware/ratelimit/config/config_test.go index b7b36a0d0..9f694d078 100644 --- a/server/middleware/ratelimit/config/config_test.go +++ b/server/middleware/ratelimit/config/config_test.go @@ -1,542 +1,542 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "testing" -) - -func TestDefaultConfig(t *testing.T) { - cfg := DefaultConfig() - - if cfg == nil { - t.Fatal("DefaultConfig() returned nil") - } - - // Verify default values - if cfg.Enabled { - t.Error("Expected Enabled to be false by default") - } - - if cfg.GlobalRPS != 100.0 { - t.Errorf("Expected GlobalRPS to be 100.0, got: %f", cfg.GlobalRPS) - } - - if cfg.GlobalBurst != 200 { - t.Errorf("Expected GlobalBurst to be 200, got: %d", cfg.GlobalBurst) - } - - if cfg.PerClientRPS != 1000.0 { - t.Errorf("Expected PerClientRPS to be 1000.0, got: %f", cfg.PerClientRPS) - } - - if cfg.PerClientBurst != 1500 { - t.Errorf("Expected PerClientBurst to be 1500, got: %d", cfg.PerClientBurst) - } - - if cfg.MethodLimits == nil { - t.Error("Expected MethodLimits to be initialized (empty map)") - } - - if len(cfg.MethodLimits) != 0 { - t.Errorf("Expected MethodLimits to be empty, got: %d entries", len(cfg.MethodLimits)) - } -} - -// TestConfig_Validate_BasicCases tests basic validation behavior -// including disabled configurations and zero values. 
-func TestConfig_Validate_BasicCases(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "valid default configuration", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: make(map[string]MethodLimit), - }, - wantErr: false, - }, - { - name: "disabled configuration should pass validation", - config: Config{ - Enabled: false, - GlobalRPS: -100.0, // Invalid, but should be ignored when disabled - GlobalBurst: -200, - PerClientRPS: -1000.0, - PerClientBurst: -1500, - }, - wantErr: false, - }, - { - name: "zero values should be valid", - config: Config{ - Enabled: true, - GlobalRPS: 0, - GlobalBurst: 0, - PerClientRPS: 0, - PerClientBurst: 0, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - - if tt.wantErr { - if err == nil { - t.Errorf("Config.Validate() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Config.Validate() unexpected error: %v", err) - } - }) - } -} - -// TestConfig_Validate_GlobalLimits tests validation of global rate limiting parameters. -func TestConfig_Validate_GlobalLimits(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "negative global RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: -10.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: true, - errMsg: "global_rps must be non-negative", - }, - { - name: "negative global burst should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: -200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: true, - errMsg: "global_burst must be non-negative", - }, - { - name: "global burst less than RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 50, // Less than RPS - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: true, - errMsg: "global_burst (50) should be >= global_rps (100", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - - if tt.wantErr { - if err == nil { - t.Errorf("Config.Validate() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Config.Validate() unexpected error: %v", err) - } - }) - } -} - -// TestConfig_Validate_PerClientLimits tests validation of per-client rate limiting parameters. 
-func TestConfig_Validate_PerClientLimits(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "negative per-client RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: -1000.0, - PerClientBurst: 1500, - }, - wantErr: true, - errMsg: "per_client_rps must be non-negative", - }, - { - name: "negative per-client burst should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: -1500, - }, - wantErr: true, - errMsg: "per_client_burst must be non-negative", - }, - { - name: "per-client burst less than RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 500, // Less than RPS - }, - wantErr: true, - errMsg: "per_client_burst (500) should be >= per_client_rps (1000", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - - if tt.wantErr { - if err == nil { - t.Errorf("Config.Validate() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Config.Validate() unexpected error: %v", err) - } - }) - } -} - -// TestConfig_Validate_MethodLimits tests validation of method-specific rate limiting parameters. -func TestConfig_Validate_MethodLimits(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "valid configuration with method limits", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - "/agntcy.dir.store.v1.StoreService/CreateRecord": { - RPS: 50.0, - Burst: 100, - }, - "/agntcy.dir.search.v1.SearchService/Search": { - RPS: 20.0, - Burst: 40, - }, - }, - }, - wantErr: false, - }, - { - name: "empty method key should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - "": { - RPS: 50.0, - Burst: 100, - }, - }, - }, - wantErr: true, - errMsg: "method limit key cannot be empty", - }, - { - name: "negative method RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - "/test/Method": { - RPS: -50.0, - Burst: 100, - }, - }, - }, - wantErr: true, - errMsg: "rps must be non-negative", - }, - { - name: "negative method burst should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - "/test/Method": { - RPS: 50.0, - Burst: -100, - }, - }, - }, - wantErr: true, - errMsg: "burst must be non-negative", - }, - { - name: "method burst less than RPS should fail", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - "/test/Method": { - RPS: 100.0, - Burst: 50, // Less than RPS - }, - }, - }, - wantErr: true, - errMsg: "burst (50) should be >= rps (100", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - - if tt.wantErr 
{ - if err == nil { - t.Errorf("Config.Validate() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Config.Validate() unexpected error: %v", err) - } - }) - } -} - -// TestConfig_Validate_EdgeCases tests edge cases and special scenarios -// for rate limiting configuration. -func TestConfig_Validate_EdgeCases(t *testing.T) { - tests := []struct { - name string - config Config - wantErr bool - errMsg string - }{ - { - name: "very large values should be valid", - config: Config{ - Enabled: true, - GlobalRPS: 1000000.0, - GlobalBurst: 2000000, - PerClientRPS: 10000000.0, - PerClientBurst: 20000000, - }, - wantErr: false, - }, - { - name: "fractional RPS values should be valid", - config: Config{ - Enabled: true, - GlobalRPS: 0.5, // 1 request per 2 seconds - GlobalBurst: 1, - PerClientRPS: 10.5, - PerClientBurst: 21, - }, - wantErr: false, - }, - { - name: "burst equal to RPS should be valid", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 100, // Equal to RPS - PerClientRPS: 1000.0, - PerClientBurst: 1000, - }, - wantErr: false, - }, - { - name: "zero RPS with non-zero burst should be valid", - config: Config{ - Enabled: true, - GlobalRPS: 0, // No sustained rate - GlobalBurst: 100, // But allows bursts - PerClientRPS: 0, - PerClientBurst: 100, - }, - wantErr: false, - }, - { - name: "non-zero RPS with zero burst should skip burst validation", - config: Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 0, // Zero burst is allowed (will be handled by limiter) - PerClientRPS: 1000.0, - PerClientBurst: 0, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.config.Validate() - - if tt.wantErr { - if err == nil { - t.Errorf("Config.Validate() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Config.Validate() unexpected error: %v", err) - } - }) - } -} - -func TestMethodLimit_Validation(t *testing.T) { - tests := []struct { - name string - method string - limit MethodLimit - wantErr bool - errMsg string - }{ - { - name: "valid method limit", - method: "/test/Method", - limit: MethodLimit{ - RPS: 50.0, - Burst: 100, - }, - wantErr: false, - }, - { - name: "zero RPS and burst", - method: "/test/Method", - limit: MethodLimit{ - RPS: 0, - Burst: 0, - }, - wantErr: false, - }, - { - name: "fractional RPS", - method: "/test/Method", - limit: MethodLimit{ - RPS: 0.1, - Burst: 1, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]MethodLimit{ - tt.method: tt.limit, - }, - } - - err := cfg.Validate() - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("Error message = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else if err != nil { - t.Errorf("Unexpected error: %v", err) - } - }) - } -} - -// contains checks if a string contains a substring. 
-func contains(s, substr string) bool { - return len(s) >= len(substr) && (s == substr || len(substr) == 0 || - (len(s) > 0 && len(substr) > 0 && indexOfString(s, substr) >= 0)) -} - -// indexOfString returns the index of substr in s, or -1 if not found. -func indexOfString(s, substr string) int { - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return i - } - } - - return -1 -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "testing" +) + +func TestDefaultConfig(t *testing.T) { + cfg := DefaultConfig() + + if cfg == nil { + t.Fatal("DefaultConfig() returned nil") + } + + // Verify default values + if cfg.Enabled { + t.Error("Expected Enabled to be false by default") + } + + if cfg.GlobalRPS != 100.0 { + t.Errorf("Expected GlobalRPS to be 100.0, got: %f", cfg.GlobalRPS) + } + + if cfg.GlobalBurst != 200 { + t.Errorf("Expected GlobalBurst to be 200, got: %d", cfg.GlobalBurst) + } + + if cfg.PerClientRPS != 1000.0 { + t.Errorf("Expected PerClientRPS to be 1000.0, got: %f", cfg.PerClientRPS) + } + + if cfg.PerClientBurst != 1500 { + t.Errorf("Expected PerClientBurst to be 1500, got: %d", cfg.PerClientBurst) + } + + if cfg.MethodLimits == nil { + t.Error("Expected MethodLimits to be initialized (empty map)") + } + + if len(cfg.MethodLimits) != 0 { + t.Errorf("Expected MethodLimits to be empty, got: %d entries", len(cfg.MethodLimits)) + } +} + +// TestConfig_Validate_BasicCases tests basic validation behavior +// including disabled configurations and zero values. +func TestConfig_Validate_BasicCases(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "valid default configuration", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: make(map[string]MethodLimit), + }, + wantErr: false, + }, + { + name: "disabled configuration should pass validation", + config: Config{ + Enabled: false, + GlobalRPS: -100.0, // Invalid, but should be ignored when disabled + GlobalBurst: -200, + PerClientRPS: -1000.0, + PerClientBurst: -1500, + }, + wantErr: false, + }, + { + name: "zero values should be valid", + config: Config{ + Enabled: true, + GlobalRPS: 0, + GlobalBurst: 0, + PerClientRPS: 0, + PerClientBurst: 0, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Config.Validate() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Config.Validate() unexpected error: %v", err) + } + }) + } +} + +// TestConfig_Validate_GlobalLimits tests validation of global rate limiting parameters. 
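+//
+// For orientation, a minimal sketch (illustrative only, not part of this
+// change) of how these knobs map onto a golang.org/x/time/rate token bucket:
+// burst is the bucket capacity and RPS the refill rate, so a burst below the
+// sustained rate is almost always a misconfiguration, which is what the
+// "burst >= rps" check exercised below guards against.
+//
+//	lim := rate.NewLimiter(rate.Limit(100), 200) // refills 100 tokens/s, holds up to 200
+//	allowed := lim.Allow()                       // consumes one token if available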
+func TestConfig_Validate_GlobalLimits(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "negative global RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: -10.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: true, + errMsg: "global_rps must be non-negative", + }, + { + name: "negative global burst should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: -200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: true, + errMsg: "global_burst must be non-negative", + }, + { + name: "global burst less than RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 50, // Less than RPS + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: true, + errMsg: "global_burst (50) should be >= global_rps (100", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Config.Validate() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Config.Validate() unexpected error: %v", err) + } + }) + } +} + +// TestConfig_Validate_PerClientLimits tests validation of per-client rate limiting parameters. +func TestConfig_Validate_PerClientLimits(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "negative per-client RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: -1000.0, + PerClientBurst: 1500, + }, + wantErr: true, + errMsg: "per_client_rps must be non-negative", + }, + { + name: "negative per-client burst should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: -1500, + }, + wantErr: true, + errMsg: "per_client_burst must be non-negative", + }, + { + name: "per-client burst less than RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 500, // Less than RPS + }, + wantErr: true, + errMsg: "per_client_burst (500) should be >= per_client_rps (1000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Config.Validate() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Config.Validate() unexpected error: %v", err) + } + }) + } +} + +// TestConfig_Validate_MethodLimits tests validation of method-specific rate limiting parameters. 
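+//
+// Method keys are full gRPC method names ("/package.Service/Method"). As a
+// hedged sketch (values illustrative only), a configuration that passes
+// validation looks like:
+//
+//	cfg := Config{
+//		Enabled:        true,
+//		GlobalRPS:      100,
+//		GlobalBurst:    200,
+//		PerClientRPS:   1000,
+//		PerClientBurst: 1500,
+//		MethodLimits: map[string]MethodLimit{
+//			// Tighter budget for an expensive write path.
+//			"/agntcy.dir.store.v1.StoreService/CreateRecord": {RPS: 50, Burst: 100},
+//		},
+//	}
+//	err := cfg.Validate() // nil for this shape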
+func TestConfig_Validate_MethodLimits(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "valid configuration with method limits", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + "/agntcy.dir.store.v1.StoreService/CreateRecord": { + RPS: 50.0, + Burst: 100, + }, + "/agntcy.dir.search.v1.SearchService/Search": { + RPS: 20.0, + Burst: 40, + }, + }, + }, + wantErr: false, + }, + { + name: "empty method key should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + "": { + RPS: 50.0, + Burst: 100, + }, + }, + }, + wantErr: true, + errMsg: "method limit key cannot be empty", + }, + { + name: "negative method RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + "/test/Method": { + RPS: -50.0, + Burst: 100, + }, + }, + }, + wantErr: true, + errMsg: "rps must be non-negative", + }, + { + name: "negative method burst should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + "/test/Method": { + RPS: 50.0, + Burst: -100, + }, + }, + }, + wantErr: true, + errMsg: "burst must be non-negative", + }, + { + name: "method burst less than RPS should fail", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + "/test/Method": { + RPS: 100.0, + Burst: 50, // Less than RPS + }, + }, + }, + wantErr: true, + errMsg: "burst (50) should be >= rps (100", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Config.Validate() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Config.Validate() unexpected error: %v", err) + } + }) + } +} + +// TestConfig_Validate_EdgeCases tests edge cases and special scenarios +// for rate limiting configuration. 
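+//
+// Two semantics exercised below are easy to misread: fractional RPS is valid
+// token-bucket input, and zero RPS is treated by this middleware as
+// "unlimited" rather than "deny all" (no limiter is created for it). A short
+// sketch of the fractional case, assuming golang.org/x/time/rate:
+//
+//	slow := rate.NewLimiter(rate.Limit(0.5), 1) // one request every 2 seconds, burst of 1
+//	_ = slow.Allow()                            // first call passes; bucket refills in ~2s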
+func TestConfig_Validate_EdgeCases(t *testing.T) { + tests := []struct { + name string + config Config + wantErr bool + errMsg string + }{ + { + name: "very large values should be valid", + config: Config{ + Enabled: true, + GlobalRPS: 1000000.0, + GlobalBurst: 2000000, + PerClientRPS: 10000000.0, + PerClientBurst: 20000000, + }, + wantErr: false, + }, + { + name: "fractional RPS values should be valid", + config: Config{ + Enabled: true, + GlobalRPS: 0.5, // 1 request per 2 seconds + GlobalBurst: 1, + PerClientRPS: 10.5, + PerClientBurst: 21, + }, + wantErr: false, + }, + { + name: "burst equal to RPS should be valid", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 100, // Equal to RPS + PerClientRPS: 1000.0, + PerClientBurst: 1000, + }, + wantErr: false, + }, + { + name: "zero RPS with non-zero burst should be valid", + config: Config{ + Enabled: true, + GlobalRPS: 0, // No sustained rate + GlobalBurst: 100, // But allows bursts + PerClientRPS: 0, + PerClientBurst: 100, + }, + wantErr: false, + }, + { + name: "non-zero RPS with zero burst should skip burst validation", + config: Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 0, // Zero burst is allowed (will be handled by limiter) + PerClientRPS: 1000.0, + PerClientBurst: 0, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.config.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Config.Validate() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Config.Validate() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Config.Validate() unexpected error: %v", err) + } + }) + } +} + +func TestMethodLimit_Validation(t *testing.T) { + tests := []struct { + name string + method string + limit MethodLimit + wantErr bool + errMsg string + }{ + { + name: "valid method limit", + method: "/test/Method", + limit: MethodLimit{ + RPS: 50.0, + Burst: 100, + }, + wantErr: false, + }, + { + name: "zero RPS and burst", + method: "/test/Method", + limit: MethodLimit{ + RPS: 0, + Burst: 0, + }, + wantErr: false, + }, + { + name: "fractional RPS", + method: "/test/Method", + limit: MethodLimit{ + RPS: 0.1, + Burst: 1, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]MethodLimit{ + tt.method: tt.limit, + }, + } + + err := cfg.Validate() + + if tt.wantErr { + if err == nil { + t.Errorf("Expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("Error message = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + }) + } +} + +// contains checks if a string contains a substring. +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(substr) == 0 || + (len(s) > 0 && len(substr) > 0 && indexOfString(s, substr) >= 0)) +} + +// indexOfString returns the index of substr in s, or -1 if not found. 
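+// It is functionally equivalent to strings.Index, kept local so this test
+// file needs no extra imports.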
+func indexOfString(s, substr string) int { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return i + } + } + + return -1 +} diff --git a/server/middleware/ratelimit/limiter.go b/server/middleware/ratelimit/limiter.go index 2aa26e18e..37eda595f 100644 --- a/server/middleware/ratelimit/limiter.go +++ b/server/middleware/ratelimit/limiter.go @@ -1,243 +1,243 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package ratelimit - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/agntcy/dir/server/authn" - "github.com/agntcy/dir/server/middleware/ratelimit/config" - "github.com/agntcy/dir/utils/logging" - "golang.org/x/time/rate" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var logger = logging.Logger("ratelimit") - -// Limiter defines the interface for rate limiting operations. -// This interface matches the go-grpc-middleware/v2 Limiter interface, -// allowing this implementation to be used with standard interceptors. -// -// Implementations should be thread-safe and support concurrent access. -type Limiter interface { - // Limit checks if a request should be rate limited. - // It extracts client identity and method from context, then applies rate limiting rules. - // Returns an error with codes.ResourceExhausted if rate limit is exceeded. - Limit(ctx context.Context) error -} - -// ClientLimiter implements per-client rate limiting using token bucket algorithm. -// It maintains separate rate limiters for each unique client (identified by SPIFFE ID), -// with support for global limits (for unauthenticated clients) and per-method overrides. -// -// Thread Safety: -// ClientLimiter is safe for concurrent use by multiple goroutines. -// It uses sync.Map for lock-free reads and atomic operations for limiter creation. -type ClientLimiter struct { - // limiters stores per-client rate limiters (clientID -> *rate.Limiter) - // Uses sync.Map for efficient concurrent access without locks - limiters sync.Map - - // globalLimiter is the fallback rate limiter for unauthenticated clients - globalLimiter *rate.Limiter - - // config holds the rate limiting configuration - config *config.Config -} - -// NewClientLimiter creates a new ClientLimiter with the given configuration. -// It validates the configuration and initializes the global rate limiter. 
-func NewClientLimiter(cfg *config.Config) (*ClientLimiter, error) { - if cfg == nil { - return nil, errors.New("config cannot be nil") - } - - // Validate configuration - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid rate limit config: %w", err) - } - - // If rate limiting is disabled, return a limiter with nil global limiter - // Allow() will always return true in this case - if !cfg.Enabled { - logger.Info("Rate limiting is disabled") - - return &ClientLimiter{ - config: cfg, - globalLimiter: nil, - }, nil - } - - // Create global rate limiter for unauthenticated clients - var globalLimiter *rate.Limiter - if cfg.GlobalRPS > 0 { - globalLimiter = rate.NewLimiter(rate.Limit(cfg.GlobalRPS), cfg.GlobalBurst) - logger.Info("Global rate limiter initialized", - "rps", cfg.GlobalRPS, - "burst", cfg.GlobalBurst, - ) - } - - logger.Info("Client rate limiter initialized", - "per_client_rps", cfg.PerClientRPS, - "per_client_burst", cfg.PerClientBurst, - "method_overrides", len(cfg.MethodLimits), - ) - - return &ClientLimiter{ - globalLimiter: globalLimiter, - config: cfg, - }, nil -} - -// Limit checks if a request should be rate limited. -// It implements the go-grpc-middleware/v2 Limiter interface. -// -// The method extracts client identity and method from context, then applies -// the token bucket algorithm: -// - Returns nil if a token is available (request allowed) -// - Returns codes.ResourceExhausted error if rate limited -// -// The method checks rate limits in the following order: -// 1. If rate limiting is disabled, always allow -// 2. Check for method-specific override -// 3. Check per-client limit (if clientID provided) -// 4. Fall back to global limit (for anonymous/unauthenticated clients). -func (l *ClientLimiter) Limit(ctx context.Context) error { - // If rate limiting is disabled, always allow - if !l.config.Enabled { - return nil - } - - // Extract client ID from context (SPIFFE ID if authenticated) - clientID := extractClientID(ctx) - - // Extract method name from context - method, _ := grpc.Method(ctx) - - // Get the appropriate rate limiter - limiter := l.getLimiterForRequest(clientID, method) - - // If no limiter is configured (both client and global limiters are nil or zero-rate), - // allow the request - if limiter == nil { - return nil - } - - // Check if request is allowed by the token bucket - if !limiter.Allow() { - logger.Warn("Rate limit exceeded", - "client_id", clientID, - "method", method, - ) - - return status.Error(codes.ResourceExhausted, "rate limit exceeded") //nolint:wrapcheck // gRPC status error for client - } - - return nil -} - -// extractClientID extracts the client identifier from the gRPC context. -// It returns the SPIFFE ID string if the client is authenticated via authn middleware, -// or an empty string for unauthenticated clients (which will use global rate limit). -func extractClientID(ctx context.Context) string { - // Try to extract SPIFFE ID from context (set by authentication middleware) - if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { - return spiffeID.String() - } - - // No authentication - return empty string to use global rate limiter - return "" -} - -// getLimiterForRequest returns the appropriate rate limiter for a request. -// It checks in order: -// 1. Method-specific override (if configured) -// 2. Per-client limiter (if clientID provided) -// 3. Global limiter (fallback) -// -// Returns nil if no rate limiter is applicable. 
-func (l *ClientLimiter) getLimiterForRequest(clientID string, method string) *rate.Limiter { - // Check for method-specific override first - if method != "" { - if methodLimit, exists := l.config.MethodLimits[method]; exists { - // Create a unique key combining client and method - key := fmt.Sprintf("%s:%s", clientID, method) - - return l.getOrCreateLimiter(key, methodLimit.RPS, methodLimit.Burst) - } - } - - // If client ID is provided, use per-client limiter - if clientID != "" && l.config.PerClientRPS > 0 { - return l.getOrCreateLimiter(clientID, l.config.PerClientRPS, l.config.PerClientBurst) - } - - // Fall back to global limiter - return l.globalLimiter -} - -// getOrCreateLimiter gets an existing rate limiter or creates a new one. -// This method is thread-safe and uses sync.Map for efficient concurrent access. -// -// The rate limiter is stored in the limiters map using the provided key. -// If a limiter already exists for the key, it is reused. -// Otherwise, a new limiter is created with the specified rate and burst parameters. -func (l *ClientLimiter) getOrCreateLimiter(key string, rps float64, burst int) *rate.Limiter { - // Fast path: check if limiter already exists - if value, exists := l.limiters.Load(key); exists { - limiter, ok := value.(*rate.Limiter) - if !ok { - // This should never happen as we control what goes into the map - panic(fmt.Sprintf("invalid type in limiters map: expected *rate.Limiter, got %T", value)) - } - - return limiter - } - - // If RPS is zero, don't create a limiter (unlimited) - if rps == 0 { - return nil - } - - // Slow path: create new limiter - // Use LoadOrStore to handle race conditions (multiple goroutines creating for same key) - newLimiter := rate.NewLimiter(rate.Limit(rps), burst) - actual, loaded := l.limiters.LoadOrStore(key, newLimiter) - - if !loaded { - logger.Debug("Created new rate limiter", - "key", key, - "rps", rps, - "burst", burst, - ) - } - - limiter, ok := actual.(*rate.Limiter) - if !ok { - // This should never happen as we control what goes into the map - panic(fmt.Sprintf("invalid type in limiters map: expected *rate.Limiter, got %T", actual)) - } - - return limiter -} - -// GetLimiterCount returns the number of active rate limiters. -// This is primarily useful for testing and monitoring. -func (l *ClientLimiter) GetLimiterCount() int { - count := 0 - - l.limiters.Range(func(key, value interface{}) bool { - count++ - - return true - }) - - return count -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package ratelimit + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/agntcy/dir/server/authn" + "github.com/agntcy/dir/server/middleware/ratelimit/config" + "github.com/agntcy/dir/utils/logging" + "golang.org/x/time/rate" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var logger = logging.Logger("ratelimit") + +// Limiter defines the interface for rate limiting operations. +// This interface matches the go-grpc-middleware/v2 Limiter interface, +// allowing this implementation to be used with standard interceptors. +// +// Implementations should be thread-safe and support concurrent access. +type Limiter interface { + // Limit checks if a request should be rate limited. + // It extracts client identity and method from context, then applies rate limiting rules. + // Returns an error with codes.ResourceExhausted if rate limit is exceeded. 
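+	//
+	// Callers can detect a rate-limit rejection in the usual gRPC way
+	// (sketch): status.Code(err) == codes.ResourceExhausted.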
+	Limit(ctx context.Context) error
+}
+
+// ClientLimiter implements per-client rate limiting using the token bucket algorithm.
+// It maintains separate rate limiters for each unique client (identified by SPIFFE ID),
+// with support for global limits (for unauthenticated clients) and per-method overrides.
+//
+// Thread Safety:
+// ClientLimiter is safe for concurrent use by multiple goroutines.
+// It uses sync.Map for lock-free reads and atomic operations for limiter creation.
+type ClientLimiter struct {
+	// limiters stores per-client rate limiters (clientID -> *rate.Limiter)
+	// Uses sync.Map for efficient concurrent access without locks
+	limiters sync.Map
+
+	// globalLimiter is the fallback rate limiter for unauthenticated clients
+	globalLimiter *rate.Limiter
+
+	// config holds the rate limiting configuration
+	config *config.Config
+}
+
+// NewClientLimiter creates a new ClientLimiter with the given configuration.
+// It validates the configuration and initializes the global rate limiter.
+func NewClientLimiter(cfg *config.Config) (*ClientLimiter, error) {
+	if cfg == nil {
+		return nil, errors.New("config cannot be nil")
+	}
+
+	// Validate configuration
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("invalid rate limit config: %w", err)
+	}
+
+	// If rate limiting is disabled, return a limiter with nil global limiter
+	// Limit() will then always allow requests
+	if !cfg.Enabled {
+		logger.Info("Rate limiting is disabled")
+
+		return &ClientLimiter{
+			config:        cfg,
+			globalLimiter: nil,
+		}, nil
+	}
+
+	// Create global rate limiter for unauthenticated clients
+	var globalLimiter *rate.Limiter
+	if cfg.GlobalRPS > 0 {
+		globalLimiter = rate.NewLimiter(rate.Limit(cfg.GlobalRPS), cfg.GlobalBurst)
+		logger.Info("Global rate limiter initialized",
+			"rps", cfg.GlobalRPS,
+			"burst", cfg.GlobalBurst,
+		)
+	}
+
+	logger.Info("Client rate limiter initialized",
+		"per_client_rps", cfg.PerClientRPS,
+		"per_client_burst", cfg.PerClientBurst,
+		"method_overrides", len(cfg.MethodLimits),
+	)
+
+	return &ClientLimiter{
+		globalLimiter: globalLimiter,
+		config:        cfg,
+	}, nil
+}
+
+// Limit checks if a request should be rate limited.
+// It implements the go-grpc-middleware/v2 Limiter interface.
+//
+// The method extracts client identity and method from context, then applies
+// the token bucket algorithm:
+// - Returns nil if a token is available (request allowed)
+// - Returns codes.ResourceExhausted error if rate limited
+//
+// The method checks rate limits in the following order:
+// 1. If rate limiting is disabled, always allow
+// 2. Check for method-specific override
+// 3. Check per-client limit (if clientID provided)
+// 4. Fall back to global limit (for anonymous/unauthenticated clients).
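+//
+// Wiring sketch (assuming the go-grpc-middleware/v2 ratelimit interceptor,
+// imported as grpc_ratelimit in this package's server.go):
+//
+//	limiter, _ := NewClientLimiter(cfg) // cfg: a validated *config.Config; error handling elided
+//	srv := grpc.NewServer(
+//		grpc.ChainUnaryInterceptor(grpc_ratelimit.UnaryServerInterceptor(limiter)),
+//	)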
+func (l *ClientLimiter) Limit(ctx context.Context) error { + // If rate limiting is disabled, always allow + if !l.config.Enabled { + return nil + } + + // Extract client ID from context (SPIFFE ID if authenticated) + clientID := extractClientID(ctx) + + // Extract method name from context + method, _ := grpc.Method(ctx) + + // Get the appropriate rate limiter + limiter := l.getLimiterForRequest(clientID, method) + + // If no limiter is configured (both client and global limiters are nil or zero-rate), + // allow the request + if limiter == nil { + return nil + } + + // Check if request is allowed by the token bucket + if !limiter.Allow() { + logger.Warn("Rate limit exceeded", + "client_id", clientID, + "method", method, + ) + + return status.Error(codes.ResourceExhausted, "rate limit exceeded") //nolint:wrapcheck // gRPC status error for client + } + + return nil +} + +// extractClientID extracts the client identifier from the gRPC context. +// It returns the SPIFFE ID string if the client is authenticated via authn middleware, +// or an empty string for unauthenticated clients (which will use global rate limit). +func extractClientID(ctx context.Context) string { + // Try to extract SPIFFE ID from context (set by authentication middleware) + if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { + return spiffeID.String() + } + + // No authentication - return empty string to use global rate limiter + return "" +} + +// getLimiterForRequest returns the appropriate rate limiter for a request. +// It checks in order: +// 1. Method-specific override (if configured) +// 2. Per-client limiter (if clientID provided) +// 3. Global limiter (fallback) +// +// Returns nil if no rate limiter is applicable. +func (l *ClientLimiter) getLimiterForRequest(clientID string, method string) *rate.Limiter { + // Check for method-specific override first + if method != "" { + if methodLimit, exists := l.config.MethodLimits[method]; exists { + // Create a unique key combining client and method + key := fmt.Sprintf("%s:%s", clientID, method) + + return l.getOrCreateLimiter(key, methodLimit.RPS, methodLimit.Burst) + } + } + + // If client ID is provided, use per-client limiter + if clientID != "" && l.config.PerClientRPS > 0 { + return l.getOrCreateLimiter(clientID, l.config.PerClientRPS, l.config.PerClientBurst) + } + + // Fall back to global limiter + return l.globalLimiter +} + +// getOrCreateLimiter gets an existing rate limiter or creates a new one. +// This method is thread-safe and uses sync.Map for efficient concurrent access. +// +// The rate limiter is stored in the limiters map using the provided key. +// If a limiter already exists for the key, it is reused. +// Otherwise, a new limiter is created with the specified rate and burst parameters. 
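+//
+// Key scheme (see getLimiterForRequest): per-client entries use the bare
+// client ID, while method overrides combine client and method, e.g.
+//
+//	"spiffe://example.org/client1"              // per-client bucket
+//	"spiffe://example.org/client1:/test/Method" // per-client, per-method bucket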
+func (l *ClientLimiter) getOrCreateLimiter(key string, rps float64, burst int) *rate.Limiter { + // Fast path: check if limiter already exists + if value, exists := l.limiters.Load(key); exists { + limiter, ok := value.(*rate.Limiter) + if !ok { + // This should never happen as we control what goes into the map + panic(fmt.Sprintf("invalid type in limiters map: expected *rate.Limiter, got %T", value)) + } + + return limiter + } + + // If RPS is zero, don't create a limiter (unlimited) + if rps == 0 { + return nil + } + + // Slow path: create new limiter + // Use LoadOrStore to handle race conditions (multiple goroutines creating for same key) + newLimiter := rate.NewLimiter(rate.Limit(rps), burst) + actual, loaded := l.limiters.LoadOrStore(key, newLimiter) + + if !loaded { + logger.Debug("Created new rate limiter", + "key", key, + "rps", rps, + "burst", burst, + ) + } + + limiter, ok := actual.(*rate.Limiter) + if !ok { + // This should never happen as we control what goes into the map + panic(fmt.Sprintf("invalid type in limiters map: expected *rate.Limiter, got %T", actual)) + } + + return limiter +} + +// GetLimiterCount returns the number of active rate limiters. +// This is primarily useful for testing and monitoring. +func (l *ClientLimiter) GetLimiterCount() int { + count := 0 + + l.limiters.Range(func(key, value interface{}) bool { + count++ + + return true + }) + + return count +} diff --git a/server/middleware/ratelimit/limiter_test.go b/server/middleware/ratelimit/limiter_test.go index f6ec8e985..4e8ca35d6 100644 --- a/server/middleware/ratelimit/limiter_test.go +++ b/server/middleware/ratelimit/limiter_test.go @@ -1,574 +1,574 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package ratelimit - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/agntcy/dir/server/authn" - "github.com/agntcy/dir/server/middleware/ratelimit/config" - "github.com/spiffe/go-spiffe/v2/spiffeid" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// contextWithMethod creates a context with a gRPC method set for testing. -func contextWithMethod(method string) context.Context { - return grpc.NewContextWithServerTransportStream(context.Background(), &mockServerTransportStream{method: method}) -} - -// contextWithClientAndMethod creates a context with both SPIFFE ID and gRPC method for testing. -func contextWithClientAndMethod(clientID string, method string) context.Context { - ctx := contextWithMethod(method) - - if clientID != "" { - spiffeID, _ := spiffeid.FromString(clientID) - ctx = context.WithValue(ctx, authn.SpiffeIDContextKey, spiffeID) - } - - return ctx -} - -// mockServerTransportStream is a minimal implementation for setting method in context. 
-type mockServerTransportStream struct { - method string -} - -func (m *mockServerTransportStream) Method() string { - return m.method -} - -func (m *mockServerTransportStream) SetHeader(md metadata.MD) error { return nil } -func (m *mockServerTransportStream) SendHeader(md metadata.MD) error { return nil } -func (m *mockServerTransportStream) SetTrailer(md metadata.MD) error { return nil } - -func TestNewClientLimiter(t *testing.T) { - tests := []struct { - name string - config *config.Config - wantErr bool - errMsg string - }{ - { - name: "valid configuration", - config: &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: make(map[string]config.MethodLimit), - }, - wantErr: false, - }, - { - name: "nil configuration should fail", - config: nil, - wantErr: true, - errMsg: "config cannot be nil", - }, - { - name: "invalid configuration should fail", - config: &config.Config{ - Enabled: true, - GlobalRPS: -100.0, // Invalid - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: true, - errMsg: "invalid rate limit config", - }, - { - name: "disabled configuration should succeed", - config: &config.Config{ - Enabled: false, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: false, - }, - { - name: "zero global RPS should create limiter without global limit", - config: &config.Config{ - Enabled: true, - GlobalRPS: 0, // Zero means no global limit - GlobalBurst: 0, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - limiter, err := NewClientLimiter(tt.config) - - //nolint:nestif // Standard table-driven test error checking pattern - if tt.wantErr { - if err == nil { - t.Errorf("NewClientLimiter() expected error but got none") - - return - } - - if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { - t.Errorf("NewClientLimiter() error = %q, want to contain %q", err.Error(), tt.errMsg) - } - } else { - if err != nil { - t.Errorf("NewClientLimiter() unexpected error: %v", err) - - return - } - - if limiter == nil { - t.Error("NewClientLimiter() returned nil limiter") - } - } - }) - } -} - -func TestClientLimiter_Limit_PerClientLimiting(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 10.0, - GlobalBurst: 20, - PerClientRPS: 10.0, // 10 req/sec - PerClientBurst: 20, // burst 20 - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctx1 := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - ctx2 := contextWithClientAndMethod("spiffe://example.org/client2", "/test/Method") - - // Client 1: Exhaust burst capacity - for i := range 20 { - if err := limiter.Limit(ctx1); err != nil { - t.Errorf("Request %d should be allowed (within burst), got error: %v", i+1, err) - } - } - - // Client 1: 21st request should be rate limited - if err := limiter.Limit(ctx1); err == nil { - t.Error("Request 21 should be rate limited") - } else if status.Code(err) != codes.ResourceExhausted { - t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) - } - - // Client 2: Should still have full capacity (separate limiter) - for i := range 20 { - if err := limiter.Limit(ctx2); err != nil { - t.Errorf("Client2 request %d should be allowed, got error: %v", i+1, err) - } - } - - // Client 2: 
21st request should be rate limited - if err := limiter.Limit(ctx2); err == nil { - t.Error("Client2 request 21 should be rate limited") - } else if status.Code(err) != codes.ResourceExhausted { - t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) - } -} - -func TestClientLimiter_Limit_GlobalLimiting(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 10.0, - GlobalBurst: 20, - PerClientRPS: 0, // No per-client limit - PerClientBurst: 0, - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctx := contextWithClientAndMethod("", "/test/Method") - - // Anonymous client: Exhaust burst capacity - for i := range 20 { - if err := limiter.Limit(ctx); err != nil { - t.Errorf("Request %d should be allowed (within burst), got error: %v", i+1, err) - } - } - - // 21st request should be rate limited - if err := limiter.Limit(ctx); err == nil { - t.Error("Request 21 should be rate limited") - } else if status.Code(err) != codes.ResourceExhausted { - t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) - } -} - -func TestClientLimiter_Limit_MethodOverrides(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 100.0, - PerClientBurst: 200, - MethodLimits: map[string]config.MethodLimit{ - "/expensive/Method": { - RPS: 5.0, - Burst: 10, - }, - }, - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctxRegular := contextWithClientAndMethod("spiffe://example.org/client1", "/regular/Method") - ctxExpensive := contextWithClientAndMethod("spiffe://example.org/client1", "/expensive/Method") - - // Regular method should use per-client limit (burst 200) - for i := range 200 { - if err := limiter.Limit(ctxRegular); err != nil { - t.Errorf("Regular method request %d should be allowed, got error: %v", i+1, err) - } - } - - // Expensive method should use method-specific limit (burst 10) - for i := range 10 { - if err := limiter.Limit(ctxExpensive); err != nil { - t.Errorf("Expensive method request %d should be allowed (within burst), got error: %v", i+1, err) - } - } - - // 11th request to expensive method should be rate limited - if err := limiter.Limit(ctxExpensive); err == nil { - t.Error("Expensive method request 11 should be rate limited") - } else if status.Code(err) != codes.ResourceExhausted { - t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) - } -} - -func TestClientLimiter_Limit_TokenRefill(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 10.0, // 10 req/sec = 1 token per 100ms - GlobalBurst: 10, // Burst should be >= RPS - PerClientRPS: 10.0, - PerClientBurst: 10, - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - - // Exhaust tokens - for i := range 10 { - if err := limiter.Limit(ctx); err != nil { - t.Errorf("Request %d should be allowed, got error: %v", i+1, err) - } - } - - // Should be rate limited now - if err := limiter.Limit(ctx); err == nil { - t.Error("Should be rate limited after exhausting burst") - } - - // Wait for token refill (150ms should give us 1-2 tokens at 10 req/sec) - time.Sleep(150 * time.Millisecond) - - // Should succeed now - if err := 
limiter.Limit(ctx); err != nil { - t.Errorf("Should be allowed after token refill, got error: %v", err) - } -} - -func TestClientLimiter_Limit_Disabled(t *testing.T) { - cfg := &config.Config{ - Enabled: false, - GlobalRPS: 1.0, // Very low limit - GlobalBurst: 1, - PerClientRPS: 1.0, - PerClientBurst: 1, - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - - // All requests should be allowed when disabled - for i := range 100 { - if err := limiter.Limit(ctx); err != nil { - t.Errorf("Request %d should be allowed (rate limiting disabled), got error: %v", i+1, err) - } - } -} - -func TestClientLimiter_Limit_ConcurrentAccess(t *testing.T) { - // This test should be run with: go test -race - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 1000.0, - GlobalBurst: 2000, - PerClientRPS: 1000.0, - PerClientBurst: 2000, - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - var wg sync.WaitGroup - - // Simulate 100 concurrent clients, each making 100 requests - numClients := 100 - requestsPerClient := 100 - - for i := range numClients { - wg.Add(1) - - go func(clientID int) { - defer wg.Done() - - clientIDStr := fmt.Sprintf("spiffe://example.org/client%d", clientID) - - ctx := contextWithClientAndMethod(clientIDStr, "/test/Method") - for range requestsPerClient { - _ = limiter.Limit(ctx) - } - }(i) - } - - wg.Wait() - - // Verify we created limiters for all clients - count := limiter.GetLimiterCount() - if count != numClients { - t.Errorf("Expected %d limiters, got %d", numClients, count) - } -} - -func TestClientLimiter_GetLimiterCount(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 100.0, - PerClientBurst: 200, - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - // Initially, no limiters created - if count := limiter.GetLimiterCount(); count != 0 { - t.Errorf("Expected 0 limiters initially, got %d", count) - } - - // Make requests from 3 different clients - ctx1 := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - ctx2 := contextWithClientAndMethod("spiffe://example.org/client2", "/test/Method") - ctx3 := contextWithClientAndMethod("spiffe://example.org/client3", "/test/Method") - - _ = limiter.Limit(ctx1) - _ = limiter.Limit(ctx2) - _ = limiter.Limit(ctx3) - - // Should have 3 limiters - if count := limiter.GetLimiterCount(); count != 3 { - t.Errorf("Expected 3 limiters, got %d", count) - } - - // Making more requests from existing clients shouldn't create new limiters - _ = limiter.Limit(ctx1) - _ = limiter.Limit(ctx2) - - if count := limiter.GetLimiterCount(); count != 3 { - t.Errorf("Expected 3 limiters (reused), got %d", count) - } -} - -func TestClientLimiter_MethodSpecificLimiters(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 100.0, - PerClientBurst: 200, - MethodLimits: map[string]config.MethodLimit{ - "/method1": {RPS: 10.0, Burst: 20}, - "/method2": {RPS: 20.0, Burst: 40}, - }, - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - // Make requests to different methods - ctx1 
:= contextWithClientAndMethod("spiffe://example.org/client1", "/method1") - ctx2 := contextWithClientAndMethod("spiffe://example.org/client1", "/method2") - ctx3 := contextWithClientAndMethod("spiffe://example.org/client1", "/regular") - - _ = limiter.Limit(ctx1) - _ = limiter.Limit(ctx2) - _ = limiter.Limit(ctx3) - - // Should have 3 limiters: - // - client1:/method1 (method-specific) - // - client1:/method2 (method-specific) - // - client1 (regular per-client) - count := limiter.GetLimiterCount() - if count != 3 { - t.Errorf("Expected 3 limiters (2 method-specific + 1 regular), got %d", count) - } -} - -func TestClientLimiter_ZeroRPS(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 0, // Zero RPS = unlimited - GlobalBurst: 0, - PerClientRPS: 0, - PerClientBurst: 0, - MethodLimits: make(map[string]config.MethodLimit), - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error: %v", err) - } - - ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - - // All requests should be allowed with zero RPS - for i := range 100 { - if err := limiter.Limit(ctx); err != nil { - t.Errorf("Request %d should be allowed (zero RPS = unlimited), got error: %v", i+1, err) - } - } -} - -// TestClientLimiter_PanicOnInvalidTypeInMap tests the defensive panic -// when an invalid type is stored in the limiters map. -// This should never happen in normal operation but protects against internal bugs. -func TestClientLimiter_PanicOnInvalidTypeInMap(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error = %v", err) - } - - // Intentionally corrupt the limiters map by storing an invalid type - // This simulates an internal bug scenario - // The key should match what getLimiterForRequest uses for per-client limiters - limiter.limiters.Store("spiffe://example.org/corrupted", "invalid-type-not-a-limiter") - - // Test that Limit() panics when encountering the corrupted entry - defer func() { - if r := recover(); r == nil { - t.Error("Limit() should panic when limiters map contains invalid type") - } else { - // Verify panic message contains useful information - panicMsg := fmt.Sprintf("%v", r) - if !contains(panicMsg, "invalid type in limiters map") { - t.Errorf("Panic message should mention invalid type, got: %v", panicMsg) - } - } - }() - - ctx := contextWithClientAndMethod("spiffe://example.org/corrupted", "/test/Method") - _ = limiter.Limit(ctx) -} - -// TestClientLimiter_PanicOnInvalidTypeInLoadOrStore tests the defensive panic -// in the LoadOrStore path when an invalid type is encountered. 
-func TestClientLimiter_PanicOnInvalidTypeInLoadOrStore(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - } - - limiter, err := NewClientLimiter(cfg) - if err != nil { - t.Fatalf("NewClientLimiter() error = %v", err) - } - - ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") - - // First, create a valid limiter for a client - _ = limiter.Limit(ctx) - - // Now corrupt the map for that same client - limiter.limiters.Store("spiffe://example.org/client1", "corrupted-value") - - // Test that subsequent operations panic - defer func() { - if r := recover(); r == nil { - t.Error("Operation should panic when limiters map contains invalid type") - } else { - panicMsg := fmt.Sprintf("%v", r) - if !contains(panicMsg, "invalid type in limiters map") { - t.Errorf("Panic message should mention invalid type, got: %v", panicMsg) - } - } - }() - - // This should trigger the panic when trying to use the corrupted limiter - _ = limiter.Limit(ctx) -} - -// contains checks if a string contains a substring. -func contains(s, substr string) bool { - return len(s) >= len(substr) && (s == substr || len(substr) == 0 || - (len(s) > 0 && len(substr) > 0 && indexOfString(s, substr) >= 0)) -} - -// indexOfString returns the index of substr in s, or -1 if not found. -func indexOfString(s, substr string) int { - for i := 0; i <= len(s)-len(substr); i++ { - if s[i:i+len(substr)] == substr { - return i - } - } - - return -1 -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package ratelimit + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/agntcy/dir/server/authn" + "github.com/agntcy/dir/server/middleware/ratelimit/config" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// contextWithMethod creates a context with a gRPC method set for testing. +func contextWithMethod(method string) context.Context { + return grpc.NewContextWithServerTransportStream(context.Background(), &mockServerTransportStream{method: method}) +} + +// contextWithClientAndMethod creates a context with both SPIFFE ID and gRPC method for testing. +func contextWithClientAndMethod(clientID string, method string) context.Context { + ctx := contextWithMethod(method) + + if clientID != "" { + spiffeID, _ := spiffeid.FromString(clientID) + ctx = context.WithValue(ctx, authn.SpiffeIDContextKey, spiffeID) + } + + return ctx +} + +// mockServerTransportStream is a minimal implementation for setting method in context. 
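+//
+// grpc.Method(ctx) looks up the ServerTransportStream stored in the context
+// and returns its Method(), so stubbing Method() is sufficient here; the
+// header/trailer methods are deliberate no-ops.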
+type mockServerTransportStream struct { + method string +} + +func (m *mockServerTransportStream) Method() string { + return m.method +} + +func (m *mockServerTransportStream) SetHeader(md metadata.MD) error { return nil } +func (m *mockServerTransportStream) SendHeader(md metadata.MD) error { return nil } +func (m *mockServerTransportStream) SetTrailer(md metadata.MD) error { return nil } + +func TestNewClientLimiter(t *testing.T) { + tests := []struct { + name string + config *config.Config + wantErr bool + errMsg string + }{ + { + name: "valid configuration", + config: &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: make(map[string]config.MethodLimit), + }, + wantErr: false, + }, + { + name: "nil configuration should fail", + config: nil, + wantErr: true, + errMsg: "config cannot be nil", + }, + { + name: "invalid configuration should fail", + config: &config.Config{ + Enabled: true, + GlobalRPS: -100.0, // Invalid + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: true, + errMsg: "invalid rate limit config", + }, + { + name: "disabled configuration should succeed", + config: &config.Config{ + Enabled: false, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: false, + }, + { + name: "zero global RPS should create limiter without global limit", + config: &config.Config{ + Enabled: true, + GlobalRPS: 0, // Zero means no global limit + GlobalBurst: 0, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limiter, err := NewClientLimiter(tt.config) + + //nolint:nestif // Standard table-driven test error checking pattern + if tt.wantErr { + if err == nil { + t.Errorf("NewClientLimiter() expected error but got none") + + return + } + + if tt.errMsg != "" && !contains(err.Error(), tt.errMsg) { + t.Errorf("NewClientLimiter() error = %q, want to contain %q", err.Error(), tt.errMsg) + } + } else { + if err != nil { + t.Errorf("NewClientLimiter() unexpected error: %v", err) + + return + } + + if limiter == nil { + t.Error("NewClientLimiter() returned nil limiter") + } + } + }) + } +} + +func TestClientLimiter_Limit_PerClientLimiting(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 10.0, + GlobalBurst: 20, + PerClientRPS: 10.0, // 10 req/sec + PerClientBurst: 20, // burst 20 + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctx1 := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + ctx2 := contextWithClientAndMethod("spiffe://example.org/client2", "/test/Method") + + // Client 1: Exhaust burst capacity + for i := range 20 { + if err := limiter.Limit(ctx1); err != nil { + t.Errorf("Request %d should be allowed (within burst), got error: %v", i+1, err) + } + } + + // Client 1: 21st request should be rate limited + if err := limiter.Limit(ctx1); err == nil { + t.Error("Request 21 should be rate limited") + } else if status.Code(err) != codes.ResourceExhausted { + t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) + } + + // Client 2: Should still have full capacity (separate limiter) + for i := range 20 { + if err := limiter.Limit(ctx2); err != nil { + t.Errorf("Client2 request %d should be allowed, got error: %v", i+1, err) + } + } + + // Client 2: 
21st request should be rate limited + if err := limiter.Limit(ctx2); err == nil { + t.Error("Client2 request 21 should be rate limited") + } else if status.Code(err) != codes.ResourceExhausted { + t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) + } +} + +func TestClientLimiter_Limit_GlobalLimiting(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 10.0, + GlobalBurst: 20, + PerClientRPS: 0, // No per-client limit + PerClientBurst: 0, + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctx := contextWithClientAndMethod("", "/test/Method") + + // Anonymous client: Exhaust burst capacity + for i := range 20 { + if err := limiter.Limit(ctx); err != nil { + t.Errorf("Request %d should be allowed (within burst), got error: %v", i+1, err) + } + } + + // 21st request should be rate limited + if err := limiter.Limit(ctx); err == nil { + t.Error("Request 21 should be rate limited") + } else if status.Code(err) != codes.ResourceExhausted { + t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) + } +} + +func TestClientLimiter_Limit_MethodOverrides(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 100.0, + PerClientBurst: 200, + MethodLimits: map[string]config.MethodLimit{ + "/expensive/Method": { + RPS: 5.0, + Burst: 10, + }, + }, + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctxRegular := contextWithClientAndMethod("spiffe://example.org/client1", "/regular/Method") + ctxExpensive := contextWithClientAndMethod("spiffe://example.org/client1", "/expensive/Method") + + // Regular method should use per-client limit (burst 200) + for i := range 200 { + if err := limiter.Limit(ctxRegular); err != nil { + t.Errorf("Regular method request %d should be allowed, got error: %v", i+1, err) + } + } + + // Expensive method should use method-specific limit (burst 10) + for i := range 10 { + if err := limiter.Limit(ctxExpensive); err != nil { + t.Errorf("Expensive method request %d should be allowed (within burst), got error: %v", i+1, err) + } + } + + // 11th request to expensive method should be rate limited + if err := limiter.Limit(ctxExpensive); err == nil { + t.Error("Expensive method request 11 should be rate limited") + } else if status.Code(err) != codes.ResourceExhausted { + t.Errorf("Expected ResourceExhausted, got: %v", status.Code(err)) + } +} + +func TestClientLimiter_Limit_TokenRefill(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 10.0, // 10 req/sec = 1 token per 100ms + GlobalBurst: 10, // Burst should be >= RPS + PerClientRPS: 10.0, + PerClientBurst: 10, + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + + // Exhaust tokens + for i := range 10 { + if err := limiter.Limit(ctx); err != nil { + t.Errorf("Request %d should be allowed, got error: %v", i+1, err) + } + } + + // Should be rate limited now + if err := limiter.Limit(ctx); err == nil { + t.Error("Should be rate limited after exhausting burst") + } + + // Wait for token refill (150ms should give us 1-2 tokens at 10 req/sec) + time.Sleep(150 * time.Millisecond) + + // Should succeed now + if err := 
limiter.Limit(ctx); err != nil { + t.Errorf("Should be allowed after token refill, got error: %v", err) + } +} + +func TestClientLimiter_Limit_Disabled(t *testing.T) { + cfg := &config.Config{ + Enabled: false, + GlobalRPS: 1.0, // Very low limit + GlobalBurst: 1, + PerClientRPS: 1.0, + PerClientBurst: 1, + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + + // All requests should be allowed when disabled + for i := range 100 { + if err := limiter.Limit(ctx); err != nil { + t.Errorf("Request %d should be allowed (rate limiting disabled), got error: %v", i+1, err) + } + } +} + +func TestClientLimiter_Limit_ConcurrentAccess(t *testing.T) { + // This test should be run with: go test -race + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 1000.0, + GlobalBurst: 2000, + PerClientRPS: 1000.0, + PerClientBurst: 2000, + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + var wg sync.WaitGroup + + // Simulate 100 concurrent clients, each making 100 requests + numClients := 100 + requestsPerClient := 100 + + for i := range numClients { + wg.Add(1) + + go func(clientID int) { + defer wg.Done() + + clientIDStr := fmt.Sprintf("spiffe://example.org/client%d", clientID) + + ctx := contextWithClientAndMethod(clientIDStr, "/test/Method") + for range requestsPerClient { + _ = limiter.Limit(ctx) + } + }(i) + } + + wg.Wait() + + // Verify we created limiters for all clients + count := limiter.GetLimiterCount() + if count != numClients { + t.Errorf("Expected %d limiters, got %d", numClients, count) + } +} + +func TestClientLimiter_GetLimiterCount(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 100.0, + PerClientBurst: 200, + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + // Initially, no limiters created + if count := limiter.GetLimiterCount(); count != 0 { + t.Errorf("Expected 0 limiters initially, got %d", count) + } + + // Make requests from 3 different clients + ctx1 := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + ctx2 := contextWithClientAndMethod("spiffe://example.org/client2", "/test/Method") + ctx3 := contextWithClientAndMethod("spiffe://example.org/client3", "/test/Method") + + _ = limiter.Limit(ctx1) + _ = limiter.Limit(ctx2) + _ = limiter.Limit(ctx3) + + // Should have 3 limiters + if count := limiter.GetLimiterCount(); count != 3 { + t.Errorf("Expected 3 limiters, got %d", count) + } + + // Making more requests from existing clients shouldn't create new limiters + _ = limiter.Limit(ctx1) + _ = limiter.Limit(ctx2) + + if count := limiter.GetLimiterCount(); count != 3 { + t.Errorf("Expected 3 limiters (reused), got %d", count) + } +} + +func TestClientLimiter_MethodSpecificLimiters(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 100.0, + PerClientBurst: 200, + MethodLimits: map[string]config.MethodLimit{ + "/method1": {RPS: 10.0, Burst: 20}, + "/method2": {RPS: 20.0, Burst: 40}, + }, + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + // Make requests to different methods + ctx1 
:= contextWithClientAndMethod("spiffe://example.org/client1", "/method1") + ctx2 := contextWithClientAndMethod("spiffe://example.org/client1", "/method2") + ctx3 := contextWithClientAndMethod("spiffe://example.org/client1", "/regular") + + _ = limiter.Limit(ctx1) + _ = limiter.Limit(ctx2) + _ = limiter.Limit(ctx3) + + // Should have 3 limiters: + // - client1:/method1 (method-specific) + // - client1:/method2 (method-specific) + // - client1 (regular per-client) + count := limiter.GetLimiterCount() + if count != 3 { + t.Errorf("Expected 3 limiters (2 method-specific + 1 regular), got %d", count) + } +} + +func TestClientLimiter_ZeroRPS(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 0, // Zero RPS = unlimited + GlobalBurst: 0, + PerClientRPS: 0, + PerClientBurst: 0, + MethodLimits: make(map[string]config.MethodLimit), + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error: %v", err) + } + + ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + + // All requests should be allowed with zero RPS + for i := range 100 { + if err := limiter.Limit(ctx); err != nil { + t.Errorf("Request %d should be allowed (zero RPS = unlimited), got error: %v", i+1, err) + } + } +} + +// TestClientLimiter_PanicOnInvalidTypeInMap tests the defensive panic +// when an invalid type is stored in the limiters map. +// This should never happen in normal operation but protects against internal bugs. +func TestClientLimiter_PanicOnInvalidTypeInMap(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error = %v", err) + } + + // Intentionally corrupt the limiters map by storing an invalid type + // This simulates an internal bug scenario + // The key should match what getLimiterForRequest uses for per-client limiters + limiter.limiters.Store("spiffe://example.org/corrupted", "invalid-type-not-a-limiter") + + // Test that Limit() panics when encountering the corrupted entry + defer func() { + if r := recover(); r == nil { + t.Error("Limit() should panic when limiters map contains invalid type") + } else { + // Verify panic message contains useful information + panicMsg := fmt.Sprintf("%v", r) + if !contains(panicMsg, "invalid type in limiters map") { + t.Errorf("Panic message should mention invalid type, got: %v", panicMsg) + } + } + }() + + ctx := contextWithClientAndMethod("spiffe://example.org/corrupted", "/test/Method") + _ = limiter.Limit(ctx) +} + +// TestClientLimiter_PanicOnInvalidTypeInLoadOrStore tests the defensive panic +// in the LoadOrStore path when an invalid type is encountered. 
+func TestClientLimiter_PanicOnInvalidTypeInLoadOrStore(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + } + + limiter, err := NewClientLimiter(cfg) + if err != nil { + t.Fatalf("NewClientLimiter() error = %v", err) + } + + ctx := contextWithClientAndMethod("spiffe://example.org/client1", "/test/Method") + + // First, create a valid limiter for a client + _ = limiter.Limit(ctx) + + // Now corrupt the map for that same client + limiter.limiters.Store("spiffe://example.org/client1", "corrupted-value") + + // Test that subsequent operations panic + defer func() { + if r := recover(); r == nil { + t.Error("Operation should panic when limiters map contains invalid type") + } else { + panicMsg := fmt.Sprintf("%v", r) + if !contains(panicMsg, "invalid type in limiters map") { + t.Errorf("Panic message should mention invalid type, got: %v", panicMsg) + } + } + }() + + // This should trigger the panic when trying to use the corrupted limiter + _ = limiter.Limit(ctx) +} + +// contains checks if a string contains a substring. +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(substr) == 0 || + (len(s) > 0 && len(substr) > 0 && indexOfString(s, substr) >= 0)) +} + +// indexOfString returns the index of substr in s, or -1 if not found. +func indexOfString(s, substr string) int { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return i + } + } + + return -1 +} diff --git a/server/middleware/ratelimit/server.go b/server/middleware/ratelimit/server.go index 626bef8d4..880670cff 100644 --- a/server/middleware/ratelimit/server.go +++ b/server/middleware/ratelimit/server.go @@ -1,57 +1,57 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package ratelimit - -import ( - "github.com/agntcy/dir/server/middleware/ratelimit/config" - grpc_ratelimit "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/ratelimit" - "google.golang.org/grpc" -) - -// ServerOptions creates unary and stream rate limiting interceptors for gRPC server. -// These interceptors enforce rate limits based on client identity (SPIFFE ID) and method. -// -// This uses the go-grpc-middleware/v2 rate limiting interceptors with a custom -// Limiter implementation that supports per-client and per-method rate limiting. -// -// Returns an error if the configuration is invalid (e.g., negative values). -// -// IMPORTANT: These interceptors should be placed AFTER recovery middleware but BEFORE -// authentication/authorization middleware in the interceptor chain. This ensures: -// 1. Panics are caught by recovery middleware -// 2. Rate limiting protects authentication/authorization processing -// 3. DDoS attacks are mitigated before expensive auth operations -// -// Example usage: -// -// serverOpts := []grpc.ServerOption{} -// // Recovery FIRST (outermost) -// serverOpts = append(serverOpts, recovery.ServerOptions()...) -// // Rate limiting AFTER recovery -// if rateLimitCfg.Enabled { -// rateLimitOpts, err := ratelimit.ServerOptions(rateLimitCfg) -// if err != nil { -// return err -// } -// serverOpts = append(serverOpts, rateLimitOpts...) -// } -// // Logging and auth interceptors after rate limiting -// serverOpts = append(serverOpts, logging.ServerOptions(...)...) -// serverOpts = append(serverOpts, authn.GetServerOptions()...) 
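Editor's note: the corruption tests above exercise a sync.Map of per-client limiters in which a failed type assertion can only mean an internal bug, hence the defensive panic. A minimal sketch of that pattern, assuming golang.org/x/time/rate for the token bucket; the type and method names here are illustrative, not the repository's implementation:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/time/rate"
)

// perClientLimiters sketches the pattern the tests above poke at: a sync.Map
// keyed by client identity, populated lazily with LoadOrStore, where anything
// other than *rate.Limiter in the map is an internal bug worth panicking over.
type perClientLimiters struct {
	limiters sync.Map // client key -> *rate.Limiter
	rps      rate.Limit
	burst    int
}

func (p *perClientLimiters) get(clientID string) *rate.Limiter {
	v, _ := p.limiters.LoadOrStore(clientID, rate.NewLimiter(p.rps, p.burst))

	l, ok := v.(*rate.Limiter)
	if !ok {
		// Defensive: nothing but *rate.Limiter should ever be stored here.
		panic(fmt.Sprintf("invalid type in limiters map: %T", v))
	}

	return l
}

func main() {
	p := &perClientLimiters{rps: 10, burst: 20}

	l := p.get("spiffe://example.org/client1")
	fmt.Println(l.Allow()) // true while the 20-token burst lasts; refills at 10/s
}
```

Incidentally, the test-local contains and indexOfString helpers replicate what strings.Contains and strings.Index from the standard library already provide.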
-func ServerOptions(cfg *config.Config) ([]grpc.ServerOption, error) { - // Create the client limiter that implements go-grpc-middleware/v2 Limiter interface - limiter, err := NewClientLimiter(cfg) - if err != nil { - return nil, err - } - - return []grpc.ServerOption{ - grpc.ChainUnaryInterceptor( - grpc_ratelimit.UnaryServerInterceptor(limiter), - ), - grpc.ChainStreamInterceptor( - grpc_ratelimit.StreamServerInterceptor(limiter), - ), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package ratelimit + +import ( + "github.com/agntcy/dir/server/middleware/ratelimit/config" + grpc_ratelimit "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/ratelimit" + "google.golang.org/grpc" +) + +// ServerOptions creates unary and stream rate limiting interceptors for gRPC server. +// These interceptors enforce rate limits based on client identity (SPIFFE ID) and method. +// +// This uses the go-grpc-middleware/v2 rate limiting interceptors with a custom +// Limiter implementation that supports per-client and per-method rate limiting. +// +// Returns an error if the configuration is invalid (e.g., negative values). +// +// IMPORTANT: These interceptors should be placed AFTER recovery middleware but BEFORE +// authentication/authorization middleware in the interceptor chain. This ensures: +// 1. Panics are caught by recovery middleware +// 2. Rate limiting protects authentication/authorization processing +// 3. DDoS attacks are mitigated before expensive auth operations +// +// Example usage: +// +// serverOpts := []grpc.ServerOption{} +// // Recovery FIRST (outermost) +// serverOpts = append(serverOpts, recovery.ServerOptions()...) +// // Rate limiting AFTER recovery +// if rateLimitCfg.Enabled { +// rateLimitOpts, err := ratelimit.ServerOptions(rateLimitCfg) +// if err != nil { +// return err +// } +// serverOpts = append(serverOpts, rateLimitOpts...) +// } +// // Logging and auth interceptors after rate limiting +// serverOpts = append(serverOpts, logging.ServerOptions(...)...) +// serverOpts = append(serverOpts, authn.GetServerOptions()...) +func ServerOptions(cfg *config.Config) ([]grpc.ServerOption, error) { + // Create the client limiter that implements go-grpc-middleware/v2 Limiter interface + limiter, err := NewClientLimiter(cfg) + if err != nil { + return nil, err + } + + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor( + grpc_ratelimit.UnaryServerInterceptor(limiter), + ), + grpc.ChainStreamInterceptor( + grpc_ratelimit.StreamServerInterceptor(limiter), + ), + }, nil +} diff --git a/server/middleware/ratelimit/server_test.go b/server/middleware/ratelimit/server_test.go index 7e06baa62..d5848bb0b 100644 --- a/server/middleware/ratelimit/server_test.go +++ b/server/middleware/ratelimit/server_test.go @@ -1,134 +1,134 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package ratelimit - -import ( - "testing" - - "github.com/agntcy/dir/server/middleware/ratelimit/config" -) - -// TestServerOptions_ValidConfiguration tests that ServerOptions correctly -// creates interceptors with a valid configuration. 
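Editor's note: the interceptor surfaces codes.ResourceExhausted when a limit is hit, but nothing in this diff covers the caller's side. As a hedged illustration, with the retry count and delays invented, a client could treat that code as retryable with exponential backoff:

```go
package main

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// callWithBackoff retries a gRPC call while the server's rate-limit
// interceptor answers with codes.ResourceExhausted. Five attempts and a
// doubling 100ms delay are illustrative numbers, not a recommendation.
func callWithBackoff(ctx context.Context, call func(context.Context) error) error {
	backoff := 100 * time.Millisecond

	var err error
	for attempt := 0; attempt < 5; attempt++ {
		if err = call(ctx); status.Code(err) != codes.ResourceExhausted {
			return err // success, or an error that is not a rate limit
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(backoff):
			backoff *= 2
		}
	}

	return err
}
```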
-func TestServerOptions_ValidConfiguration(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - } - - opts, err := ServerOptions(cfg) - if err != nil { - t.Errorf("ServerOptions() with valid config should not return error, got: %v", err) - } - - if opts == nil { - t.Error("ServerOptions() should return non-nil options") - } - - // Should return 2 options (unary and stream interceptors) - expectedLen := 2 - if len(opts) != expectedLen { - t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) - } -} - -// TestServerOptions_DisabledConfiguration tests that ServerOptions works -// correctly when rate limiting is disabled. -func TestServerOptions_DisabledConfiguration(t *testing.T) { - cfg := &config.Config{ - Enabled: false, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - } - - opts, err := ServerOptions(cfg) - if err != nil { - t.Errorf("ServerOptions() with disabled config should not return error, got: %v", err) - } - - if opts == nil { - t.Error("ServerOptions() should return non-nil options even when disabled") - } - - // Should still return interceptors (they'll just allow all requests) - expectedLen := 2 - if len(opts) != expectedLen { - t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) - } -} - -// TestServerOptions_InvalidConfiguration tests that ServerOptions returns -// an error with invalid configuration. -func TestServerOptions_InvalidConfiguration(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: -10.0, // Invalid: negative - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - } - - opts, err := ServerOptions(cfg) - if err == nil { - t.Error("ServerOptions() with invalid config should return error") - } - - if opts != nil { - t.Errorf("ServerOptions() with invalid config should return nil options, got: %v", opts) - } -} - -// TestServerOptions_NilConfiguration tests that ServerOptions handles -// nil configuration gracefully. -func TestServerOptions_NilConfiguration(t *testing.T) { - opts, err := ServerOptions(nil) - if err == nil { - t.Error("ServerOptions() with nil config should return error") - } - - if opts != nil { - t.Errorf("ServerOptions() with nil config should return nil options, got: %v", opts) - } -} - -// TestServerOptions_WithMethodLimits tests that ServerOptions correctly -// handles configuration with method-specific limits. 
-func TestServerOptions_WithMethodLimits(t *testing.T) { - cfg := &config.Config{ - Enabled: true, - GlobalRPS: 100.0, - GlobalBurst: 200, - PerClientRPS: 1000.0, - PerClientBurst: 1500, - MethodLimits: map[string]config.MethodLimit{ - "/test.Service/Method1": { - RPS: 50.0, - Burst: 100, - }, - "/test.Service/Method2": { - RPS: 20.0, - Burst: 40, - }, - }, - } - - opts, err := ServerOptions(cfg) - if err != nil { - t.Errorf("ServerOptions() with method limits should not return error, got: %v", err) - } - - if opts == nil { - t.Error("ServerOptions() should return non-nil options") - } - - expectedLen := 2 - if len(opts) != expectedLen { - t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package ratelimit + +import ( + "testing" + + "github.com/agntcy/dir/server/middleware/ratelimit/config" +) + +// TestServerOptions_ValidConfiguration tests that ServerOptions correctly +// creates interceptors with a valid configuration. +func TestServerOptions_ValidConfiguration(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + } + + opts, err := ServerOptions(cfg) + if err != nil { + t.Errorf("ServerOptions() with valid config should not return error, got: %v", err) + } + + if opts == nil { + t.Error("ServerOptions() should return non-nil options") + } + + // Should return 2 options (unary and stream interceptors) + expectedLen := 2 + if len(opts) != expectedLen { + t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) + } +} + +// TestServerOptions_DisabledConfiguration tests that ServerOptions works +// correctly when rate limiting is disabled. +func TestServerOptions_DisabledConfiguration(t *testing.T) { + cfg := &config.Config{ + Enabled: false, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + } + + opts, err := ServerOptions(cfg) + if err != nil { + t.Errorf("ServerOptions() with disabled config should not return error, got: %v", err) + } + + if opts == nil { + t.Error("ServerOptions() should return non-nil options even when disabled") + } + + // Should still return interceptors (they'll just allow all requests) + expectedLen := 2 + if len(opts) != expectedLen { + t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) + } +} + +// TestServerOptions_InvalidConfiguration tests that ServerOptions returns +// an error with invalid configuration. +func TestServerOptions_InvalidConfiguration(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: -10.0, // Invalid: negative + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + } + + opts, err := ServerOptions(cfg) + if err == nil { + t.Error("ServerOptions() with invalid config should return error") + } + + if opts != nil { + t.Errorf("ServerOptions() with invalid config should return nil options, got: %v", opts) + } +} + +// TestServerOptions_NilConfiguration tests that ServerOptions handles +// nil configuration gracefully. 
+func TestServerOptions_NilConfiguration(t *testing.T) { + opts, err := ServerOptions(nil) + if err == nil { + t.Error("ServerOptions() with nil config should return error") + } + + if opts != nil { + t.Errorf("ServerOptions() with nil config should return nil options, got: %v", opts) + } +} + +// TestServerOptions_WithMethodLimits tests that ServerOptions correctly +// handles configuration with method-specific limits. +func TestServerOptions_WithMethodLimits(t *testing.T) { + cfg := &config.Config{ + Enabled: true, + GlobalRPS: 100.0, + GlobalBurst: 200, + PerClientRPS: 1000.0, + PerClientBurst: 1500, + MethodLimits: map[string]config.MethodLimit{ + "/test.Service/Method1": { + RPS: 50.0, + Burst: 100, + }, + "/test.Service/Method2": { + RPS: 20.0, + Burst: 40, + }, + }, + } + + opts, err := ServerOptions(cfg) + if err != nil { + t.Errorf("ServerOptions() with method limits should not return error, got: %v", err) + } + + if opts == nil { + t.Error("ServerOptions() should return non-nil options") + } + + expectedLen := 2 + if len(opts) != expectedLen { + t.Errorf("ServerOptions() should return %d options, got: %d", expectedLen, len(opts)) + } +} diff --git a/server/middleware/recovery/handler.go b/server/middleware/recovery/handler.go index 43d6da696..903480862 100644 --- a/server/middleware/recovery/handler.go +++ b/server/middleware/recovery/handler.go @@ -1,65 +1,65 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package recovery provides gRPC interceptors for panic recovery. -package recovery - -import ( - "context" - "runtime/debug" - - "github.com/agntcy/dir/server/authn" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var logger = logging.Logger("recovery") - -// Log field keys for structured logging. -const ( - logFieldPanic = "panic" - logFieldStack = "stack" - logFieldMethod = "method" - logFieldSpiffeID = "spiffe_id" -) - -// Error message returned to clients when panic is recovered. -// This message is intentionally generic to avoid information disclosure. -const internalServerErrorMsg = "internal server error" - -// PanicHandler handles panics in gRPC handlers by logging full context and returning a safe error. -// It extracts SPIFFE ID (if available from authn interceptor), method name, and captures the -// full stack trace for debugging purposes. -// -// The panic details and stack trace are logged server-side only. Clients receive a sanitized -// "internal server error" message to avoid information disclosure. -// -// This handler should be used with go-grpc-middleware/v2 recovery interceptors. -func PanicHandler(ctx context.Context, p interface{}) error { - // Capture stack trace immediately - stack := debug.Stack() - - // Extract method name from context - method, _ := grpc.Method(ctx) - - // Build log fields - fields := []interface{}{ - logFieldPanic, p, - logFieldStack, string(stack), - logFieldMethod, method, - } - - // Extract SPIFFE ID if available (from authn interceptor) - if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { - fields = append(fields, logFieldSpiffeID, spiffeID.String()) - } - - // Log panic with all context - logger.Error("panic recovered in gRPC handler", fields...) 
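Editor's note: PanicHandler itself never calls recover; the grpc_recovery interceptor does that and passes the panic value in. Stripped of gRPC, the surrounding idiom looks roughly like the sketch below (simplified, not the middleware's actual code). Capturing debug.Stack() inside the handler works because the deferred recover, and therefore the handler, still runs on the panicking goroutine:

```go
package main

import (
	"context"
	"fmt"
)

// safeInvoke approximates what the recovery interceptor does around a gRPC
// handler: recover inside a deferred func and convert the panic value into
// an error via a pluggable handler (PanicHandler, in this package's case).
func safeInvoke(
	ctx context.Context,
	handle func(context.Context) error,
	onPanic func(context.Context, any) error,
) (err error) {
	defer func() {
		if p := recover(); p != nil {
			err = onPanic(ctx, p)
		}
	}()

	return handle(ctx)
}

func main() {
	err := safeInvoke(context.Background(),
		func(context.Context) error { panic("boom") },
		func(_ context.Context, p any) error { return fmt.Errorf("recovered: %v", p) },
	)
	fmt.Println(err) // recovered: boom
}
```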
- - // Return sanitized error to client (don't expose panic details for security) - // This is a gRPC status error that should be returned as-is to the client - return status.Error(codes.Internal, internalServerErrorMsg) //nolint:wrapcheck // Final gRPC error for client -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package recovery provides gRPC interceptors for panic recovery. +package recovery + +import ( + "context" + "runtime/debug" + + "github.com/agntcy/dir/server/authn" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var logger = logging.Logger("recovery") + +// Log field keys for structured logging. +const ( + logFieldPanic = "panic" + logFieldStack = "stack" + logFieldMethod = "method" + logFieldSpiffeID = "spiffe_id" +) + +// Error message returned to clients when panic is recovered. +// This message is intentionally generic to avoid information disclosure. +const internalServerErrorMsg = "internal server error" + +// PanicHandler handles panics in gRPC handlers by logging full context and returning a safe error. +// It extracts SPIFFE ID (if available from authn interceptor), method name, and captures the +// full stack trace for debugging purposes. +// +// The panic details and stack trace are logged server-side only. Clients receive a sanitized +// "internal server error" message to avoid information disclosure. +// +// This handler should be used with go-grpc-middleware/v2 recovery interceptors. +func PanicHandler(ctx context.Context, p interface{}) error { + // Capture stack trace immediately + stack := debug.Stack() + + // Extract method name from context + method, _ := grpc.Method(ctx) + + // Build log fields + fields := []interface{}{ + logFieldPanic, p, + logFieldStack, string(stack), + logFieldMethod, method, + } + + // Extract SPIFFE ID if available (from authn interceptor) + if spiffeID, ok := authn.SpiffeIDFromContext(ctx); ok { + fields = append(fields, logFieldSpiffeID, spiffeID.String()) + } + + // Log panic with all context + logger.Error("panic recovered in gRPC handler", fields...) + + // Return sanitized error to client (don't expose panic details for security) + // This is a gRPC status error that should be returned as-is to the client + return status.Error(codes.Internal, internalServerErrorMsg) //nolint:wrapcheck // Final gRPC error for client +} diff --git a/server/middleware/recovery/handler_test.go b/server/middleware/recovery/handler_test.go index e791c6225..d7d81830b 100644 --- a/server/middleware/recovery/handler_test.go +++ b/server/middleware/recovery/handler_test.go @@ -1,166 +1,166 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package recovery - -import ( - "context" - "errors" - "testing" - - "github.com/agntcy/dir/server/authn" - "github.com/spiffe/go-spiffe/v2/spiffeid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Test constants. -const ( - testMethodUnary = "/agntcy.dir.store.v1.StoreService/GetRecord" - expectedErrorMsg = "internal server error" -) - -// TestPanicHandler tests that PanicHandler recovers from panics and returns proper errors. 
-func TestPanicHandler(t *testing.T) { - tests := []struct { - name string - panicValue interface{} - expectCode codes.Code - expectMsg string - }{ - { - name: "string panic", - panicValue: "test panic", - expectCode: codes.Internal, - expectMsg: expectedErrorMsg, - }, - { - name: "error panic", - panicValue: errors.New("test error"), - expectCode: codes.Internal, - expectMsg: expectedErrorMsg, - }, - { - name: "nil pointer panic", - panicValue: "runtime error: invalid memory address or nil pointer dereference", - expectCode: codes.Internal, - expectMsg: expectedErrorMsg, - }, - { - name: "integer panic", - panicValue: 42, - expectCode: codes.Internal, - expectMsg: expectedErrorMsg, - }, - { - name: "nil panic", - panicValue: nil, - expectCode: codes.Internal, - expectMsg: expectedErrorMsg, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - err := PanicHandler(ctx, tt.panicValue) - - require.Error(t, err) - assert.Equal(t, tt.expectCode, status.Code(err)) - assert.Contains(t, err.Error(), tt.expectMsg) - }) - } -} - -// TestPanicHandlerWithMethod tests panic recovery with method name in context. -func TestPanicHandlerWithMethod(t *testing.T) { - // Create context with method name (simulating gRPC context) - ctx := grpc.NewContextWithServerTransportStream( - context.Background(), - &mockServerTransportStream{method: testMethodUnary}, - ) - - err := PanicHandler(ctx, "test panic") - - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMsg) -} - -// TestPanicHandlerWithSpiffeID tests panic recovery with SPIFFE ID in context. -func TestPanicHandlerWithSpiffeID(t *testing.T) { - // Create SPIFFE ID - spiffeID, err := spiffeid.FromString("spiffe://example.com/test/client") - require.NoError(t, err) - - // Create context with SPIFFE ID (as authn interceptor would set it) - ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - - err = PanicHandler(ctx, "test panic with spiffe id") - - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMsg) -} - -// TestPanicHandlerWithFullContext tests panic recovery with both method and SPIFFE ID. -func TestPanicHandlerWithFullContext(t *testing.T) { - // Create SPIFFE ID - spiffeID, err := spiffeid.FromString("spiffe://example.com/test/client") - require.NoError(t, err) - - // Create context with both SPIFFE ID and method - ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) - ctx = grpc.NewContextWithServerTransportStream( - ctx, - &mockServerTransportStream{method: testMethodUnary}, - ) - - err = PanicHandler(ctx, "panic with full context") - - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMsg) -} - -// TestPanicHandlerErrorIsInternal verifies that the error code is always Internal. -func TestPanicHandlerErrorIsInternal(t *testing.T) { - panicValues := []interface{}{ - "string", - errors.New("error"), - 42, - struct{ msg string }{"panic"}, - nil, - } - - for _, p := range panicValues { - err := PanicHandler(context.Background(), p) - assert.Equal(t, codes.Internal, status.Code(err), "expected Internal code for panic: %v", p) - } -} - -// mockServerTransportStream is a mock implementation of grpc.ServerTransportStream for testing. 
-type mockServerTransportStream struct { - method string -} - -func (m *mockServerTransportStream) Method() string { - return m.method -} - -func (m *mockServerTransportStream) SetHeader(md metadata.MD) error { - return nil -} - -func (m *mockServerTransportStream) SendHeader(md metadata.MD) error { - return nil -} - -func (m *mockServerTransportStream) SetTrailer(md metadata.MD) error { - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package recovery + +import ( + "context" + "errors" + "testing" + + "github.com/agntcy/dir/server/authn" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Test constants. +const ( + testMethodUnary = "/agntcy.dir.store.v1.StoreService/GetRecord" + expectedErrorMsg = "internal server error" +) + +// TestPanicHandler tests that PanicHandler recovers from panics and returns proper errors. +func TestPanicHandler(t *testing.T) { + tests := []struct { + name string + panicValue interface{} + expectCode codes.Code + expectMsg string + }{ + { + name: "string panic", + panicValue: "test panic", + expectCode: codes.Internal, + expectMsg: expectedErrorMsg, + }, + { + name: "error panic", + panicValue: errors.New("test error"), + expectCode: codes.Internal, + expectMsg: expectedErrorMsg, + }, + { + name: "nil pointer panic", + panicValue: "runtime error: invalid memory address or nil pointer dereference", + expectCode: codes.Internal, + expectMsg: expectedErrorMsg, + }, + { + name: "integer panic", + panicValue: 42, + expectCode: codes.Internal, + expectMsg: expectedErrorMsg, + }, + { + name: "nil panic", + panicValue: nil, + expectCode: codes.Internal, + expectMsg: expectedErrorMsg, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + + err := PanicHandler(ctx, tt.panicValue) + + require.Error(t, err) + assert.Equal(t, tt.expectCode, status.Code(err)) + assert.Contains(t, err.Error(), tt.expectMsg) + }) + } +} + +// TestPanicHandlerWithMethod tests panic recovery with method name in context. +func TestPanicHandlerWithMethod(t *testing.T) { + // Create context with method name (simulating gRPC context) + ctx := grpc.NewContextWithServerTransportStream( + context.Background(), + &mockServerTransportStream{method: testMethodUnary}, + ) + + err := PanicHandler(ctx, "test panic") + + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMsg) +} + +// TestPanicHandlerWithSpiffeID tests panic recovery with SPIFFE ID in context. +func TestPanicHandlerWithSpiffeID(t *testing.T) { + // Create SPIFFE ID + spiffeID, err := spiffeid.FromString("spiffe://example.com/test/client") + require.NoError(t, err) + + // Create context with SPIFFE ID (as authn interceptor would set it) + ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) + + err = PanicHandler(ctx, "test panic with spiffe id") + + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMsg) +} + +// TestPanicHandlerWithFullContext tests panic recovery with both method and SPIFFE ID. 
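Editor's note: these tests can fake the method name because grpc.Method simply reads the ServerTransportStream that grpc.NewContextWithServerTransportStream stashed in the context. The same wiring in isolation, with a throwaway stub type standing in for the test file's mock:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// stubStream is the minimal grpc.ServerTransportStream needed so that
// grpc.Method can recover a method name from a bare context in tests.
type stubStream struct{ method string }

func (s *stubStream) Method() string               { return s.method }
func (s *stubStream) SetHeader(metadata.MD) error  { return nil }
func (s *stubStream) SendHeader(metadata.MD) error { return nil }
func (s *stubStream) SetTrailer(metadata.MD) error { return nil }

func main() {
	ctx := grpc.NewContextWithServerTransportStream(
		context.Background(),
		&stubStream{method: "/agntcy.dir.store.v1.StoreService/GetRecord"},
	)

	method, ok := grpc.Method(ctx)
	fmt.Println(method, ok) // "/agntcy.dir.store.v1.StoreService/GetRecord" true
}
```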
+func TestPanicHandlerWithFullContext(t *testing.T) { + // Create SPIFFE ID + spiffeID, err := spiffeid.FromString("spiffe://example.com/test/client") + require.NoError(t, err) + + // Create context with both SPIFFE ID and method + ctx := context.WithValue(context.Background(), authn.SpiffeIDContextKey, spiffeID) + ctx = grpc.NewContextWithServerTransportStream( + ctx, + &mockServerTransportStream{method: testMethodUnary}, + ) + + err = PanicHandler(ctx, "panic with full context") + + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMsg) +} + +// TestPanicHandlerErrorIsInternal verifies that the error code is always Internal. +func TestPanicHandlerErrorIsInternal(t *testing.T) { + panicValues := []interface{}{ + "string", + errors.New("error"), + 42, + struct{ msg string }{"panic"}, + nil, + } + + for _, p := range panicValues { + err := PanicHandler(context.Background(), p) + assert.Equal(t, codes.Internal, status.Code(err), "expected Internal code for panic: %v", p) + } +} + +// mockServerTransportStream is a mock implementation of grpc.ServerTransportStream for testing. +type mockServerTransportStream struct { + method string +} + +func (m *mockServerTransportStream) Method() string { + return m.method +} + +func (m *mockServerTransportStream) SetHeader(md metadata.MD) error { + return nil +} + +func (m *mockServerTransportStream) SendHeader(md metadata.MD) error { + return nil +} + +func (m *mockServerTransportStream) SetTrailer(md metadata.MD) error { + return nil +} diff --git a/server/middleware/recovery/options.go b/server/middleware/recovery/options.go index 0020d96dc..163d5a2a3 100644 --- a/server/middleware/recovery/options.go +++ b/server/middleware/recovery/options.go @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package recovery - -import ( - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" -) - -// DefaultOptions returns the recommended recovery configuration for production use. -// It uses the custom PanicHandler for comprehensive logging and error handling. -// -// The recovery handler will: -// - Catch panics from handlers and interceptors -// - Log full stack traces with context (method, SPIFFE ID) -// - Return proper gRPC errors (codes.Internal) to clients -// - Keep the server running after panic recovery -func DefaultOptions() []grpc_recovery.Option { - return []grpc_recovery.Option{ - grpc_recovery.WithRecoveryHandlerContext(PanicHandler), - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package recovery + +import ( + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" +) + +// DefaultOptions returns the recommended recovery configuration for production use. +// It uses the custom PanicHandler for comprehensive logging and error handling. 
+// +// The recovery handler will: +// - Catch panics from handlers and interceptors +// - Log full stack traces with context (method, SPIFFE ID) +// - Return proper gRPC errors (codes.Internal) to clients +// - Keep the server running after panic recovery +func DefaultOptions() []grpc_recovery.Option { + return []grpc_recovery.Option{ + grpc_recovery.WithRecoveryHandlerContext(PanicHandler), + } +} diff --git a/server/middleware/recovery/server.go b/server/middleware/recovery/server.go index 2a4bdcfbb..233423cc7 100644 --- a/server/middleware/recovery/server.go +++ b/server/middleware/recovery/server.go @@ -1,36 +1,36 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package recovery - -import ( - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" - "google.golang.org/grpc" -) - -// ServerOptions creates unary and stream recovery interceptors for gRPC server. -// These interceptors catch panics and prevent server crashes. -// -// IMPORTANT: These interceptors MUST be the FIRST (outermost) interceptors in the chain -// to catch panics from all other interceptors and handlers. -// -// Example usage: -// -// serverOpts := []grpc.ServerOption{} -// // Recovery FIRST (outermost) -// serverOpts = append(serverOpts, recovery.ServerOptions()...) -// // Other interceptors after recovery -// serverOpts = append(serverOpts, logging.ServerOptions(...)...) -// serverOpts = append(serverOpts, authn.GetServerOptions()...) -func ServerOptions() []grpc.ServerOption { - opts := DefaultOptions() - - return []grpc.ServerOption{ - grpc.ChainUnaryInterceptor( - grpc_recovery.UnaryServerInterceptor(opts...), - ), - grpc.ChainStreamInterceptor( - grpc_recovery.StreamServerInterceptor(opts...), - ), - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package recovery + +import ( + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "google.golang.org/grpc" +) + +// ServerOptions creates unary and stream recovery interceptors for gRPC server. +// These interceptors catch panics and prevent server crashes. +// +// IMPORTANT: These interceptors MUST be the FIRST (outermost) interceptors in the chain +// to catch panics from all other interceptors and handlers. +// +// Example usage: +// +// serverOpts := []grpc.ServerOption{} +// // Recovery FIRST (outermost) +// serverOpts = append(serverOpts, recovery.ServerOptions()...) +// // Other interceptors after recovery +// serverOpts = append(serverOpts, logging.ServerOptions(...)...) +// serverOpts = append(serverOpts, authn.GetServerOptions()...) 
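Editor's note: wiring these options into a server is one line on top of grpc.NewServer. A minimal sketch, with the listen address invented and both service registration and the other middleware from the ordering comment omitted:

```go
package main

import (
	"log"
	"net"

	"github.com/agntcy/dir/server/middleware/recovery"
	"google.golang.org/grpc"
)

func main() {
	// Recovery interceptors first (outermost), per the doc comment above.
	srv := grpc.NewServer(recovery.ServerOptions()...)

	lis, err := net.Listen("tcp", ":8080") // address illustrative
	if err != nil {
		log.Fatal(err)
	}

	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```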
+func ServerOptions() []grpc.ServerOption { + opts := DefaultOptions() + + return []grpc.ServerOption{ + grpc.ChainUnaryInterceptor( + grpc_recovery.UnaryServerInterceptor(opts...), + ), + grpc.ChainStreamInterceptor( + grpc_recovery.StreamServerInterceptor(opts...), + ), + } +} diff --git a/server/middleware/recovery/server_test.go b/server/middleware/recovery/server_test.go index 604128de5..e1b069d63 100644 --- a/server/middleware/recovery/server_test.go +++ b/server/middleware/recovery/server_test.go @@ -1,291 +1,291 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package recovery - -import ( - "context" - "errors" - "io" - "testing" - - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Test constants for server_test.go. -const ( - testMethodUnaryServer = "/test.Service/UnaryMethod" - testMethodStreamServer = "/test.Service/StreamMethod" - expectedErrorMessage = "internal server error" -) - -// TestDefaultOptions verifies that DefaultOptions returns correct configuration. -func TestDefaultOptions(t *testing.T) { - opts := DefaultOptions() - - require.NotNil(t, opts) - assert.Len(t, opts, 1, "expected exactly one option") -} - -// TestDefaultOptionsWithPanicHandler verifies that DefaultOptions uses PanicHandler. -func TestDefaultOptionsWithPanicHandler(t *testing.T) { - // Create interceptor with DefaultOptions - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) - - // Create handler that panics - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - panic("test panic to verify handler") - } - - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - // Verify that our PanicHandler was used (returns specific error message) - assert.Nil(t, resp) - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMessage) -} - -// TestServerOptions verifies that ServerOptions returns correct interceptors. -func TestServerOptions(t *testing.T) { - opts := ServerOptions() - - require.NotNil(t, opts) - assert.Len(t, opts, 2, "expected exactly two options (unary + stream)") -} - -// TestUnaryInterceptorCatchesPanic tests that unary interceptor catches panics. -func TestUnaryInterceptorCatchesPanic(t *testing.T) { - // Create interceptor with our options - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) - - // Create handler that panics - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - panic("test panic in unary handler") - } - - // Execute interceptor - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - // Verify error returned (not panic) - assert.Nil(t, resp) - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMessage) -} - -// TestUnaryInterceptorNormalExecution tests that interceptor doesn't interfere with normal execution. -func TestUnaryInterceptorNormalExecution(t *testing.T) { - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) 
- - expectedResponse := &struct{ msg string }{"success"} - - // Create normal handler that doesn't panic - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return expectedResponse, nil - } - - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - // Verify normal execution - require.NoError(t, err) - assert.Equal(t, expectedResponse, resp) -} - -// TestUnaryInterceptorHandlerError tests that interceptor doesn't affect normal errors. -func TestUnaryInterceptorHandlerError(t *testing.T) { - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) - - expectedError := status.Error(codes.NotFound, "not found") - - // Create handler that returns error (not panic) - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return nil, expectedError //nolint:wrapcheck // Test data - intentionally returning unwrapped error - } - - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - // Verify error is passed through unchanged - assert.Nil(t, resp) - assert.Equal(t, expectedError, err) - assert.Equal(t, codes.NotFound, status.Code(err)) -} - -// TestStreamInterceptorCatchesPanic tests that stream interceptor catches panics. -func TestStreamInterceptorCatchesPanic(t *testing.T) { - // Create interceptor with our options - interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) - - // Create handler that panics - handler := func(srv interface{}, stream grpc.ServerStream) error { - panic("test panic in stream handler") - } - - info := &grpc.StreamServerInfo{ - FullMethod: testMethodStreamServer, - IsClientStream: true, - IsServerStream: true, - } - - // Execute interceptor - err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) - - // Verify error returned (not panic) - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - assert.Contains(t, err.Error(), expectedErrorMessage) -} - -// TestStreamInterceptorNormalExecution tests that interceptor doesn't interfere with normal execution. -func TestStreamInterceptorNormalExecution(t *testing.T) { - interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) - - // Create normal handler that doesn't panic - handler := func(srv interface{}, stream grpc.ServerStream) error { - return nil - } - - info := &grpc.StreamServerInfo{ - FullMethod: testMethodStreamServer, - IsClientStream: true, - IsServerStream: true, - } - - err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) - - // Verify normal execution - require.NoError(t, err) -} - -// TestStreamInterceptorHandlerError tests that interceptor doesn't affect normal errors. -func TestStreamInterceptorHandlerError(t *testing.T) { - interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) 
- - expectedError := status.Error(codes.Canceled, "canceled") - - // Create handler that returns error (not panic) - handler := func(srv interface{}, stream grpc.ServerStream) error { - return expectedError //nolint:wrapcheck // Test data - intentionally returning unwrapped error - } - - info := &grpc.StreamServerInfo{ - FullMethod: testMethodStreamServer, - IsClientStream: true, - IsServerStream: true, - } - - err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) - - // Verify error is passed through unchanged - assert.Equal(t, expectedError, err) - assert.Equal(t, codes.Canceled, status.Code(err)) -} - -// TestMultiplePanics tests that interceptor can handle multiple panics in sequence. -func TestMultiplePanics(t *testing.T) { - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) - - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - // Test multiple panics in sequence - for i := range 3 { - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - panic("panic number " + string(rune(i))) - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - assert.Nil(t, resp) - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - } -} - -// TestPanicTypes tests various panic types are all handled correctly. -func TestPanicTypes(t *testing.T) { - interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) - - info := &grpc.UnaryServerInfo{ - FullMethod: testMethodUnaryServer, - } - - tests := []struct { - name string - panicValue interface{} - }{ - {"string panic", "string panic"}, - {"error panic", errors.New("error panic")}, - {"integer panic", 42}, - {"struct panic", struct{ msg string }{"struct panic"}}, - {"nil panic", nil}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - panic(tt.panicValue) - } - - resp, err := interceptor(context.Background(), nil, info, handler) - - assert.Nil(t, resp) - require.Error(t, err) - assert.Equal(t, codes.Internal, status.Code(err)) - }) - } -} - -// mockServerStream is a mock implementation of grpc.ServerStream for testing. -// It stores the context to return it in Context() method. -// -//nolint:containedctx // Mock implementation requires context storage for testing -type mockServerStream struct { - ctx context.Context -} - -func (m *mockServerStream) SetHeader(md metadata.MD) error { - return nil -} - -func (m *mockServerStream) SendHeader(md metadata.MD) error { - return nil -} - -func (m *mockServerStream) SetTrailer(md metadata.MD) { -} - -func (m *mockServerStream) Context() context.Context { - return m.ctx -} - -func (m *mockServerStream) SendMsg(msg interface{}) error { - return nil -} - -func (m *mockServerStream) RecvMsg(msg interface{}) error { - return io.EOF -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package recovery + +import ( + "context" + "errors" + "io" + "testing" + + grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Test constants for server_test.go. 
+const ( + testMethodUnaryServer = "/test.Service/UnaryMethod" + testMethodStreamServer = "/test.Service/StreamMethod" + expectedErrorMessage = "internal server error" +) + +// TestDefaultOptions verifies that DefaultOptions returns correct configuration. +func TestDefaultOptions(t *testing.T) { + opts := DefaultOptions() + + require.NotNil(t, opts) + assert.Len(t, opts, 1, "expected exactly one option") +} + +// TestDefaultOptionsWithPanicHandler verifies that DefaultOptions uses PanicHandler. +func TestDefaultOptionsWithPanicHandler(t *testing.T) { + // Create interceptor with DefaultOptions + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) + + // Create handler that panics + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + panic("test panic to verify handler") + } + + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + // Verify that our PanicHandler was used (returns specific error message) + assert.Nil(t, resp) + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMessage) +} + +// TestServerOptions verifies that ServerOptions returns correct interceptors. +func TestServerOptions(t *testing.T) { + opts := ServerOptions() + + require.NotNil(t, opts) + assert.Len(t, opts, 2, "expected exactly two options (unary + stream)") +} + +// TestUnaryInterceptorCatchesPanic tests that unary interceptor catches panics. +func TestUnaryInterceptorCatchesPanic(t *testing.T) { + // Create interceptor with our options + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) + + // Create handler that panics + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + panic("test panic in unary handler") + } + + // Execute interceptor + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + // Verify error returned (not panic) + assert.Nil(t, resp) + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMessage) +} + +// TestUnaryInterceptorNormalExecution tests that interceptor doesn't interfere with normal execution. +func TestUnaryInterceptorNormalExecution(t *testing.T) { + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) + + expectedResponse := &struct{ msg string }{"success"} + + // Create normal handler that doesn't panic + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return expectedResponse, nil + } + + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + // Verify normal execution + require.NoError(t, err) + assert.Equal(t, expectedResponse, resp) +} + +// TestUnaryInterceptorHandlerError tests that interceptor doesn't affect normal errors. +func TestUnaryInterceptorHandlerError(t *testing.T) { + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) 
+ + expectedError := status.Error(codes.NotFound, "not found") + + // Create handler that returns error (not panic) + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, expectedError //nolint:wrapcheck // Test data - intentionally returning unwrapped error + } + + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + // Verify error is passed through unchanged + assert.Nil(t, resp) + assert.Equal(t, expectedError, err) + assert.Equal(t, codes.NotFound, status.Code(err)) +} + +// TestStreamInterceptorCatchesPanic tests that stream interceptor catches panics. +func TestStreamInterceptorCatchesPanic(t *testing.T) { + // Create interceptor with our options + interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) + + // Create handler that panics + handler := func(srv interface{}, stream grpc.ServerStream) error { + panic("test panic in stream handler") + } + + info := &grpc.StreamServerInfo{ + FullMethod: testMethodStreamServer, + IsClientStream: true, + IsServerStream: true, + } + + // Execute interceptor + err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) + + // Verify error returned (not panic) + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + assert.Contains(t, err.Error(), expectedErrorMessage) +} + +// TestStreamInterceptorNormalExecution tests that interceptor doesn't interfere with normal execution. +func TestStreamInterceptorNormalExecution(t *testing.T) { + interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) + + // Create normal handler that doesn't panic + handler := func(srv interface{}, stream grpc.ServerStream) error { + return nil + } + + info := &grpc.StreamServerInfo{ + FullMethod: testMethodStreamServer, + IsClientStream: true, + IsServerStream: true, + } + + err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) + + // Verify normal execution + require.NoError(t, err) +} + +// TestStreamInterceptorHandlerError tests that interceptor doesn't affect normal errors. +func TestStreamInterceptorHandlerError(t *testing.T) { + interceptor := grpc_recovery.StreamServerInterceptor(DefaultOptions()...) + + expectedError := status.Error(codes.Canceled, "canceled") + + // Create handler that returns error (not panic) + handler := func(srv interface{}, stream grpc.ServerStream) error { + return expectedError //nolint:wrapcheck // Test data - intentionally returning unwrapped error + } + + info := &grpc.StreamServerInfo{ + FullMethod: testMethodStreamServer, + IsClientStream: true, + IsServerStream: true, + } + + err := interceptor(nil, &mockServerStream{ctx: context.Background()}, info, handler) + + // Verify error is passed through unchanged + assert.Equal(t, expectedError, err) + assert.Equal(t, codes.Canceled, status.Code(err)) +} + +// TestMultiplePanics tests that interceptor can handle multiple panics in sequence. +func TestMultiplePanics(t *testing.T) { + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) 
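// Editorial note on the loop below: "panic number " + string(rune(i)) yields
// a control character for i = 0..2 (string(rune(0)) is "\x00"), not the digit.
// If distinct readable messages were the goal, strconv.Itoa(i) or
// fmt.Sprintf("panic number %d", i) would be the idiomatic conversion; the
// test still passes either way, since it only asserts on the status code.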
+ + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + // Test multiple panics in sequence + for i := range 3 { + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + panic("panic number " + string(rune(i))) + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + assert.Nil(t, resp) + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + } +} + +// TestPanicTypes tests various panic types are all handled correctly. +func TestPanicTypes(t *testing.T) { + interceptor := grpc_recovery.UnaryServerInterceptor(DefaultOptions()...) + + info := &grpc.UnaryServerInfo{ + FullMethod: testMethodUnaryServer, + } + + tests := []struct { + name string + panicValue interface{} + }{ + {"string panic", "string panic"}, + {"error panic", errors.New("error panic")}, + {"integer panic", 42}, + {"struct panic", struct{ msg string }{"struct panic"}}, + {"nil panic", nil}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + panic(tt.panicValue) + } + + resp, err := interceptor(context.Background(), nil, info, handler) + + assert.Nil(t, resp) + require.Error(t, err) + assert.Equal(t, codes.Internal, status.Code(err)) + }) + } +} + +// mockServerStream is a mock implementation of grpc.ServerStream for testing. +// It stores the context to return it in Context() method. +// +//nolint:containedctx // Mock implementation requires context storage for testing +type mockServerStream struct { + ctx context.Context +} + +func (m *mockServerStream) SetHeader(md metadata.MD) error { + return nil +} + +func (m *mockServerStream) SendHeader(md metadata.MD) error { + return nil +} + +func (m *mockServerStream) SetTrailer(md metadata.MD) { +} + +func (m *mockServerStream) Context() context.Context { + return m.ctx +} + +func (m *mockServerStream) SendMsg(msg interface{}) error { + return nil +} + +func (m *mockServerStream) RecvMsg(msg interface{}) error { + return io.EOF +} diff --git a/server/publication/config/config.go b/server/publication/config/config.go index cdca838b7..d31c664ce 100644 --- a/server/publication/config/config.go +++ b/server/publication/config/config.go @@ -1,25 +1,25 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import "time" - -const ( - DefaultPublicationSchedulerInterval = 1 * time.Hour - DefaultPublicationWorkerCount = 1 - DefaultPublicationWorkerTimeout = 30 * time.Minute -) - -type Config struct { - // Scheduler interval. - // The interval at which the scheduler will check for pending publications. - SchedulerInterval time.Duration `json:"scheduler_interval,omitempty" mapstructure:"scheduler_interval"` - - // Worker count. - // The maximum number of workers that can be running concurrently. - WorkerCount int `json:"worker_count,omitempty" mapstructure:"worker_count"` - - // Worker timeout. - WorkerTimeout time.Duration `json:"worker_timeout,omitempty" mapstructure:"worker_timeout"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import "time" + +const ( + DefaultPublicationSchedulerInterval = 1 * time.Hour + DefaultPublicationWorkerCount = 1 + DefaultPublicationWorkerTimeout = 30 * time.Minute +) + +type Config struct { + // Scheduler interval. + // The interval at which the scheduler will check for pending publications. 
+ SchedulerInterval time.Duration `json:"scheduler_interval,omitempty" mapstructure:"scheduler_interval"` + + // Worker count. + // The maximum number of workers that can be running concurrently. + WorkerCount int `json:"worker_count,omitempty" mapstructure:"worker_count"` + + // Worker timeout. + WorkerTimeout time.Duration `json:"worker_timeout,omitempty" mapstructure:"worker_timeout"` +} diff --git a/server/publication/publication.go b/server/publication/publication.go index acd40166b..99486b118 100644 --- a/server/publication/publication.go +++ b/server/publication/publication.go @@ -1,121 +1,121 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package publication - -import ( - "context" - "sync" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/publication/config" - publypes "github.com/agntcy/dir/server/publication/types" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" -) - -var logger = logging.Logger("publication") - -// Service manages the publication operations. -type Service struct { - db types.DatabaseAPI - store types.StoreAPI - routing types.RoutingAPI - config config.Config - - scheduler *Scheduler - workers []*Worker - - stopCh chan struct{} - wg sync.WaitGroup -} - -// New creates a new publication service. -func New(db types.DatabaseAPI, store types.StoreAPI, routing types.RoutingAPI, opts types.APIOptions) (*Service, error) { - return &Service{ - db: db, - store: store, - routing: routing, - config: opts.Config().Publication, - stopCh: make(chan struct{}), - }, nil -} - -// CreatePublication creates a new publication task to be processed. -func (s *Service) CreatePublication(_ context.Context, req *routingv1.PublishRequest) (string, error) { - return s.db.CreatePublication(req) //nolint:wrapcheck -} - -// Start begins the publication service operations. -func (s *Service) Start(ctx context.Context) error { - logger.Info("Starting publication service", "workers", s.config.WorkerCount, "interval", s.config.SchedulerInterval) - - // Create work queue - workQueue := make(chan publypes.WorkItem, 100) //nolint:mnd - - // Create and start scheduler - s.scheduler = NewScheduler(s.db, workQueue, s.config.SchedulerInterval) - - // Create and start workers - s.workers = make([]*Worker, s.config.WorkerCount) - for i := range s.config.WorkerCount { - s.workers[i] = NewWorker(i, s.db, s.store, s.routing, workQueue, s.config.WorkerTimeout) - } - - // Start scheduler - s.wg.Add(1) - - go func() { - defer s.wg.Done() - - s.scheduler.Run(ctx, s.stopCh) - }() - - // Start workers - for _, worker := range s.workers { - s.wg.Add(1) - - go func(w *Worker) { - defer s.wg.Done() - - w.Run(ctx, s.stopCh) - }(worker) - } - - logger.Info("Publication service started successfully") - - return nil -} - -// Stop gracefully shuts down the publication service. -func (s *Service) Stop() error { - logger.Info("Stopping publication service") - - // Stop all workers and scheduler - close(s.stopCh) - s.wg.Wait() - - logger.Info("Publication service stopped") - - return nil -} - -// IsReady checks if the publication service is ready to process publication requests. -// Returns true if the scheduler and workers have been started. 
-func (s *Service) IsReady(_ context.Context) bool { - if s.scheduler == nil { - logger.Debug("Publication service not ready: scheduler not initialized") - - return false - } - - if len(s.workers) == 0 { - logger.Debug("Publication service not ready: no workers initialized") - - return false - } - - logger.Debug("Publication service ready", "workers", len(s.workers)) - - return true -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package publication + +import ( + "context" + "sync" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/publication/config" + publypes "github.com/agntcy/dir/server/publication/types" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" +) + +var logger = logging.Logger("publication") + +// Service manages the publication operations. +type Service struct { + db types.DatabaseAPI + store types.StoreAPI + routing types.RoutingAPI + config config.Config + + scheduler *Scheduler + workers []*Worker + + stopCh chan struct{} + wg sync.WaitGroup +} + +// New creates a new publication service. +func New(db types.DatabaseAPI, store types.StoreAPI, routing types.RoutingAPI, opts types.APIOptions) (*Service, error) { + return &Service{ + db: db, + store: store, + routing: routing, + config: opts.Config().Publication, + stopCh: make(chan struct{}), + }, nil +} + +// CreatePublication creates a new publication task to be processed. +func (s *Service) CreatePublication(_ context.Context, req *routingv1.PublishRequest) (string, error) { + return s.db.CreatePublication(req) //nolint:wrapcheck +} + +// Start begins the publication service operations. +func (s *Service) Start(ctx context.Context) error { + logger.Info("Starting publication service", "workers", s.config.WorkerCount, "interval", s.config.SchedulerInterval) + + // Create work queue + workQueue := make(chan publypes.WorkItem, 100) //nolint:mnd + + // Create and start scheduler + s.scheduler = NewScheduler(s.db, workQueue, s.config.SchedulerInterval) + + // Create and start workers + s.workers = make([]*Worker, s.config.WorkerCount) + for i := range s.config.WorkerCount { + s.workers[i] = NewWorker(i, s.db, s.store, s.routing, workQueue, s.config.WorkerTimeout) + } + + // Start scheduler + s.wg.Add(1) + + go func() { + defer s.wg.Done() + + s.scheduler.Run(ctx, s.stopCh) + }() + + // Start workers + for _, worker := range s.workers { + s.wg.Add(1) + + go func(w *Worker) { + defer s.wg.Done() + + w.Run(ctx, s.stopCh) + }(worker) + } + + logger.Info("Publication service started successfully") + + return nil +} + +// Stop gracefully shuts down the publication service. +func (s *Service) Stop() error { + logger.Info("Stopping publication service") + + // Stop all workers and scheduler + close(s.stopCh) + s.wg.Wait() + + logger.Info("Publication service stopped") + + return nil +} + +// IsReady checks if the publication service is ready to process publication requests. +// Returns true if the scheduler and workers have been started. 
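+// Readiness reflects initialization only: Start() must have created the
+// scheduler and at least one worker; no live health probing is performed.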
+func (s *Service) IsReady(_ context.Context) bool { + if s.scheduler == nil { + logger.Debug("Publication service not ready: scheduler not initialized") + + return false + } + + if len(s.workers) == 0 { + logger.Debug("Publication service not ready: no workers initialized") + + return false + } + + logger.Debug("Publication service ready", "workers", len(s.workers)) + + return true +} diff --git a/server/publication/scheduler.go b/server/publication/scheduler.go index 9523d7051..09c0b40f4 100644 --- a/server/publication/scheduler.go +++ b/server/publication/scheduler.go @@ -1,89 +1,89 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package publication - -import ( - "context" - "time" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - publypes "github.com/agntcy/dir/server/publication/types" - "github.com/agntcy/dir/server/types" -) - -// Scheduler monitors the database for pending publication operations. -type Scheduler struct { - db types.PublicationDatabaseAPI - workQueue chan<- publypes.WorkItem - interval time.Duration -} - -// NewScheduler creates a new scheduler instance. -func NewScheduler(db types.PublicationDatabaseAPI, workQueue chan<- publypes.WorkItem, interval time.Duration) *Scheduler { - return &Scheduler{ - db: db, - workQueue: workQueue, - interval: interval, - } -} - -// Run starts the scheduler loop. -func (s *Scheduler) Run(ctx context.Context, stopCh <-chan struct{}) { - logger.Info("Starting publication scheduler", "interval", s.interval) - - ticker := time.NewTicker(s.interval) - defer ticker.Stop() - - // Process immediately on start - s.processPendingPublications(ctx) - - for { - select { - case <-ctx.Done(): - logger.Info("Scheduler stopping due to context cancellation") - - return - case <-stopCh: - logger.Info("Scheduler stopping due to stop signal") - - return - case <-ticker.C: - s.processPendingPublications(ctx) - } - } -} - -// processPendingPublications finds pending publications and dispatches them to workers. -func (s *Scheduler) processPendingPublications(ctx context.Context) { - logger.Debug("Processing pending publications") - - publications, err := s.db.GetPublicationsByStatus(routingv1.PublicationStatus_PUBLICATION_STATUS_PENDING) - if err != nil { - logger.Error("Failed to get pending publications", "error", err) - - return - } - - for _, publication := range publications { - select { - case <-ctx.Done(): - logger.Info("Stopping publication processing due to context cancellation") - - return - default: - // Try to dispatch work item - select { - case s.workQueue <- publypes.WorkItem{PublicationID: publication.GetID()}: - logger.Debug("Dispatched publication to worker", "publication_id", publication.GetID()) - - // Update status to in progress - if err := s.db.UpdatePublicationStatus(publication.GetID(), routingv1.PublicationStatus_PUBLICATION_STATUS_IN_PROGRESS); err != nil { - logger.Error("Failed to update publication status", "publication_id", publication.GetID(), "error", err) - } - default: - logger.Debug("Work queue is full, skipping publication", "publication_id", publication.GetID()) - } - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package publication + +import ( + "context" + "time" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + publypes "github.com/agntcy/dir/server/publication/types" + "github.com/agntcy/dir/server/types" +) + +// Scheduler monitors the database for pending publication operations. 
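+// On each tick it fetches PENDING publications and hands them to workers via
+// the shared work queue; dispatch is non-blocking, so items are skipped when
+// the queue is full and picked up again on a later tick.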
+type Scheduler struct { + db types.PublicationDatabaseAPI + workQueue chan<- publypes.WorkItem + interval time.Duration +} + +// NewScheduler creates a new scheduler instance. +func NewScheduler(db types.PublicationDatabaseAPI, workQueue chan<- publypes.WorkItem, interval time.Duration) *Scheduler { + return &Scheduler{ + db: db, + workQueue: workQueue, + interval: interval, + } +} + +// Run starts the scheduler loop. +func (s *Scheduler) Run(ctx context.Context, stopCh <-chan struct{}) { + logger.Info("Starting publication scheduler", "interval", s.interval) + + ticker := time.NewTicker(s.interval) + defer ticker.Stop() + + // Process immediately on start + s.processPendingPublications(ctx) + + for { + select { + case <-ctx.Done(): + logger.Info("Scheduler stopping due to context cancellation") + + return + case <-stopCh: + logger.Info("Scheduler stopping due to stop signal") + + return + case <-ticker.C: + s.processPendingPublications(ctx) + } + } +} + +// processPendingPublications finds pending publications and dispatches them to workers. +func (s *Scheduler) processPendingPublications(ctx context.Context) { + logger.Debug("Processing pending publications") + + publications, err := s.db.GetPublicationsByStatus(routingv1.PublicationStatus_PUBLICATION_STATUS_PENDING) + if err != nil { + logger.Error("Failed to get pending publications", "error", err) + + return + } + + for _, publication := range publications { + select { + case <-ctx.Done(): + logger.Info("Stopping publication processing due to context cancellation") + + return + default: + // Try to dispatch work item + select { + case s.workQueue <- publypes.WorkItem{PublicationID: publication.GetID()}: + logger.Debug("Dispatched publication to worker", "publication_id", publication.GetID()) + + // Update status to in progress + if err := s.db.UpdatePublicationStatus(publication.GetID(), routingv1.PublicationStatus_PUBLICATION_STATUS_IN_PROGRESS); err != nil { + logger.Error("Failed to update publication status", "publication_id", publication.GetID(), "error", err) + } + default: + logger.Debug("Work queue is full, skipping publication", "publication_id", publication.GetID()) + } + } + } +} diff --git a/server/publication/types/types.go b/server/publication/types/types.go index d808718ac..d5785fe33 100644 --- a/server/publication/types/types.go +++ b/server/publication/types/types.go @@ -1,10 +1,10 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -// WorkItem represents a publication task that needs to be processed by a worker. -type WorkItem struct { - // PublicationID is the unique identifier of the publication to process - PublicationID string -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +// WorkItem represents a publication task that needs to be processed by a worker. 
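+// WorkItems are produced by the Scheduler and consumed by Workers over a
+// buffered channel owned by the publication Service.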
+type WorkItem struct { + // PublicationID is the unique identifier of the publication to process + PublicationID string +} diff --git a/server/publication/worker.go b/server/publication/worker.go index 7f7396df0..2bdf0b26c 100644 --- a/server/publication/worker.go +++ b/server/publication/worker.go @@ -1,191 +1,191 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package publication - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - databaseutils "github.com/agntcy/dir/server/database/utils" - publypes "github.com/agntcy/dir/server/publication/types" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" -) - -// Worker processes publication requests from the work queue. -type Worker struct { - id int - db types.DatabaseAPI - store types.StoreAPI - routing types.RoutingAPI - workQueue <-chan publypes.WorkItem - timeout time.Duration -} - -// NewWorker creates a new worker instance. -func NewWorker(id int, db types.DatabaseAPI, store types.StoreAPI, routing types.RoutingAPI, workQueue <-chan publypes.WorkItem, timeout time.Duration) *Worker { - return &Worker{ - id: id, - db: db, - store: store, - routing: routing, - workQueue: workQueue, - timeout: timeout, - } -} - -// Run starts the worker loop. -func (w *Worker) Run(ctx context.Context, stopCh <-chan struct{}) { - logger.Info("Starting publication worker", "worker_id", w.id) - - for { - select { - case <-ctx.Done(): - logger.Info("Worker stopping due to context cancellation", "worker_id", w.id) - - return - case <-stopCh: - logger.Info("Worker stopping due to stop signal", "worker_id", w.id) - - return - case workItem := <-w.workQueue: - w.processPublication(ctx, workItem) - } - } -} - -// processPublication processes a single publication request. 
-func (w *Worker) processPublication(ctx context.Context, workItem publypes.WorkItem) { - logger.Info("Processing publication", "worker_id", w.id, "publication_id", workItem.PublicationID) - - // Create a timeout context for this operation - timeoutCtx, cancel := context.WithTimeout(ctx, w.timeout) - defer cancel() - - // Get the publication from database - publicationObj, err := w.db.GetPublicationByID(workItem.PublicationID) - if err != nil { - logger.Error("Failed to get publication", "publication_id", workItem.PublicationID, "error", err) - w.markPublicationFailed(workItem.PublicationID) - - return - } - - request := publicationObj.GetRequest() - if request == nil { - logger.Error("Publication has no request", "publication_id", workItem.PublicationID) - w.markPublicationFailed(workItem.PublicationID) - - return - } - - // Get CIDs to publish based on the request type - cids, err := w.getCIDsFromRequest(timeoutCtx, request) - if err != nil { - logger.Error("Failed to get CIDs from request", "publication_id", workItem.PublicationID, "error", err) - w.markPublicationFailed(workItem.PublicationID) - - return - } - - if len(cids) == 0 { - logger.Info("No CIDs found to publish", "publication_id", workItem.PublicationID) - w.markPublicationCompleted(workItem.PublicationID) - - return - } - - // Announce each CID to the DHT - successCount := 0 - - for _, cid := range cids { - if err := w.announceToDHT(timeoutCtx, cid); err != nil { - logger.Error("Failed to announce CID to DHT", "publication_id", workItem.PublicationID, "cid", cid, "error", err) - } else { - successCount++ - - logger.Debug("Successfully announced CID to DHT", "publication_id", workItem.PublicationID, "cid", cid) - } - } - - logger.Info("Publication processing completed", "worker_id", w.id, "publication_id", workItem.PublicationID, - "total_cids", len(cids), "successful_announcements", successCount) - - // Mark as completed if we announced all CIDs successfully - if successCount == len(cids) { - w.markPublicationCompleted(workItem.PublicationID) - } else { - w.markPublicationFailed(workItem.PublicationID) - } -} - -// getCIDsFromRequest extracts CIDs from the publication request based on its type. -func (w *Worker) getCIDsFromRequest(_ context.Context, request *routingv1.PublishRequest) ([]string, error) { - switch req := request.GetRequest().(type) { - case *routingv1.PublishRequest_RecordRefs: - // Direct CID references - var cids []string - for _, ref := range req.RecordRefs.GetRefs() { - cids = append(cids, ref.GetCid()) - } - - return cids, nil - - case *routingv1.PublishRequest_Queries: - // Convert search query to database filter options - filterOpts, err := databaseutils.QueryToFilters(req.Queries.GetQueries()) - if err != nil { - return nil, fmt.Errorf("failed to convert query to filter options: %w", err) - } - - // Get CIDs using the filter options - return w.db.GetRecordCIDs(filterOpts...) //nolint:wrapcheck - - default: - return nil, errors.New("unknown request type") - } -} - -// announceToDHT announces a single CID to the DHT. 
-func (w *Worker) announceToDHT(ctx context.Context, cid string) error { - // Create a RecordRef for the CID - recordRef := &corev1.RecordRef{ - Cid: cid, - } - - // Pull the record from the store - record, err := w.store.Pull(ctx, recordRef) - if err != nil { - return fmt.Errorf("failed to pull record from store: %w", err) - } - - // Wrap record with adapter for interface-based publishing - adapter := adapters.NewRecordAdapter(record) - - // Publish the record to the network - err = w.routing.Publish(ctx, adapter) - if err != nil { - return fmt.Errorf("failed to publish record to network: %w", err) - } - - return nil -} - -// markPublicationCompleted marks a publication as completed. -func (w *Worker) markPublicationCompleted(publicationID string) { - if err := w.db.UpdatePublicationStatus(publicationID, routingv1.PublicationStatus_PUBLICATION_STATUS_COMPLETED); err != nil { - logger.Error("Failed to mark publication as completed", "publication_id", publicationID, "error", err) - } -} - -// markPublicationFailed marks a publication as failed. -func (w *Worker) markPublicationFailed(publicationID string) { - if err := w.db.UpdatePublicationStatus(publicationID, routingv1.PublicationStatus_PUBLICATION_STATUS_FAILED); err != nil { - logger.Error("Failed to mark publication as failed", "publication_id", publicationID, "error", err) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package publication + +import ( + "context" + "errors" + "fmt" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + databaseutils "github.com/agntcy/dir/server/database/utils" + publypes "github.com/agntcy/dir/server/publication/types" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" +) + +// Worker processes publication requests from the work queue. +type Worker struct { + id int + db types.DatabaseAPI + store types.StoreAPI + routing types.RoutingAPI + workQueue <-chan publypes.WorkItem + timeout time.Duration +} + +// NewWorker creates a new worker instance. +func NewWorker(id int, db types.DatabaseAPI, store types.StoreAPI, routing types.RoutingAPI, workQueue <-chan publypes.WorkItem, timeout time.Duration) *Worker { + return &Worker{ + id: id, + db: db, + store: store, + routing: routing, + workQueue: workQueue, + timeout: timeout, + } +} + +// Run starts the worker loop. +func (w *Worker) Run(ctx context.Context, stopCh <-chan struct{}) { + logger.Info("Starting publication worker", "worker_id", w.id) + + for { + select { + case <-ctx.Done(): + logger.Info("Worker stopping due to context cancellation", "worker_id", w.id) + + return + case <-stopCh: + logger.Info("Worker stopping due to stop signal", "worker_id", w.id) + + return + case workItem := <-w.workQueue: + w.processPublication(ctx, workItem) + } + } +} + +// processPublication processes a single publication request. 
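+//
+// The pipeline is: load the publication and its request from the database,
+// resolve the CIDs to announce (direct refs or query-derived), announce each
+// CID to the DHT, and mark the publication COMPLETED only if every
+// announcement succeeded (FAILED otherwise). All steps share one timeout
+// context derived from the worker timeout.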
+func (w *Worker) processPublication(ctx context.Context, workItem publypes.WorkItem) { + logger.Info("Processing publication", "worker_id", w.id, "publication_id", workItem.PublicationID) + + // Create a timeout context for this operation + timeoutCtx, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + + // Get the publication from database + publicationObj, err := w.db.GetPublicationByID(workItem.PublicationID) + if err != nil { + logger.Error("Failed to get publication", "publication_id", workItem.PublicationID, "error", err) + w.markPublicationFailed(workItem.PublicationID) + + return + } + + request := publicationObj.GetRequest() + if request == nil { + logger.Error("Publication has no request", "publication_id", workItem.PublicationID) + w.markPublicationFailed(workItem.PublicationID) + + return + } + + // Get CIDs to publish based on the request type + cids, err := w.getCIDsFromRequest(timeoutCtx, request) + if err != nil { + logger.Error("Failed to get CIDs from request", "publication_id", workItem.PublicationID, "error", err) + w.markPublicationFailed(workItem.PublicationID) + + return + } + + if len(cids) == 0 { + logger.Info("No CIDs found to publish", "publication_id", workItem.PublicationID) + w.markPublicationCompleted(workItem.PublicationID) + + return + } + + // Announce each CID to the DHT + successCount := 0 + + for _, cid := range cids { + if err := w.announceToDHT(timeoutCtx, cid); err != nil { + logger.Error("Failed to announce CID to DHT", "publication_id", workItem.PublicationID, "cid", cid, "error", err) + } else { + successCount++ + + logger.Debug("Successfully announced CID to DHT", "publication_id", workItem.PublicationID, "cid", cid) + } + } + + logger.Info("Publication processing completed", "worker_id", w.id, "publication_id", workItem.PublicationID, + "total_cids", len(cids), "successful_announcements", successCount) + + // Mark as completed if we announced all CIDs successfully + if successCount == len(cids) { + w.markPublicationCompleted(workItem.PublicationID) + } else { + w.markPublicationFailed(workItem.PublicationID) + } +} + +// getCIDsFromRequest extracts CIDs from the publication request based on its type. +func (w *Worker) getCIDsFromRequest(_ context.Context, request *routingv1.PublishRequest) ([]string, error) { + switch req := request.GetRequest().(type) { + case *routingv1.PublishRequest_RecordRefs: + // Direct CID references + var cids []string + for _, ref := range req.RecordRefs.GetRefs() { + cids = append(cids, ref.GetCid()) + } + + return cids, nil + + case *routingv1.PublishRequest_Queries: + // Convert search query to database filter options + filterOpts, err := databaseutils.QueryToFilters(req.Queries.GetQueries()) + if err != nil { + return nil, fmt.Errorf("failed to convert query to filter options: %w", err) + } + + // Get CIDs using the filter options + return w.db.GetRecordCIDs(filterOpts...) //nolint:wrapcheck + + default: + return nil, errors.New("unknown request type") + } +} + +// announceToDHT announces a single CID to the DHT. 
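+// It pulls the full record from the store, wraps it in a RecordAdapter, and
+// delegates the network announcement to the routing layer.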
+func (w *Worker) announceToDHT(ctx context.Context, cid string) error { + // Create a RecordRef for the CID + recordRef := &corev1.RecordRef{ + Cid: cid, + } + + // Pull the record from the store + record, err := w.store.Pull(ctx, recordRef) + if err != nil { + return fmt.Errorf("failed to pull record from store: %w", err) + } + + // Wrap record with adapter for interface-based publishing + adapter := adapters.NewRecordAdapter(record) + + // Publish the record to the network + err = w.routing.Publish(ctx, adapter) + if err != nil { + return fmt.Errorf("failed to publish record to network: %w", err) + } + + return nil +} + +// markPublicationCompleted marks a publication as completed. +func (w *Worker) markPublicationCompleted(publicationID string) { + if err := w.db.UpdatePublicationStatus(publicationID, routingv1.PublicationStatus_PUBLICATION_STATUS_COMPLETED); err != nil { + logger.Error("Failed to mark publication as completed", "publication_id", publicationID, "error", err) + } +} + +// markPublicationFailed marks a publication as failed. +func (w *Worker) markPublicationFailed(publicationID string) { + if err := w.db.UpdatePublicationStatus(publicationID, routingv1.PublicationStatus_PUBLICATION_STATUS_FAILED); err != nil { + logger.Error("Failed to mark publication as failed", "publication_id", publicationID, "error", err) + } +} diff --git a/server/routing/ROUTING.md b/server/routing/ROUTING.md index c73bc21c4..bb97abddc 100644 --- a/server/routing/ROUTING.md +++ b/server/routing/ROUTING.md @@ -1,677 +1,677 @@ -# Routing System Documentation - -This document provides comprehensive documentation for the routing system, including architecture, operations, and storage interactions. - -## Summary - -The routing system manages record discovery and announcement across both local storage and distributed networks using a **pull-based architecture** designed for scalability to hundreds of peers. 
It provides three main operations: - -- **Publish**: Announces CID availability to DHT network, triggering pull-based label discovery -- **List**: Efficiently queries local records with optional filtering (local-only) -- **Search**: Discovers remote records using OR logic with minimum threshold matching - -The system uses a **pull-based discovery architecture**: -- **OCI Storage**: Immutable record content (container images/artifacts) -- **Local KV Storage**: Fast indexing and cached remote labels (BadgerDB/In-memory) -- **DHT Storage**: Content provider announcements only (libp2p DHT) -- **RPC Layer**: On-demand content fetching for label extraction - -**Key Architectural Benefits:** -- **Scalable**: Works with hundreds of peers (not limited by DHT k-closest constraints) -- **Reliable**: Uses proven DHT provider system instead of unreliable label propagation -- **Fresh**: Labels extracted directly from content, preventing drift -- **Efficient**: Local caching for fast queries, background maintenance for staleness - ---- - -## Constants - -### Import - -```go -import "github.com/agntcy/dir/server/routing" -``` - -### Timing Constants - -```go -// DHT Record TTL (48 hours) -routing.DHTRecordTTL - -// Label Republishing Interval (36 hours) -routing.LabelRepublishInterval - -// Remote Label Cleanup Interval (48 hours) -routing.RemoteLabelCleanupInterval - -// Provider Record TTL (48 hours) -routing.ProviderRecordTTL - -// DHT Refresh Interval (30 seconds) -routing.RefreshInterval -``` - -### Protocol Constants - -```go -// Protocol prefix for DHT -routing.ProtocolPrefix // "dir" - -// Rendezvous string for peer discovery -routing.ProtocolRendezvous // "dir/connect" -``` - -### Validation Constants - -```go -// Maximum hops for distributed queries -routing.MaxHops // 20 - -// Notification channel buffer size -routing.NotificationChannelSize // 1000 - -// Minimum parts required in enhanced label keys (after string split) -routing.MinLabelKeyParts // 5 - -// Default minimum match score for OR logic (proto-compliant) -routing.DefaultMinMatchScore // 1 -``` - -### Usage Examples - -```go -// Cleanup task using consistent interval -ticker := time.NewTicker(routing.RemoteLabelCleanupInterval) -defer ticker.Stop() - -// DHT configuration with consistent TTL -dht, err := dht.New(ctx, host, - dht.MaxRecordAge(routing.DHTRecordTTL), - dht.ProtocolPrefix(protocol.ID(routing.ProtocolPrefix)), -) - -// Validate enhanced label key format -parts := strings.Split(labelKey, "/") -if len(parts) < routing.MinLabelKeyParts { - return errors.New("invalid enhanced key format: expected ////") -} -``` - ---- - -## Enhanced Key Format - -The routing system uses a self-descriptive key format that embeds all essential information directly in the key structure. - -### Key Structure - -**Format**: `////` - -**Examples**: -``` -/skills/AI/Machine Learning/baeabc123.../12D3KooWExample... -/domains/technology/web/baedef456.../12D3KooWOther... -/modules/search/semantic/baeghi789.../12D3KooWAnother... -``` - -### Benefits - -1. **📖 Self-Documenting**: Keys tell the complete story at a glance -2. **⚡ Efficient Filtering**: PeerID extraction without JSON parsing -3. **🧹 Cleaner Storage**: Minimal JSON metadata (only timestamps) -4. **🔍 Better Debugging**: Database inspection shows relationships immediately -5. 
**🎯 Consistent**: Same format used in local storage and DHT network - -### Utility Functions - -```go -// Build enhanced keys -key := BuildEnhancedLabelKey("/skills/AI", "CID123", "Peer1") -// → "/skills/AI/CID123/Peer1" - -// Parse enhanced keys -label, cid, peerID, err := ParseEnhancedLabelKey(key) -// → ("/skills/AI", "CID123", "Peer1", nil) - -// Extract components -peerID := ExtractPeerIDFromKey(key) // → "Peer1" -cid := ExtractCIDFromKey(key) // → "CID123" -isLocal := IsLocalKey(key, "Peer1") // → true -``` - -### Storage Examples - -**Local Storage**: -``` -/records/CID123 → (empty) # Local record index -/skills/AI/ML/CID123/Peer1 → {"timestamp": "..."} # Enhanced label metadata -/domains/tech/CID123/Peer1 → {"timestamp": "..."} # Enhanced domain metadata -``` - -**DHT Network**: -``` -/skills/AI/ML/CID123/Peer1 → "CID123" # Enhanced network announcement -/domains/tech/CID123/Peer1 → "CID123" # Enhanced domain announcement -``` - ---- - -## Publish - -The Publish operation announces records for discovery by storing metadata in both local storage and the distributed DHT network. - -### Flow Diagram - -``` - ┌─────────────────────────────────────────────────────────────┐ - │ PUBLISH REQUEST │ - │ (gRPC Controller) │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ controller.Publish() │ - │ │ - │ 1. getRecord() - Validates RecordRef │ - │ ├─ store.Lookup(ctx, ref) [READ: OCI Storage] │ - │ └─ store.Pull(ctx, ref) [READ: OCI Storage] │ - │ │ - │ 2. routing.Publish(ctx, ref, record) │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ routing.Publish() │ - │ (Main Router) │ - │ │ - │ 1. local.Publish(ctx, ref, record) │ - │ 2. if hasPeersInRoutingTable(): │ - │ remote.Publish(ctx, ref, record) │ - └─────────┬─────────────────────┬─────────────────────────────┘ - │ │ - ┌─────────▼─────────────┐ │ - │ LOCAL PUBLISH │ │ - │ (routing_local.go) │ │ - └─────────┬─────────────┘ │ - │ │ - ▼ │ - ┌─────────────────────────────────────────────┐ │ - │ LOCAL KV STORAGE │ │ - │ (Routing Datastore) │ │ - │ │ │ - │ 1. loadMetrics() [READ: KV] │ │ - │ 2. dstore.Has(recordKey) [READ: KV] │ │ - │ 3. batch.Put(recordKey) [WRITE: KV] │ │ - │ └─ "/records/CID123" → (empty) │ │ - │ 4. For each label: [WRITE: KV] │ │ - │ └─ "/skills/AI/CID123/Peer1" → LabelMetadata │ │ - │ 5. metrics.update() [WRITE: KV] │ │ - │ └─ "/metrics" → JSON │ │ - │ 6. batch.Commit() [COMMIT: KV] │ │ - └─────────────────────────────────────────────┘ │ - │ - ┌──────────────────────▼──────────────────────┐ - │ REMOTE PUBLISH │ - │ (routing_remote.go) │ - └──────────────────────┬──────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────┐ - │ DHT STORAGE │ - │ (Distributed Network) │ - │ │ - │ 1. 
DHT().Provide(CID) [WRITE: DHT] │ - │ └─ Announce CID to network │ - │ └─ Triggers pull-based label discovery │ - │ │ - │ ❌ REMOVED: Individual label announcements │ - │ No more DHT.PutValue() for labels │ - │ Labels discovered via content pulling │ - └─────────────────────────────────────────────┘ -``` - -### Storage Operations - -**OCI Storage (Object Storage):** -- `READ`: `store.Lookup(RecordRef)` - Verify record exists -- `READ`: `store.Pull(RecordRef)` - Get full record content - -**Local KV Storage (Routing Datastore):** -- `READ`: `loadMetrics("/metrics")` - Get current metrics -- `READ`: `dstore.Has("/records/CID123")` - Check if already published -- `WRITE`: `"/records/CID123" → (empty)` - Mark as local record -- `WRITE`: `"/skills/AI/ML/CID123/Peer1" → LabelMetadata` - Store enhanced label metadata -- `WRITE`: `"/domains/tech/CID123/Peer1" → LabelMetadata` - Store enhanced domain metadata -- `WRITE`: `"/modules/search/CID123/Peer1" → LabelMetadata` - Store enhanced module metadata -- `WRITE`: `"/metrics" → JSON` - Update metrics - -**DHT Storage (Distributed Network):** -- `WRITE`: `DHT().Provide(CID123)` - Announce CID provider to network -- ❌ **REMOVED**: Individual label announcements via `DHT.PutValue()` -- **Pull-Based Discovery**: Remote peers discover labels by pulling content directly - -**Remote Peer Pull-Based Flow (Triggered by CID Provider Announcements):** -- `TRIGGER`: DHT provider notification received -- `RPC`: `service.Pull(ctx, peerID, recordRef)` - Fetch content from announcing peer -- `EXTRACT`: `GetLabels(record)` - Extract all labels from content -- `CACHE`: Store enhanced keys locally: `"/skills/AI/CID123/RemotePeerID" → LabelMetadata` - ---- - -## List - -The List operation efficiently queries local records with optional filtering. It's designed as a local-only operation that never accesses the network or OCI storage. - -### Flow Diagram - -``` - ┌─────────────────────────────────────────────────────────────┐ - │ LIST REQUEST │ - │ (gRPC Controller) │ - │ + RecordQuery[] (optional) │ - │ + Limit (optional) │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ controller.List() │ - │ │ - │ 1. routing.List(ctx, req) │ - │ 2. Stream ListResponse items to client │ - │ └─ NO OCI Storage access needed! │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ routing.List() │ - │ (Main Router) │ - │ │ - │ ✅ Always local-only operation │ - │ return local.List(ctx, req) │ - │ │ - │ ❌ NO remote.List() - Network not involved │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────┐ - │ LOCAL LIST ONLY │ - │ (routing_local.go) │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────────────────────┐ - │ LOCAL KV STORAGE │ - │ (Routing Datastore) │ - │ │ - │ STEP 1: Get Local Record CIDs │ - │ ├─ READ: dstore.Query("/records/") [READ: KV] │ - │ │ └─ Returns: "/records/CID123", "/records/CID456", ... 
│ - │ │ └─ ✅ Pre-filtered: Only LOCAL records │ - │ │ - │ STEP 2: For Each CID, Check Query Matching │ - │ ├─ matchesAllQueries(cid, queries): │ - │ │ │ │ - │ │ └─ getRecordLabelsEfficiently(cid): │ - │ │ ├─ READ: dstore.Query("/skills/") [READ: KV] │ - │ │ │ └─ Find: "/skills/AI/ML/CID123/Peer1" │ - │ │ │ └─ Extract: "/skills/AI/ML" │ - │ │ ├─ READ: dstore.Query("/domains/") [READ: KV] │ - │ │ │ └─ Find: "/domains/tech/CID123/Peer1" │ - │ │ │ └─ Extract: "/domains/tech" │ - │ │ └─ READ: dstore.Query("/modules/") [READ: KV] │ - │ │ └─ Find: "/modules/search/CID123/Peer1" │ - │ │ └─ Extract: "/modules/search" │ - │ │ │ - │ │ └─ queryMatchesLabels(query, labels): │ - │ │ └─ Check if ALL queries match labels (AND logic) │ - │ │ │ - │ └─ If matches: Return {RecordRef: CID123, Labels: [...]} │ - │ │ - │ ❌ NO OCI Storage access - Labels extracted from KV keys! │ - │ ❌ NO DHT Storage access - Local-only operation! │ - └─────────────────────────────────────────────────────────────────────────────┘ -``` - -### Storage Operations - -**OCI Storage (Object Storage):** -- ❌ **NO ACCESS** - List doesn't need record content! - -**Local KV Storage (Routing Datastore):** -- `READ`: `"/records/*"` - Get all local record CIDs -- `READ`: `"/skills/*"` - Extract skill labels for each CID -- `READ`: `"/domains/*"` - Extract domain labels for each CID -- `READ`: `"/modules/*"` - Extract module labels for each CID - -**DHT Storage (Distributed Network):** -- ❌ **NO ACCESS** - List is local-only operation! - -### Performance Characteristics - -**List vs Publish Storage Comparison:** -``` -PUBLISH: LIST: -├─ OCI: 2 reads (validate) ├─ OCI: 0 reads ✅ -├─ Local KV: 1 read + 5+ writes ├─ Local KV: 4+ reads only ✅ -└─ DHT: 0 reads + 4+ writes └─ DHT: 0 reads ✅ - -Result: List is much lighter! -``` - -**Key Optimizations:** -1. **No OCI Access**: Labels extracted from KV keys, not record content -2. **Local-Only**: No network/DHT interaction required -3. **Efficient Filtering**: Uses `/records/` index as starting point -4. **Key-Based Labels**: No expensive record parsing - -**Read Pattern**: `O(1 + 3×N)` KV reads where N = number of local records - ---- - -## Search - -The Search operation discovers remote records from other peers using **pull-based label caching** and **OR logic with minimum threshold**. It's designed for network-wide discovery at scale (hundreds of peers) and filters out local records, returning only records from remote peers that match at least `minMatchScore` queries. - -### Pull-Based Discovery Flow - -``` -PHASE 1: REMOTE PEER PUBLISHES CONTENT - ┌─────────────────────────────────────────────────────────────┐ - │ Remote Peer: DHT.Provide(CID) │ - │ │ - │ 1. Remote peer publishes content │ - │ 2. DHT().Provide(CID) announces availability │ - │ 3. Provider announcement propagates to all peers │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ -PHASE 2: LOCAL PEER DISCOVERS AND CACHES - ┌─────────────────────────────────────────────────────────────┐ - │ handleCIDProviderNotification() │ - │ (routing_remote.go) │ - │ │ - │ 1. Receive: CID provider notification │ - │ 2. Check: hasRemoteRecordCached() → false (new record) │ - │ 3. Pull: service.Pull(ctx, peerID, recordRef) │ - │ └─ RPC call to remote peer │ - │ 4. Extract: GetLabels(record) │ - │ └─ Parse skills, domains, modules from content │ - │ 5. 
Cache: Enhanced keys locally │ - │ ├─ "/skills/AI/CID123/RemotePeer" → LabelMetadata │ - │ ├─ "/domains/research/CID123/RemotePeer" → LabelMetadata│ - │ └─ "/modules/runtime/CID123/RemotePeer" → LabelMetadata│ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ -PHASE 3: USER SEARCHES FOR REMOTE RECORDS - ┌─────────────────────────────────────────────────────────────┐ - │ SEARCH REQUEST │ - │ (gRPC Controller) │ - │ + RecordQuery[] (skills/domains/modules) │ - │ + MinMatchScore (OR logic threshold) │ - └─────────────────────┬───────────────────────────────────────┘ - │ - ▼ - ┌─────────────────────────────────────────────────────────────────────────────┐ - │ LOCAL KV STORAGE │ - │ (Cached Remote Labels) │ - │ │ - │ STEP 1: Query Cached Remote Labels (Pull-Based Discovery Results) │ - │ ├─ READ: dstore.Query("/skills/") [READ: KV] │ - │ │ └─ Find: "/skills/AI/CID123/RemotePeer1" (cached via pull) │ - │ ├─ READ: dstore.Query("/domains/") [READ: KV] │ - │ │ └─ Find: "/domains/research/CID123/RemotePeer1" (cached via pull) │ - │ └─ READ: dstore.Query("/modules/") [READ: KV] │ - │ └─ Find: "/modules/runtime/CID123/RemotePeer1" (cached via pull) │ - │ │ - │ STEP 2: Filter for REMOTE Records Only │ - │ ├─ ParseEnhancedLabelKey(key) → (label, cid, peerID) │ - │ ├─ if peerID == localPeerID: continue (skip local) │ - │ └─ ✅ Only process records from remote peers │ - │ │ - │ STEP 3: Apply OR Logic with Minimum Threshold │ - │ ├─ calculateMatchScore(cid, queries, peerID): │ - │ │ ├─ For each query: check if it matches ANY label (OR logic) │ - │ │ ├─ Count matching queries → score │ - │ │ └─ Return: (matchingQueries[], score) │ - │ ├─ if score >= minMatchScore: include result ✅ │ - │ │ └─ Records returned if they match ≥N queries (OR relationship) │ - │ ├─ Apply deduplicateQueries() for consistent scoring │ - │ └─ Apply limit and duplicate CID filtering │ - │ │ - │ STEP 4: Return SearchResponse with Match Details │ - │ └─ {RecordRef: CID, Peer: RemotePeer, MatchQueries: [...], MatchScore: N} │ - │ │ - │ ✅ Uses cached labels from pull-based discovery │ - │ ✅ Fresh data (labels extracted directly from content) │ - │ ❌ NO DHT label queries - Uses local cache only │ - └─────────────────────────────────────────────────────────────────────────────┘ -``` - -### Storage Operations - -**Pull-Based Label Discovery (Background Process):** -- `RPC`: `service.Pull(ctx, remotePeerID, recordRef)` - Fetch content from remote peer -- `EXTRACT`: `GetLabels(record)` - Extract skills/domains/modules from content -- `CACHE`: Store enhanced keys locally for fast search - -**Search Query Execution (User Request):** - -**OCI Storage (Object Storage):** -- ❌ **NO ACCESS** - Search uses cached labels, not record content - -**Local KV Storage (Routing Datastore):** -- `READ`: `"/skills/*"` - Query cached remote skill labels (via pull-based discovery) -- `READ`: `"/domains/*"` - Query cached remote domain labels (via pull-based discovery) -- `READ`: `"/modules/*"` - Query cached remote module labels (via pull-based discovery) -- **Filter**: Only process keys where `peerID != localPeerID` - -**DHT Storage (Distributed Network):** -- ❌ **NO DIRECT ACCESS** - Search uses locally cached data from pull-based discovery - -**RPC Layer (Pull-Based Discovery):** -- `service.Pull(remotePeerID, recordRef)` - On-demand content fetching for new providers -- `service.Lookup(remotePeerID, recordRef)` - Metadata validation for announced content - -### Search vs List Comparison - -| Aspect | **List** | **Search** | 
-|--------|----------|------------| -| **Scope** | Local records only | Remote records only | -| **Data Source** | `/records/` index | Cached remote labels (pull-based) | -| **Filtering** | `peerID == localPeerID` | `peerID != localPeerID` | -| **Query Logic** | ✅ AND relationship (all must match) | ✅ OR relationship with minMatchScore threshold | -| **Discovery Method** | Direct local storage | Pull-based caching from DHT provider events | -| **Network Access** | ❌ None | ✅ RPC content pulling (background) | -| **Scalability** | Single peer | Hundreds of peers via pull-based discovery | -| **Response Type** | `ListResponse` | `SearchResponse` | -| **Additional Fields** | Labels only | + Peer info, match score, matching queries | -| **Content Freshness** | Always current | Fresh via on-demand content pulling | - -### Performance Characteristics - -**Pull-Based Discovery Performance:** -``` -BACKGROUND LABEL CACHING (per new CID provider announcement): -├─ RPC: 1 content pull from remote peer ✅ (only for new records) -├─ Local Processing: Label extraction from content ✅ -├─ Local KV: N writes (N = number of labels) ✅ -└─ Result: Fresh labels cached locally ✅ - -SEARCH EXECUTION (per user query): -├─ Local KV: 3+ reads (cached remote labels) ✅ -├─ Query deduplication and OR logic processing ✅ -├─ No network access needed ✅ (uses cache) -└─ Result: Fast search with fresh data ✅ -``` - -**Key Optimizations:** -1. **Scalable Caching**: Pull-based discovery works with hundreds of peers -2. **Fresh Content**: Labels extracted directly from source content -3. **Efficient Search**: Query cached labels, no real-time network access -4. **Content Validation**: RPC calls validate remote peer availability -5. **Background Processing**: Label discovery doesn't block user queries -6. **Query Deduplication**: Server-side defense against client bugs -7. **OR Logic Scoring**: Flexible matching with minimum threshold - -**Read Pattern**: -- **Discovery**: `O(1)` RPC call per new remote record -- **Search**: `O(4×M)` KV reads where M = number of cached remote labels (skills, domains, modules, locators) - -### OR Logic with Minimum Threshold - -**Core Concept:** -The Search API uses **OR logic** where records are returned if they match **at least N queries** (where N = `minMatchScore`). This provides flexible, scored matching for complex search scenarios. - -**Match Scoring Algorithm:** -```go -score := 0 -for each query in searchQueries { - if QueryMatchesLabels(query, recordLabels) { - score++ // OR logic: any match increments score - } -} -return score >= minMatchScore // Threshold filtering -``` - -**Production Safety:** -- **Default Behavior**: `minMatchScore = 0` defaults to `1` per proto specification -- **Empty Queries**: Rejected with helpful error (prevents expensive full scans) -- **Query Deduplication**: Server-side deduplication ensures consistent scoring - -### Query Types and Matching - -**Supported Query Types:** -1. **SKILL** (`RECORD_QUERY_TYPE_SKILL`) -2. **LOCATOR** (`RECORD_QUERY_TYPE_LOCATOR`) -3. **DOMAIN** (`RECORD_QUERY_TYPE_DOMAIN`) -4. 
**MODULE** (`RECORD_QUERY_TYPE_MODULE`) - -**Matching Rules:** - -**Skills & Domains & Modules (Hierarchical Matching):** -``` -Query: "AI" matches: -✅ /skills/AI (exact match) -✅ /skills/AI/ML (prefix match) -✅ /skills/AI/NLP/ChatBot (prefix match) -❌ /skills/Machine Learning (no match) -``` - -**Locators (Exact Matching Only):** -``` -Query: "docker-image" matches: -✅ /locators/docker-image (exact match only) -❌ /locators/docker-image/latest (no prefix matching) -``` - -### OR Logic Examples - -**Example 1: Flexible Matching** -```bash -# Query: Find records with AI OR Python skills, need at least 1 match -dirctl routing search --skill "AI" --skill "Python" --min-score 1 - -# Results: -# Record A: [AI] → Score: 1/2 → ✅ Returned (≥ minScore=1) -# Record B: [Python] → Score: 1/2 → ✅ Returned (≥ minScore=1) -# Record C: [AI, Python] → Score: 2/2 → ✅ Returned (≥ minScore=1) -# Record D: [Java] → Score: 0/2 → ❌ Filtered out (< minScore=1) -``` - -**Example 2: Strict Matching** -```bash -# Query: Find records with BOTH AI AND Python skills -dirctl routing search --skill "AI" --skill "Python" --min-score 2 - -# Results: -# Record A: [AI] → Score: 1/2 → ❌ Filtered out (< minScore=2) -# Record B: [Python] → Score: 1/2 → ❌ Filtered out (< minScore=2) -# Record C: [AI, Python] → Score: 2/2 → ✅ Returned (≥ minScore=2) -``` - -**Example 3: Mixed Query Types** -```bash -# Query: Multi-type search with threshold -dirctl routing search \ - --skill "AI" \ - --domain "research" \ - --module "runtime/python" \ - --min-score 2 - -# Results: -# Record A: [skills/AI, domains/research] → Score: 2/3 → ✅ Returned -# Record B: [skills/AI] → Score: 1/3 → ❌ Filtered out -# Record C: [domains/research, modules/runtime/python] → Score: 2/3 → ✅ Returned -``` - -### Pull-Based Discovery Benefits - -**Scalability:** -- **Not limited by DHT k-closest peers** (typically ~20) -- **Provider announcements reach all peers** via DHT.Provide() -- **On-demand content pulling** scales to hundreds of peers - -**Reliability:** -- **Uses working DHT components** (provider system, not broken label propagation) -- **Direct content fetching** bypasses DHT propagation issues -- **Fresh labels** always match actual content - -**Performance:** -- **Background caching** doesn't block user queries -- **Local cache queries** are fast (no network access during search) -- **Automatic cache management** via background tasks - ---- - -## Pull-Based Architecture Summary - -### Key Architectural Changes - -**Previous Architecture (Removed):** -- ❌ DHT.PutValue() for individual label announcements -- ❌ handleLabelNotification() event system -- ❌ Complex announcement type routing (CID vs Label) -- ❌ Limited by DHT k-closest peer constraints (~20 peers) - -**New Pull-Based Architecture:** -- ✅ DHT.Provide() for CID provider announcements only -- ✅ handleCIDProviderNotification() with content pulling -- ✅ Unified announcement handling (all are CID provider events) -- ✅ Scalable to hundreds of peers via RPC content fetching - -### Production Benefits - -**Scalability:** -- **Large Networks**: Not constrained by DHT k-closest limitations -- **Efficient Discovery**: Provider announcements reach all peers reliably -- **On-Demand Fetching**: Only pull content when discovery happens - -**Reliability:** -- **Proven Components**: Uses working DHT provider system -- **Fresh Data**: Labels extracted directly from content source -- **Self-Healing**: Failed pulls don't break the system - -**Performance:** -- **Fast Queries**: Local cache provides sub-millisecond 
search -- **Background Processing**: Label discovery doesn't block user operations -- **Automatic Maintenance**: Background republishing and cleanup - -**API Robustness:** -- **Query Deduplication**: Server defends against client bugs -- **Production Safety**: Proper defaults and validation -- **Complete Query Support**: Skills, locators, domains, modules all supported -- **OR Logic**: Flexible matching with minimum threshold control - -### Migration Notes - -**No Breaking Changes:** -- **API Interface**: Search/List APIs unchanged for existing clients -- **Enhanced Key Format**: Unchanged, maintains compatibility -- **Background Tasks**: Adapted for provider republishing, not removed - -**Improved Behavior:** -- **More Reliable**: Pull-based discovery vs unreliable label propagation -- **Better Scaling**: Hundreds of peers vs ~20 peer DHT limitation -- **Fresher Data**: Labels from content vs potentially stale DHT cache -- **OR Logic**: Proto-compliant search behavior with flexible matching +# Routing System Documentation + +This document provides comprehensive documentation for the routing system, including architecture, operations, and storage interactions. + +## Summary + +The routing system manages record discovery and announcement across both local storage and distributed networks using a **pull-based architecture** designed for scalability to hundreds of peers. It provides three main operations: + +- **Publish**: Announces CID availability to DHT network, triggering pull-based label discovery +- **List**: Efficiently queries local records with optional filtering (local-only) +- **Search**: Discovers remote records using OR logic with minimum threshold matching + +The system uses a **pull-based discovery architecture**: +- **OCI Storage**: Immutable record content (container images/artifacts) +- **Local KV Storage**: Fast indexing and cached remote labels (BadgerDB/In-memory) +- **DHT Storage**: Content provider announcements only (libp2p DHT) +- **RPC Layer**: On-demand content fetching for label extraction + +**Key Architectural Benefits:** +- **Scalable**: Works with hundreds of peers (not limited by DHT k-closest constraints) +- **Reliable**: Uses proven DHT provider system instead of unreliable label propagation +- **Fresh**: Labels extracted directly from content, preventing drift +- **Efficient**: Local caching for fast queries, background maintenance for staleness + +--- + +## Constants + +### Import + +```go +import "github.com/agntcy/dir/server/routing" +``` + +### Timing Constants + +```go +// DHT Record TTL (48 hours) +routing.DHTRecordTTL + +// Label Republishing Interval (36 hours) +routing.LabelRepublishInterval + +// Remote Label Cleanup Interval (48 hours) +routing.RemoteLabelCleanupInterval + +// Provider Record TTL (48 hours) +routing.ProviderRecordTTL + +// DHT Refresh Interval (30 seconds) +routing.RefreshInterval +``` + +### Protocol Constants + +```go +// Protocol prefix for DHT +routing.ProtocolPrefix // "dir" + +// Rendezvous string for peer discovery +routing.ProtocolRendezvous // "dir/connect" +``` + +### Validation Constants + +```go +// Maximum hops for distributed queries +routing.MaxHops // 20 + +// Notification channel buffer size +routing.NotificationChannelSize // 1000 + +// Minimum parts required in enhanced label keys (after string split) +routing.MinLabelKeyParts // 5 + +// Default minimum match score for OR logic (proto-compliant) +routing.DefaultMinMatchScore // 1 +``` + +### Usage Examples + +```go +// Cleanup task using consistent interval 
+ticker := time.NewTicker(routing.RemoteLabelCleanupInterval) +defer ticker.Stop() + +// DHT configuration with consistent TTL +dht, err := dht.New(ctx, host, + dht.MaxRecordAge(routing.DHTRecordTTL), + dht.ProtocolPrefix(protocol.ID(routing.ProtocolPrefix)), +) + +// Validate enhanced label key format +parts := strings.Split(labelKey, "/") +if len(parts) < routing.MinLabelKeyParts { + return errors.New("invalid enhanced key format: expected ////") +} +``` + +--- + +## Enhanced Key Format + +The routing system uses a self-descriptive key format that embeds all essential information directly in the key structure. + +### Key Structure + +**Format**: `////` + +**Examples**: +``` +/skills/AI/Machine Learning/baeabc123.../12D3KooWExample... +/domains/technology/web/baedef456.../12D3KooWOther... +/modules/search/semantic/baeghi789.../12D3KooWAnother... +``` + +### Benefits + +1. **📖 Self-Documenting**: Keys tell the complete story at a glance +2. **⚡ Efficient Filtering**: PeerID extraction without JSON parsing +3. **🧹 Cleaner Storage**: Minimal JSON metadata (only timestamps) +4. **🔍 Better Debugging**: Database inspection shows relationships immediately +5. **🎯 Consistent**: Same format used in local storage and DHT network + +### Utility Functions + +```go +// Build enhanced keys +key := BuildEnhancedLabelKey("/skills/AI", "CID123", "Peer1") +// → "/skills/AI/CID123/Peer1" + +// Parse enhanced keys +label, cid, peerID, err := ParseEnhancedLabelKey(key) +// → ("/skills/AI", "CID123", "Peer1", nil) + +// Extract components +peerID := ExtractPeerIDFromKey(key) // → "Peer1" +cid := ExtractCIDFromKey(key) // → "CID123" +isLocal := IsLocalKey(key, "Peer1") // → true +``` + +### Storage Examples + +**Local Storage**: +``` +/records/CID123 → (empty) # Local record index +/skills/AI/ML/CID123/Peer1 → {"timestamp": "..."} # Enhanced label metadata +/domains/tech/CID123/Peer1 → {"timestamp": "..."} # Enhanced domain metadata +``` + +**DHT Network**: +``` +/skills/AI/ML/CID123/Peer1 → "CID123" # Enhanced network announcement +/domains/tech/CID123/Peer1 → "CID123" # Enhanced domain announcement +``` + +--- + +## Publish + +The Publish operation announces records for discovery by storing metadata in both local storage and the distributed DHT network. + +### Flow Diagram + +``` + ┌─────────────────────────────────────────────────────────────┐ + │ PUBLISH REQUEST │ + │ (gRPC Controller) │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────┐ + │ controller.Publish() │ + │ │ + │ 1. getRecord() - Validates RecordRef │ + │ ├─ store.Lookup(ctx, ref) [READ: OCI Storage] │ + │ └─ store.Pull(ctx, ref) [READ: OCI Storage] │ + │ │ + │ 2. routing.Publish(ctx, ref, record) │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────┐ + │ routing.Publish() │ + │ (Main Router) │ + │ │ + │ 1. local.Publish(ctx, ref, record) │ + │ 2. if hasPeersInRoutingTable(): │ + │ remote.Publish(ctx, ref, record) │ + └─────────┬─────────────────────┬─────────────────────────────┘ + │ │ + ┌─────────▼─────────────┐ │ + │ LOCAL PUBLISH │ │ + │ (routing_local.go) │ │ + └─────────┬─────────────┘ │ + │ │ + ▼ │ + ┌─────────────────────────────────────────────┐ │ + │ LOCAL KV STORAGE │ │ + │ (Routing Datastore) │ │ + │ │ │ + │ 1. loadMetrics() [READ: KV] │ │ + │ 2. dstore.Has(recordKey) [READ: KV] │ │ + │ 3. 
batch.Put(recordKey) [WRITE: KV] │ │ + │ └─ "/records/CID123" → (empty) │ │ + │ 4. For each label: [WRITE: KV] │ │ + │ └─ "/skills/AI/CID123/Peer1" → LabelMetadata │ │ + │ 5. metrics.update() [WRITE: KV] │ │ + │ └─ "/metrics" → JSON │ │ + │ 6. batch.Commit() [COMMIT: KV] │ │ + └─────────────────────────────────────────────┘ │ + │ + ┌──────────────────────▼──────────────────────┐ + │ REMOTE PUBLISH │ + │ (routing_remote.go) │ + └──────────────────────┬──────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────┐ + │ DHT STORAGE │ + │ (Distributed Network) │ + │ │ + │ 1. DHT().Provide(CID) [WRITE: DHT] │ + │ └─ Announce CID to network │ + │ └─ Triggers pull-based label discovery │ + │ │ + │ ❌ REMOVED: Individual label announcements │ + │ No more DHT.PutValue() for labels │ + │ Labels discovered via content pulling │ + └─────────────────────────────────────────────┘ +``` + +### Storage Operations + +**OCI Storage (Object Storage):** +- `READ`: `store.Lookup(RecordRef)` - Verify record exists +- `READ`: `store.Pull(RecordRef)` - Get full record content + +**Local KV Storage (Routing Datastore):** +- `READ`: `loadMetrics("/metrics")` - Get current metrics +- `READ`: `dstore.Has("/records/CID123")` - Check if already published +- `WRITE`: `"/records/CID123" → (empty)` - Mark as local record +- `WRITE`: `"/skills/AI/ML/CID123/Peer1" → LabelMetadata` - Store enhanced label metadata +- `WRITE`: `"/domains/tech/CID123/Peer1" → LabelMetadata` - Store enhanced domain metadata +- `WRITE`: `"/modules/search/CID123/Peer1" → LabelMetadata` - Store enhanced module metadata +- `WRITE`: `"/metrics" → JSON` - Update metrics + +**DHT Storage (Distributed Network):** +- `WRITE`: `DHT().Provide(CID123)` - Announce CID provider to network +- ❌ **REMOVED**: Individual label announcements via `DHT.PutValue()` +- **Pull-Based Discovery**: Remote peers discover labels by pulling content directly + +**Remote Peer Pull-Based Flow (Triggered by CID Provider Announcements):** +- `TRIGGER`: DHT provider notification received +- `RPC`: `service.Pull(ctx, peerID, recordRef)` - Fetch content from announcing peer +- `EXTRACT`: `GetLabels(record)` - Extract all labels from content +- `CACHE`: Store enhanced keys locally: `"/skills/AI/CID123/RemotePeerID" → LabelMetadata` + +--- + +## List + +The List operation efficiently queries local records with optional filtering. It's designed as a local-only operation that never accesses the network or OCI storage. + +### Flow Diagram + +``` + ┌─────────────────────────────────────────────────────────────┐ + │ LIST REQUEST │ + │ (gRPC Controller) │ + │ + RecordQuery[] (optional) │ + │ + Limit (optional) │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────┐ + │ controller.List() │ + │ │ + │ 1. routing.List(ctx, req) │ + │ 2. Stream ListResponse items to client │ + │ └─ NO OCI Storage access needed! 
│ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────┐ + │ routing.List() │ + │ (Main Router) │ + │ │ + │ ✅ Always local-only operation │ + │ return local.List(ctx, req) │ + │ │ + │ ❌ NO remote.List() - Network not involved │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────┐ + │ LOCAL LIST ONLY │ + │ (routing_local.go) │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────────────────────┐ + │ LOCAL KV STORAGE │ + │ (Routing Datastore) │ + │ │ + │ STEP 1: Get Local Record CIDs │ + │ ├─ READ: dstore.Query("/records/") [READ: KV] │ + │ │ └─ Returns: "/records/CID123", "/records/CID456", ... │ + │ │ └─ ✅ Pre-filtered: Only LOCAL records │ + │ │ + │ STEP 2: For Each CID, Check Query Matching │ + │ ├─ matchesAllQueries(cid, queries): │ + │ │ │ │ + │ │ └─ getRecordLabelsEfficiently(cid): │ + │ │ ├─ READ: dstore.Query("/skills/") [READ: KV] │ + │ │ │ └─ Find: "/skills/AI/ML/CID123/Peer1" │ + │ │ │ └─ Extract: "/skills/AI/ML" │ + │ │ ├─ READ: dstore.Query("/domains/") [READ: KV] │ + │ │ │ └─ Find: "/domains/tech/CID123/Peer1" │ + │ │ │ └─ Extract: "/domains/tech" │ + │ │ └─ READ: dstore.Query("/modules/") [READ: KV] │ + │ │ └─ Find: "/modules/search/CID123/Peer1" │ + │ │ └─ Extract: "/modules/search" │ + │ │ │ + │ │ └─ queryMatchesLabels(query, labels): │ + │ │ └─ Check if ALL queries match labels (AND logic) │ + │ │ │ + │ └─ If matches: Return {RecordRef: CID123, Labels: [...]} │ + │ │ + │ ❌ NO OCI Storage access - Labels extracted from KV keys! │ + │ ❌ NO DHT Storage access - Local-only operation! │ + └─────────────────────────────────────────────────────────────────────────────┘ +``` + +### Storage Operations + +**OCI Storage (Object Storage):** +- ❌ **NO ACCESS** - List doesn't need record content! + +**Local KV Storage (Routing Datastore):** +- `READ`: `"/records/*"` - Get all local record CIDs +- `READ`: `"/skills/*"` - Extract skill labels for each CID +- `READ`: `"/domains/*"` - Extract domain labels for each CID +- `READ`: `"/modules/*"` - Extract module labels for each CID + +**DHT Storage (Distributed Network):** +- ❌ **NO ACCESS** - List is local-only operation! + +### Performance Characteristics + +**List vs Publish Storage Comparison:** +``` +PUBLISH: LIST: +├─ OCI: 2 reads (validate) ├─ OCI: 0 reads ✅ +├─ Local KV: 1 read + 5+ writes ├─ Local KV: 4+ reads only ✅ +└─ DHT: 0 reads + 4+ writes └─ DHT: 0 reads ✅ + +Result: List is much lighter! +``` + +**Key Optimizations:** +1. **No OCI Access**: Labels extracted from KV keys, not record content +2. **Local-Only**: No network/DHT interaction required +3. **Efficient Filtering**: Uses `/records/` index as starting point +4. **Key-Based Labels**: No expensive record parsing + +**Read Pattern**: `O(1 + 3×N)` KV reads where N = number of local records + +--- + +## Search + +The Search operation discovers remote records from other peers using **pull-based label caching** and **OR logic with minimum threshold**. It's designed for network-wide discovery at scale (hundreds of peers) and filters out local records, returning only records from remote peers that match at least `minMatchScore` queries. 
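+
+Before walking through the flow, the remote-only selection described above can
+be illustrated with a small, self-contained sketch. This is not the server
+implementation: `splitKey` and `matches` are simplified stand-ins for
+`ParseEnhancedLabelKey` and the hierarchical matching helpers, and the key
+layout follows the documented `/<label>/<CID>/<PeerID>` format.
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// splitKey breaks an enhanced key into (label, CID, PeerID).
+func splitKey(key string) (label, cid, peerID string, ok bool) {
+	parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
+	if len(parts) < 4 { // namespace + label + CID + PeerID at minimum
+		return "", "", "", false
+	}
+	peerID = parts[len(parts)-1]
+	cid = parts[len(parts)-2]
+	label = "/" + strings.Join(parts[:len(parts)-2], "/")
+
+	return label, cid, peerID, true
+}
+
+// matches applies hierarchical prefix matching: "/skills/AI" matches
+// "/skills/AI/ML" but not "/skills/AIOps".
+func matches(query, label string) bool {
+	return label == query || strings.HasPrefix(label, query+"/")
+}
+
+func main() {
+	localPeer := "LocalPeer"
+	keys := []string{
+		"/skills/AI/ML/CID123/RemotePeer1",
+		"/domains/research/CID123/RemotePeer1",
+		"/skills/AI/ML/CID999/LocalPeer", // local record: excluded from Search
+	}
+	queries := []string{"/skills/AI", "/domains/research"}
+	minMatchScore := 2
+
+	// Group cached labels by CID, keeping remote records only (Step 2 below).
+	labelsByCID := make(map[string][]string)
+	for _, key := range keys {
+		label, cid, peerID, ok := splitKey(key)
+		if !ok || peerID == localPeer {
+			continue
+		}
+		labelsByCID[cid] = append(labelsByCID[cid], label)
+	}
+
+	// OR logic (Step 3 below): score is the number of queries matching ANY label.
+	for cid, labels := range labelsByCID {
+		score := 0
+		for _, q := range queries {
+			for _, l := range labels {
+				if matches(q, l) {
+					score++
+
+					break
+				}
+			}
+		}
+		if score >= minMatchScore {
+			fmt.Printf("%s matched %d/%d queries\n", cid, score, len(queries))
+		}
+	}
+}
+```
+
+Running it prints `CID123 matched 2/2 queries`; the local record `CID999`
+never enters scoring.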
+ +### Pull-Based Discovery Flow + +``` +PHASE 1: REMOTE PEER PUBLISHES CONTENT + ┌─────────────────────────────────────────────────────────────┐ + │ Remote Peer: DHT.Provide(CID) │ + │ │ + │ 1. Remote peer publishes content │ + │ 2. DHT().Provide(CID) announces availability │ + │ 3. Provider announcement propagates to all peers │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ +PHASE 2: LOCAL PEER DISCOVERS AND CACHES + ┌─────────────────────────────────────────────────────────────┐ + │ handleCIDProviderNotification() │ + │ (routing_remote.go) │ + │ │ + │ 1. Receive: CID provider notification │ + │ 2. Check: hasRemoteRecordCached() → false (new record) │ + │ 3. Pull: service.Pull(ctx, peerID, recordRef) │ + │ └─ RPC call to remote peer │ + │ 4. Extract: GetLabels(record) │ + │ └─ Parse skills, domains, modules from content │ + │ 5. Cache: Enhanced keys locally │ + │ ├─ "/skills/AI/CID123/RemotePeer" → LabelMetadata │ + │ ├─ "/domains/research/CID123/RemotePeer" → LabelMetadata│ + │ └─ "/modules/runtime/CID123/RemotePeer" → LabelMetadata│ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ +PHASE 3: USER SEARCHES FOR REMOTE RECORDS + ┌─────────────────────────────────────────────────────────────┐ + │ SEARCH REQUEST │ + │ (gRPC Controller) │ + │ + RecordQuery[] (skills/domains/modules) │ + │ + MinMatchScore (OR logic threshold) │ + └─────────────────────┬───────────────────────────────────────┘ + │ + ▼ + ┌─────────────────────────────────────────────────────────────────────────────┐ + │ LOCAL KV STORAGE │ + │ (Cached Remote Labels) │ + │ │ + │ STEP 1: Query Cached Remote Labels (Pull-Based Discovery Results) │ + │ ├─ READ: dstore.Query("/skills/") [READ: KV] │ + │ │ └─ Find: "/skills/AI/CID123/RemotePeer1" (cached via pull) │ + │ ├─ READ: dstore.Query("/domains/") [READ: KV] │ + │ │ └─ Find: "/domains/research/CID123/RemotePeer1" (cached via pull) │ + │ └─ READ: dstore.Query("/modules/") [READ: KV] │ + │ └─ Find: "/modules/runtime/CID123/RemotePeer1" (cached via pull) │ + │ │ + │ STEP 2: Filter for REMOTE Records Only │ + │ ├─ ParseEnhancedLabelKey(key) → (label, cid, peerID) │ + │ ├─ if peerID == localPeerID: continue (skip local) │ + │ └─ ✅ Only process records from remote peers │ + │ │ + │ STEP 3: Apply OR Logic with Minimum Threshold │ + │ ├─ calculateMatchScore(cid, queries, peerID): │ + │ │ ├─ For each query: check if it matches ANY label (OR logic) │ + │ │ ├─ Count matching queries → score │ + │ │ └─ Return: (matchingQueries[], score) │ + │ ├─ if score >= minMatchScore: include result ✅ │ + │ │ └─ Records returned if they match ≥N queries (OR relationship) │ + │ ├─ Apply deduplicateQueries() for consistent scoring │ + │ └─ Apply limit and duplicate CID filtering │ + │ │ + │ STEP 4: Return SearchResponse with Match Details │ + │ └─ {RecordRef: CID, Peer: RemotePeer, MatchQueries: [...], MatchScore: N} │ + │ │ + │ ✅ Uses cached labels from pull-based discovery │ + │ ✅ Fresh data (labels extracted directly from content) │ + │ ❌ NO DHT label queries - Uses local cache only │ + └─────────────────────────────────────────────────────────────────────────────┘ +``` + +### Storage Operations + +**Pull-Based Label Discovery (Background Process):** +- `RPC`: `service.Pull(ctx, remotePeerID, recordRef)` - Fetch content from remote peer +- `EXTRACT`: `GetLabels(record)` - Extract skills/domains/modules from content +- `CACHE`: Store enhanced keys locally for fast search + +**Search Query Execution (User Request):** + +**OCI Storage (Object 
Storage):** +- ❌ **NO ACCESS** - Search uses cached labels, not record content + +**Local KV Storage (Routing Datastore):** +- `READ`: `"/skills/*"` - Query cached remote skill labels (via pull-based discovery) +- `READ`: `"/domains/*"` - Query cached remote domain labels (via pull-based discovery) +- `READ`: `"/modules/*"` - Query cached remote module labels (via pull-based discovery) +- **Filter**: Only process keys where `peerID != localPeerID` + +**DHT Storage (Distributed Network):** +- ❌ **NO DIRECT ACCESS** - Search uses locally cached data from pull-based discovery + +**RPC Layer (Pull-Based Discovery):** +- `service.Pull(remotePeerID, recordRef)` - On-demand content fetching for new providers +- `service.Lookup(remotePeerID, recordRef)` - Metadata validation for announced content + +### Search vs List Comparison + +| Aspect | **List** | **Search** | +|--------|----------|------------| +| **Scope** | Local records only | Remote records only | +| **Data Source** | `/records/` index | Cached remote labels (pull-based) | +| **Filtering** | `peerID == localPeerID` | `peerID != localPeerID` | +| **Query Logic** | ✅ AND relationship (all must match) | ✅ OR relationship with minMatchScore threshold | +| **Discovery Method** | Direct local storage | Pull-based caching from DHT provider events | +| **Network Access** | ❌ None | ✅ RPC content pulling (background) | +| **Scalability** | Single peer | Hundreds of peers via pull-based discovery | +| **Response Type** | `ListResponse` | `SearchResponse` | +| **Additional Fields** | Labels only | + Peer info, match score, matching queries | +| **Content Freshness** | Always current | Fresh via on-demand content pulling | + +### Performance Characteristics + +**Pull-Based Discovery Performance:** +``` +BACKGROUND LABEL CACHING (per new CID provider announcement): +├─ RPC: 1 content pull from remote peer ✅ (only for new records) +├─ Local Processing: Label extraction from content ✅ +├─ Local KV: N writes (N = number of labels) ✅ +└─ Result: Fresh labels cached locally ✅ + +SEARCH EXECUTION (per user query): +├─ Local KV: 3+ reads (cached remote labels) ✅ +├─ Query deduplication and OR logic processing ✅ +├─ No network access needed ✅ (uses cache) +└─ Result: Fast search with fresh data ✅ +``` + +**Key Optimizations:** +1. **Scalable Caching**: Pull-based discovery works with hundreds of peers +2. **Fresh Content**: Labels extracted directly from source content +3. **Efficient Search**: Query cached labels, no real-time network access +4. **Content Validation**: RPC calls validate remote peer availability +5. **Background Processing**: Label discovery doesn't block user queries +6. **Query Deduplication**: Server-side defense against client bugs +7. **OR Logic Scoring**: Flexible matching with minimum threshold + +**Read Pattern**: +- **Discovery**: `O(1)` RPC call per new remote record +- **Search**: `O(4×M)` KV reads where M = number of cached remote labels (skills, domains, modules, locators) + +### OR Logic with Minimum Threshold + +**Core Concept:** +The Search API uses **OR logic** where records are returned if they match **at least N queries** (where N = `minMatchScore`). This provides flexible, scored matching for complex search scenarios. 
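+
+As a toy, runnable illustration of this threshold filter (the record data and the `matchScore` helper below are illustrative only, not the server code; hierarchical prefix matching follows the rules in "Query Types and Matching" below):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// matchScore counts how many queries match at least one record label (OR logic).
+func matchScore(queries, labels []string) int {
+	score := 0
+
+	for _, q := range queries {
+		for _, l := range labels {
+			if l == q || strings.HasPrefix(l, q+"/") {
+				score++ // one matching label is enough for this query
+
+				break
+			}
+		}
+	}
+
+	return score
+}
+
+func main() {
+	queries := []string{"/skills/AI", "/skills/Python"}
+	minMatchScore := 1
+
+	labels := []string{"/skills/AI/ML"} // matches "/skills/AI" by prefix
+	score := matchScore(queries, labels)
+	fmt.Printf("score %d/%d, returned=%v\n", score, len(queries), score >= minMatchScore)
+}
+```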
+
+**Match Scoring Algorithm:**
+```go
+score := 0
+for _, query := range searchQueries {
+    if QueryMatchesLabels(query, recordLabels) {
+        score++ // OR logic: any match increments score
+    }
+}
+return score >= minMatchScore // Threshold filtering
+```
+
+**Production Safety:**
+- **Default Behavior**: `minMatchScore = 0` defaults to `1` per proto specification
+- **Empty Queries**: Rejected with helpful error (prevents expensive full scans)
+- **Query Deduplication**: Server-side deduplication ensures consistent scoring
+
+### Query Types and Matching
+
+**Supported Query Types:**
+1. **SKILL** (`RECORD_QUERY_TYPE_SKILL`)
+2. **LOCATOR** (`RECORD_QUERY_TYPE_LOCATOR`)
+3. **DOMAIN** (`RECORD_QUERY_TYPE_DOMAIN`)
+4. **MODULE** (`RECORD_QUERY_TYPE_MODULE`)
+
+**Matching Rules:**
+
+**Skills & Domains & Modules (Hierarchical Matching):**
+```
+Query: "AI" matches:
+✅ /skills/AI (exact match)
+✅ /skills/AI/ML (prefix match)
+✅ /skills/AI/NLP/ChatBot (prefix match)
+❌ /skills/Machine Learning (no match)
+```
+
+**Locators (Exact Matching Only):**
+```
+Query: "docker-image" matches:
+✅ /locators/docker-image (exact match only)
+❌ /locators/docker-image/latest (no prefix matching)
+```
+
+### OR Logic Examples
+
+**Example 1: Flexible Matching**
+```bash
+# Query: Find records with AI OR Python skills, need at least 1 match
+dirctl routing search --skill "AI" --skill "Python" --min-score 1
+
+# Results:
+# Record A: [AI] → Score: 1/2 → ✅ Returned (≥ minScore=1)
+# Record B: [Python] → Score: 1/2 → ✅ Returned (≥ minScore=1)
+# Record C: [AI, Python] → Score: 2/2 → ✅ Returned (≥ minScore=1)
+# Record D: [Java] → Score: 0/2 → ❌ Filtered out (< minScore=1)
+```
+
+**Example 2: Strict Matching**
+```bash
+# Query: Find records with BOTH AI AND Python skills
+dirctl routing search --skill "AI" --skill "Python" --min-score 2
+
+# Results:
+# Record A: [AI] → Score: 1/2 → ❌ Filtered out (< minScore=2)
+# Record B: [Python] → Score: 1/2 → ❌ Filtered out (< minScore=2)
+# Record C: [AI, Python] → Score: 2/2 → ✅ Returned (≥ minScore=2)
+```
+
+**Example 3: Mixed Query Types**
+```bash
+# Query: Multi-type search with threshold
+dirctl routing search \
+  --skill "AI" \
+  --domain "research" \
+  --module "runtime/python" \
+  --min-score 2
+
+# Results:
+# Record A: [skills/AI, domains/research] → Score: 2/3 → ✅ Returned
+# Record B: [skills/AI] → Score: 1/3 → ❌ Filtered out
+# Record C: [domains/research, modules/runtime/python] → Score: 2/3 → ✅ Returned
+```
+
+### Pull-Based Discovery Benefits
+
+**Scalability:**
+- **Not limited by DHT k-closest peers** (typically ~20)
+- **Provider announcements reach all peers** via DHT.Provide()
+- **On-demand content pulling** scales to hundreds of peers
+
+**Reliability:**
+- **Uses working DHT components** (provider system, not broken label propagation)
+- **Direct content fetching** bypasses DHT propagation issues
+- **Fresh labels** always match actual content
+
+**Performance:**
+- **Background caching** doesn't block user queries
+- **Local cache queries** are fast (no network access during search)
+- **Automatic cache management** via background tasks
+
+---
+
+## Pull-Based Architecture Summary
+
+### Key Architectural Changes
+
+**Previous Architecture (Removed):**
+- ❌ DHT.PutValue() for individual label announcements
+- ❌ handleLabelNotification() event system
+- ❌ Complex announcement type routing (CID vs Label)
+- ❌ Limited by DHT k-closest peer constraints (~20 peers)
+
+**New Pull-Based Architecture:**
+- ✅ DHT.Provide() for CID provider announcements only
+- ✅ handleCIDProviderNotification() with content pulling +- ✅ Unified announcement handling (all are CID provider events) +- ✅ Scalable to hundreds of peers via RPC content fetching + +### Production Benefits + +**Scalability:** +- **Large Networks**: Not constrained by DHT k-closest limitations +- **Efficient Discovery**: Provider announcements reach all peers reliably +- **On-Demand Fetching**: Only pull content when discovery happens + +**Reliability:** +- **Proven Components**: Uses working DHT provider system +- **Fresh Data**: Labels extracted directly from content source +- **Self-Healing**: Failed pulls don't break the system + +**Performance:** +- **Fast Queries**: Local cache provides sub-millisecond search +- **Background Processing**: Label discovery doesn't block user operations +- **Automatic Maintenance**: Background republishing and cleanup + +**API Robustness:** +- **Query Deduplication**: Server defends against client bugs +- **Production Safety**: Proper defaults and validation +- **Complete Query Support**: Skills, locators, domains, modules all supported +- **OR Logic**: Flexible matching with minimum threshold control + +### Migration Notes + +**No Breaking Changes:** +- **API Interface**: Search/List APIs unchanged for existing clients +- **Enhanced Key Format**: Unchanged, maintains compatibility +- **Background Tasks**: Adapted for provider republishing, not removed + +**Improved Behavior:** +- **More Reliable**: Pull-based discovery vs unreliable label propagation +- **Better Scaling**: Hundreds of peers vs ~20 peer DHT limitation +- **Fresher Data**: Labels from content vs potentially stale DHT cache +- **OR Logic**: Proto-compliant search behavior with flexible matching diff --git a/server/routing/cleanup_core_test.go b/server/routing/cleanup_core_test.go index 0a2eb7b34..97d27fd81 100644 --- a/server/routing/cleanup_core_test.go +++ b/server/routing/cleanup_core_test.go @@ -1,287 +1,287 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "os" - "testing" - "time" - - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/types" - ipfsdatastore "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Test the core cleanup logic without complex server dependencies. 
-func TestCleanup_CoreLogic(t *testing.T) { - ctx := t.Context() - - dstore, cleanup := setupCleanupCoreTestDatastore(t) - defer cleanup() - - t.Run("cleanup_labels_for_specific_cid", func(t *testing.T) { - testCID := "test-cid-123" - localPeerID := testLocalPeerID - - // Setup test data: record + labels - recordKey := ipfsdatastore.NewKey("/records/" + testCID) - err := dstore.Put(ctx, recordKey, []byte{}) - require.NoError(t, err) - - // Add labels for this CID (local and remote) - testLabels := []struct { - label string - peerID string - shouldBeDeleted bool - }{ - {"/skills/AI", localPeerID, true}, // Local - should be deleted - {"/skills/ML", localPeerID, true}, // Local - should be deleted - {"/skills/AI", "remote-peer", false}, // Remote - should be kept - } - - for _, tl := range testLabels { - enhancedKey := BuildEnhancedLabelKey(types.Label(tl.label), testCID, tl.peerID) - err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), []byte("metadata")) - require.NoError(t, err) - } - - // Test cleanup logic - success := simulateCleanupLabelsForCID(ctx, dstore, testCID, localPeerID) - assert.True(t, success) - - // Verify record key was deleted - exists, err := dstore.Has(ctx, recordKey) - require.NoError(t, err) - assert.False(t, exists) - - // Verify label cleanup - for _, tl := range testLabels { - enhancedKey := BuildEnhancedLabelKey(types.Label(tl.label), testCID, tl.peerID) - exists, err := dstore.Has(ctx, ipfsdatastore.NewKey(enhancedKey)) - require.NoError(t, err) - - if tl.shouldBeDeleted { - assert.False(t, exists, "Local label should be deleted") - } else { - assert.True(t, exists, "Remote label should be kept") - } - } - }) - - t.Run("stale_label_detection", func(t *testing.T) { - // Test the core logic of stale label detection - now := time.Now() - - testCases := []struct { - name string - metadata *types.LabelMetadata - isStale bool - }{ - { - name: "fresh_label", - metadata: &types.LabelMetadata{ - Timestamp: now.Add(-time.Hour), - LastSeen: now.Add(-time.Hour), - }, - isStale: false, - }, - { - name: "stale_label", - metadata: &types.LabelMetadata{ - Timestamp: now.Add(-MaxLabelAge - time.Hour), - LastSeen: now.Add(-MaxLabelAge - time.Hour), - }, - isStale: true, - }, - { - name: "borderline_fresh", - metadata: &types.LabelMetadata{ - Timestamp: now.Add(-MaxLabelAge + time.Minute), - LastSeen: now.Add(-MaxLabelAge + time.Minute), - }, - isStale: false, - }, - { - name: "borderline_stale", - metadata: &types.LabelMetadata{ - Timestamp: now.Add(-MaxLabelAge - time.Minute), - LastSeen: now.Add(-MaxLabelAge - time.Minute), - }, - isStale: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := tc.metadata.IsStale(MaxLabelAge) - assert.Equal(t, tc.isStale, result) - }) - } - }) - - t.Run("remote_label_filter_logic", func(t *testing.T) { - // Test the remote label filtering logic - localPeerID := testLocalPeerID - - testCases := []struct { - name string - key string - expected bool // true if should be included (is remote) - }{ - { - name: "remote_label", - key: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/remote-peer", - expected: true, - }, - { - name: "local_label", - key: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/" + testLocalPeerID, - expected: false, - }, - { - name: "malformed_key_treated_as_remote", - key: "/invalid-key", - expected: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Test the core filtering logic - keyPeerID 
:= ExtractPeerIDFromKey(tc.key) - isRemote := (keyPeerID != localPeerID) || (keyPeerID == "") - assert.Equal(t, tc.expected, isRemote) - }) - } - }) - - t.Run("batch_deletion_efficiency", func(t *testing.T) { - // Test that batch operations work correctly - testCID := "batch-test-cid" - localPeerID := testLocalPeerID - - // Setup multiple labels to delete - labelsToDelete := []string{ - "/skills/AI", - "/skills/ML", - "/domains/tech", - "/modules/nlp", - } - - // Store record and labels - recordKey := ipfsdatastore.NewKey("/records/" + testCID) - err := dstore.Put(ctx, recordKey, []byte{}) - require.NoError(t, err) - - for _, label := range labelsToDelete { - enhancedKey := BuildEnhancedLabelKey(types.Label(label), testCID, localPeerID) - err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), []byte("metadata")) - require.NoError(t, err) - } - - // Count keys before cleanup - allResults, err := dstore.Query(ctx, query.Query{}) - require.NoError(t, err) - - var keysBefore []string - for result := range allResults.Next() { - keysBefore = append(keysBefore, result.Key) - } - - allResults.Close() - - // Run cleanup - success := simulateCleanupLabelsForCID(ctx, dstore, testCID, localPeerID) - assert.True(t, success) - - // Count keys after cleanup - allResults, err = dstore.Query(ctx, query.Query{}) - require.NoError(t, err) - - var keysAfter []string - for result := range allResults.Next() { - keysAfter = append(keysAfter, result.Key) - } - - allResults.Close() - - // Should have deleted record + all labels (5 keys total) - expectedDeleted := 1 + len(labelsToDelete) // 1 record + 4 labels - actualDeleted := len(keysBefore) - len(keysAfter) - assert.Equal(t, expectedDeleted, actualDeleted) - }) -} - -// Simplified cleanup logic for testing (without server dependency). -func simulateCleanupLabelsForCID(ctx context.Context, dstore types.Datastore, cid string, localPeerID string) bool { - batch, err := dstore.Batch(ctx) - if err != nil { - return false - } - - keysDeleted := 0 - - // Remove the /records/ key - recordKey := ipfsdatastore.NewKey("/records/" + cid) - if err := batch.Delete(ctx, recordKey); err == nil { - keysDeleted++ - } - - // Find and remove all label keys for this CID using shared namespace iteration - entries, err := QueryAllNamespaces(ctx, dstore) - if err != nil { - return false - } - - for _, entry := range entries { - // Parse enhanced key - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - // Delete malformed keys - if err := batch.Delete(ctx, ipfsdatastore.NewKey(entry.Key)); err == nil { - keysDeleted++ - } - - continue - } - - // Check if this key matches our CID and is from local peer - if keyCID == cid && keyPeerID == localPeerID { - labelKey := ipfsdatastore.NewKey(entry.Key) - if err := batch.Delete(ctx, labelKey); err == nil { - keysDeleted++ - } - } - } - - // Commit the batch deletion - if err := batch.Commit(ctx); err != nil { - return false - } - - return keysDeleted > 0 -} - -// Helper function for cleanup testing. -func setupCleanupCoreTestDatastore(t *testing.T) (types.Datastore, func()) { - t.Helper() - - dsOpts := []datastore.Option{ - datastore.WithFsProvider("/tmp/test-cleanup-core-" + t.Name()), - } - - dstore, err := datastore.New(dsOpts...) 
- require.NoError(t, err) - - cleanup := func() { - _ = dstore.Close() - _ = os.RemoveAll("/tmp/test-cleanup-core-" + t.Name()) - } - - return dstore, cleanup -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "os" + "testing" + "time" + + "github.com/agntcy/dir/server/datastore" + "github.com/agntcy/dir/server/types" + ipfsdatastore "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test the core cleanup logic without complex server dependencies. +func TestCleanup_CoreLogic(t *testing.T) { + ctx := t.Context() + + dstore, cleanup := setupCleanupCoreTestDatastore(t) + defer cleanup() + + t.Run("cleanup_labels_for_specific_cid", func(t *testing.T) { + testCID := "test-cid-123" + localPeerID := testLocalPeerID + + // Setup test data: record + labels + recordKey := ipfsdatastore.NewKey("/records/" + testCID) + err := dstore.Put(ctx, recordKey, []byte{}) + require.NoError(t, err) + + // Add labels for this CID (local and remote) + testLabels := []struct { + label string + peerID string + shouldBeDeleted bool + }{ + {"/skills/AI", localPeerID, true}, // Local - should be deleted + {"/skills/ML", localPeerID, true}, // Local - should be deleted + {"/skills/AI", "remote-peer", false}, // Remote - should be kept + } + + for _, tl := range testLabels { + enhancedKey := BuildEnhancedLabelKey(types.Label(tl.label), testCID, tl.peerID) + err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), []byte("metadata")) + require.NoError(t, err) + } + + // Test cleanup logic + success := simulateCleanupLabelsForCID(ctx, dstore, testCID, localPeerID) + assert.True(t, success) + + // Verify record key was deleted + exists, err := dstore.Has(ctx, recordKey) + require.NoError(t, err) + assert.False(t, exists) + + // Verify label cleanup + for _, tl := range testLabels { + enhancedKey := BuildEnhancedLabelKey(types.Label(tl.label), testCID, tl.peerID) + exists, err := dstore.Has(ctx, ipfsdatastore.NewKey(enhancedKey)) + require.NoError(t, err) + + if tl.shouldBeDeleted { + assert.False(t, exists, "Local label should be deleted") + } else { + assert.True(t, exists, "Remote label should be kept") + } + } + }) + + t.Run("stale_label_detection", func(t *testing.T) { + // Test the core logic of stale label detection + now := time.Now() + + testCases := []struct { + name string + metadata *types.LabelMetadata + isStale bool + }{ + { + name: "fresh_label", + metadata: &types.LabelMetadata{ + Timestamp: now.Add(-time.Hour), + LastSeen: now.Add(-time.Hour), + }, + isStale: false, + }, + { + name: "stale_label", + metadata: &types.LabelMetadata{ + Timestamp: now.Add(-MaxLabelAge - time.Hour), + LastSeen: now.Add(-MaxLabelAge - time.Hour), + }, + isStale: true, + }, + { + name: "borderline_fresh", + metadata: &types.LabelMetadata{ + Timestamp: now.Add(-MaxLabelAge + time.Minute), + LastSeen: now.Add(-MaxLabelAge + time.Minute), + }, + isStale: false, + }, + { + name: "borderline_stale", + metadata: &types.LabelMetadata{ + Timestamp: now.Add(-MaxLabelAge - time.Minute), + LastSeen: now.Add(-MaxLabelAge - time.Minute), + }, + isStale: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := tc.metadata.IsStale(MaxLabelAge) + assert.Equal(t, tc.isStale, result) + }) + } + }) + + t.Run("remote_label_filter_logic", func(t *testing.T) { + // Test the remote label filtering logic + 
localPeerID := testLocalPeerID + + testCases := []struct { + name string + key string + expected bool // true if should be included (is remote) + }{ + { + name: "remote_label", + key: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/remote-peer", + expected: true, + }, + { + name: "local_label", + key: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/" + testLocalPeerID, + expected: false, + }, + { + name: "malformed_key_treated_as_remote", + key: "/invalid-key", + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Test the core filtering logic + keyPeerID := ExtractPeerIDFromKey(tc.key) + isRemote := (keyPeerID != localPeerID) || (keyPeerID == "") + assert.Equal(t, tc.expected, isRemote) + }) + } + }) + + t.Run("batch_deletion_efficiency", func(t *testing.T) { + // Test that batch operations work correctly + testCID := "batch-test-cid" + localPeerID := testLocalPeerID + + // Setup multiple labels to delete + labelsToDelete := []string{ + "/skills/AI", + "/skills/ML", + "/domains/tech", + "/modules/nlp", + } + + // Store record and labels + recordKey := ipfsdatastore.NewKey("/records/" + testCID) + err := dstore.Put(ctx, recordKey, []byte{}) + require.NoError(t, err) + + for _, label := range labelsToDelete { + enhancedKey := BuildEnhancedLabelKey(types.Label(label), testCID, localPeerID) + err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), []byte("metadata")) + require.NoError(t, err) + } + + // Count keys before cleanup + allResults, err := dstore.Query(ctx, query.Query{}) + require.NoError(t, err) + + var keysBefore []string + for result := range allResults.Next() { + keysBefore = append(keysBefore, result.Key) + } + + allResults.Close() + + // Run cleanup + success := simulateCleanupLabelsForCID(ctx, dstore, testCID, localPeerID) + assert.True(t, success) + + // Count keys after cleanup + allResults, err = dstore.Query(ctx, query.Query{}) + require.NoError(t, err) + + var keysAfter []string + for result := range allResults.Next() { + keysAfter = append(keysAfter, result.Key) + } + + allResults.Close() + + // Should have deleted record + all labels (5 keys total) + expectedDeleted := 1 + len(labelsToDelete) // 1 record + 4 labels + actualDeleted := len(keysBefore) - len(keysAfter) + assert.Equal(t, expectedDeleted, actualDeleted) + }) +} + +// Simplified cleanup logic for testing (without server dependency). 
+func simulateCleanupLabelsForCID(ctx context.Context, dstore types.Datastore, cid string, localPeerID string) bool { + batch, err := dstore.Batch(ctx) + if err != nil { + return false + } + + keysDeleted := 0 + + // Remove the /records/ key + recordKey := ipfsdatastore.NewKey("/records/" + cid) + if err := batch.Delete(ctx, recordKey); err == nil { + keysDeleted++ + } + + // Find and remove all label keys for this CID using shared namespace iteration + entries, err := QueryAllNamespaces(ctx, dstore) + if err != nil { + return false + } + + for _, entry := range entries { + // Parse enhanced key + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + // Delete malformed keys + if err := batch.Delete(ctx, ipfsdatastore.NewKey(entry.Key)); err == nil { + keysDeleted++ + } + + continue + } + + // Check if this key matches our CID and is from local peer + if keyCID == cid && keyPeerID == localPeerID { + labelKey := ipfsdatastore.NewKey(entry.Key) + if err := batch.Delete(ctx, labelKey); err == nil { + keysDeleted++ + } + } + } + + // Commit the batch deletion + if err := batch.Commit(ctx); err != nil { + return false + } + + return keysDeleted > 0 +} + +// Helper function for cleanup testing. +func setupCleanupCoreTestDatastore(t *testing.T) (types.Datastore, func()) { + t.Helper() + + dsOpts := []datastore.Option{ + datastore.WithFsProvider("/tmp/test-cleanup-core-" + t.Name()), + } + + dstore, err := datastore.New(dsOpts...) + require.NoError(t, err) + + cleanup := func() { + _ = dstore.Close() + _ = os.RemoveAll("/tmp/test-cleanup-core-" + t.Name()) + } + + return dstore, cleanup +} diff --git a/server/routing/cleanup_tasks.go b/server/routing/cleanup_tasks.go index 921a91a23..daab4c460 100644 --- a/server/routing/cleanup_tasks.go +++ b/server/routing/cleanup_tasks.go @@ -1,428 +1,428 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "encoding/json" - "fmt" - "path" - "sync" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/routing/internal/p2p" - "github.com/agntcy/dir/server/routing/pubsub" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" -) - -var cleanupLogger = logging.Logger("routing/cleanup") - -// remoteLabelFilter identifies remote labels by checking if they lack a corresponding local record. -// Remote labels are those that don't have a matching "/records/CID" key in the datastore. -// -//nolint:containedctx -type remoteLabelFilter struct { - dstore types.Datastore - ctx context.Context - localPeerID string -} - -func (f *remoteLabelFilter) Filter(e query.Entry) bool { - // With enhanced keys, we can check PeerID directly from the key - // Key format: /skills/AI/CID123/Peer1 - keyPeerID := ExtractPeerIDFromKey(e.Key) - if keyPeerID == "" { - // Invalid key format, assume remote to be safe - return true - } - - // It's remote if the PeerID in the key is not our local peer - return keyPeerID != f.localPeerID -} - -// CleanupManager handles all background cleanup and republishing tasks for the routing system. -// This includes CID provider republishing, GossipSub label republishing, stale remote label cleanup, and orphaned record cleanup. 
-type CleanupManager struct { - dstore types.Datastore - storeAPI types.StoreAPI - server *p2p.Server - publishFunc pubsub.PublishEventHandler // Publishing callback (captures routeRemote state) -} - -// NewCleanupManager creates a new cleanup manager with the required dependencies. -// The publishFunc is injected from routeRemote.Publish to avoid circular dependencies -// while still providing access to DHT and GossipSub publishing logic. -// -// Parameters: -// - dstore: Datastore for label storage -// - storeAPI: Store API for record operations -// - server: P2P server for DHT operations -// - publishFunc: Callback for publishing (from routeRemote.Publish, see pubsub.PublishEventHandler) -func NewCleanupManager( - dstore types.Datastore, - storeAPI types.StoreAPI, - server *p2p.Server, - publishFunc pubsub.PublishEventHandler, -) *CleanupManager { - return &CleanupManager{ - dstore: dstore, - storeAPI: storeAPI, - server: server, - publishFunc: publishFunc, - } -} - -// StartLabelRepublishTask starts a background task that periodically republishes local -// CID provider announcements to keep content discoverable (provider records expire after ProviderRecordTTL). -// The wg parameter is used to track this goroutine in the parent's WaitGroup. -func (c *CleanupManager) StartLabelRepublishTask(ctx context.Context, wg *sync.WaitGroup) { - ticker := time.NewTicker(RepublishInterval) - - cleanupLogger.Info("Started CID provider republishing task", "interval", RepublishInterval) - - defer func() { - ticker.Stop() - wg.Done() - cleanupLogger.Debug("CID provider republishing task stopped") - }() - - for { - select { - case <-ctx.Done(): - cleanupLogger.Info("CID provider republishing task stopping (context cancelled)") - - return - case <-ticker.C: - c.republishLocalProviders(ctx) - } - } -} - -// StartRemoteLabelCleanupTask starts a background task that periodically cleans up stale remote labels. -// This is critical for the pull-based architecture to remove cached labels from offline or deleted remote content. -// The wg parameter is used to track this goroutine in the parent's WaitGroup. -func (c *CleanupManager) StartRemoteLabelCleanupTask(ctx context.Context, wg *sync.WaitGroup) { - ticker := time.NewTicker(CleanupInterval) - - cleanupLogger.Info("Starting remote label cleanup task", "interval", CleanupInterval) - - defer func() { - ticker.Stop() - wg.Done() - cleanupLogger.Debug("Remote label cleanup task stopped") - }() - - for { - select { - case <-ctx.Done(): - cleanupLogger.Info("Remote label cleanup task stopping (context cancelled)") - - return - case <-ticker.C: - if err := c.cleanupStaleRemoteLabels(ctx); err != nil { - cleanupLogger.Error("Failed to cleanup stale remote labels", "error", err) - } - } - } -} - -// republishLocalProviders republishes all local CID provider announcements and labels -// to ensure they remain discoverable. This maintains both DHT provider records and -// GossipSub label announcements for optimal network propagation. 
-func (c *CleanupManager) republishLocalProviders(ctx context.Context) { - cleanupLogger.Info("Starting CID provider and label republishing cycle") - - // Query all local records from the datastore - results, err := c.dstore.Query(ctx, query.Query{ - Prefix: "/records/", - }) - if err != nil { - cleanupLogger.Error("Failed to query local records for republishing", "error", err) - - return - } - defer results.Close() - - republishedCount := 0 - labelRepublishedCount := 0 - errorCount := 0 - - var orphanedCIDs []string - - for result := range results.Next() { - if result.Error != nil { - cleanupLogger.Warn("Error reading local record for republishing", "error", result.Error) - - continue - } - - // Extract CID from record key: /records/CID123 → CID123 - cidStr := path.Base(result.Key) - if cidStr == "" { - continue - } - - // Verify the record still exists in storage - ref := &corev1.RecordRef{Cid: cidStr} - - _, err := c.storeAPI.Lookup(ctx, ref) - if err != nil { - cleanupLogger.Warn("Record no longer exists in storage, marking as orphaned", "cid", cidStr, "error", err) - orphanedCIDs = append(orphanedCIDs, cidStr) - errorCount++ - - continue - } - - // Pull the record from storage for republishing - record, err := c.storeAPI.Pull(ctx, ref) - if err != nil { - cleanupLogger.Warn("Failed to pull record for republishing", - "cid", cidStr, - "error", err) - - errorCount++ - - continue - } - - // Wrap record with adapter for interface-based publishing - adapter := adapters.NewRecordAdapter(record) - - // Use injected publishing function (handles both DHT and GossipSub) - // This reuses routeRemote.Publish logic without circular dependency - if err := c.publishFunc(ctx, adapter); err != nil { - cleanupLogger.Warn("Failed to republish record to network", - "cid", cidStr, - "error", err) - - errorCount++ - - continue - } - - cleanupLogger.Debug("Successfully republished record to network", "cid", cidStr) - - republishedCount++ - labelRepublishedCount++ // Count label republishing (done inside publishFunc) - } - - // Clean up orphaned local records and their labels - if len(orphanedCIDs) > 0 { - cleanedCount := c.cleanupOrphanedLocalLabels(ctx, orphanedCIDs) - cleanupLogger.Info("Cleaned up orphaned local records", "count", cleanedCount) - } - - cleanupLogger.Info("Completed republishing cycle", - "dhtRepublished", republishedCount, - "gossipSubRepublished", labelRepublishedCount, - "errors", errorCount, - "orphaned", len(orphanedCIDs)) -} - -// cleanupStaleRemoteLabels removes remote labels that haven't been seen recently. 
-func (c *CleanupManager) cleanupStaleRemoteLabels(ctx context.Context) error { - localPeerID := c.server.Host().ID().String() - - cleanupLogger.Debug("Starting stale remote label cleanup") - - // Query all label keys with remote filter - // We'll query each namespace separately and combine results - var allResults []query.Result - - for _, namespace := range types.AllLabelTypes() { - nsResults, err := c.dstore.Query(ctx, query.Query{ - Prefix: namespace.Prefix(), - Filters: []query.Filter{ - &remoteLabelFilter{ - dstore: c.dstore, - ctx: ctx, - localPeerID: localPeerID, - }, - }, - }) - if err != nil { - cleanupLogger.Warn("Failed to query namespace", "namespace", namespace, "error", err) - - continue - } - - // Collect results from this namespace - for result := range nsResults.Next() { - allResults = append(allResults, result) - } - - nsResults.Close() - } - - var staleKeys []datastore.Key - - // Check each remote label for staleness - for _, result := range allResults { - if result.Error != nil { - cleanupLogger.Warn("Error reading label entry", "key", result.Key, "error", result.Error) - - continue - } - - // Parse enhanced key to get peer information - _, _, keyPeerID, err := ParseEnhancedLabelKey(result.Key) - if err != nil { - cleanupLogger.Warn("Failed to parse enhanced label key, marking for deletion", - "key", result.Key, "error", err) - - staleKeys = append(staleKeys, datastore.NewKey(result.Key)) - - continue - } - - var metadata types.LabelMetadata - if err := json.Unmarshal(result.Value, &metadata); err != nil { - cleanupLogger.Warn("Failed to parse label metadata, marking for deletion", - "key", result.Key, "error", err) - - staleKeys = append(staleKeys, datastore.NewKey(result.Key)) - - continue - } - - // Validate metadata before checking staleness - if err := metadata.Validate(); err != nil { - cleanupLogger.Warn("Invalid label metadata found during cleanup, marking for deletion", - "key", result.Key, "error", err) - - staleKeys = append(staleKeys, datastore.NewKey(result.Key)) - - continue - } - - // Check if label is stale using the IsStale method - if metadata.IsStale(MaxLabelAge) { - cleanupLogger.Debug("Found stale remote label", - "key", result.Key, "age", metadata.Age(), "peer", keyPeerID) - - staleKeys = append(staleKeys, datastore.NewKey(result.Key)) - } - } - - // Delete stale labels in batch - if len(staleKeys) > 0 { - batch, err := c.dstore.Batch(ctx) - if err != nil { - return fmt.Errorf("failed to create batch for cleanup: %w", err) - } - - for _, key := range staleKeys { - if err := batch.Delete(ctx, key); err != nil { - cleanupLogger.Warn("Failed to delete stale label", "key", key.String(), "error", err) - } - } - - if err := batch.Commit(ctx); err != nil { - return fmt.Errorf("failed to commit stale label cleanup: %w", err) - } - - cleanupLogger.Info("Cleaned up stale remote labels", "count", len(staleKeys)) - } else { - cleanupLogger.Debug("No stale remote labels found") - } - - return nil -} - -// cleanupOrphanedLocalLabels removes local records and labels for CIDs that no longer exist in storage. -func (c *CleanupManager) cleanupOrphanedLocalLabels(ctx context.Context, orphanedCIDs []string) int { - cleanedCount := 0 - - for _, cid := range orphanedCIDs { - if c.cleanupLabelsForCID(ctx, cid) { - cleanedCount++ - } - } - - return cleanedCount -} - -// cleanupLabelsForCID removes all local records and labels associated with a specific CID. 
-func (c *CleanupManager) cleanupLabelsForCID(ctx context.Context, cid string) bool { - batch, err := c.dstore.Batch(ctx) - if err != nil { - cleanupLogger.Error("Failed to create cleanup batch", "cid", cid, "error", err) - - return false - } - - keysDeleted := 0 - - // Remove the /records/ key - recordKey := datastore.NewKey("/records/" + cid) - if err := batch.Delete(ctx, recordKey); err != nil { - cleanupLogger.Warn("Failed to delete record key", "key", recordKey.String(), "error", err) - } else { - keysDeleted++ - } - - // Find and remove all label keys for this CID across all namespaces - localPeerID := c.server.Host().ID().String() - - for _, namespace := range types.AllLabelTypes() { - // Query labels in this namespace that match our CID - labelResults, err := c.dstore.Query(ctx, query.Query{ - Prefix: namespace.Prefix(), - }) - if err != nil { - cleanupLogger.Warn("Failed to query labels for cleanup", "namespace", namespace, "cid", cid, "error", err) - - continue - } - - defer labelResults.Close() - - for result := range labelResults.Next() { - // Parse enhanced key to get CID and PeerID - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(result.Key) - if err != nil { - cleanupLogger.Warn("Failed to parse enhanced label key during cleanup, deleting", - "key", result.Key, "error", err) - // Delete malformed keys - if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err == nil { - keysDeleted++ - } - - continue - } - - // Check if this key matches our CID and is from local peer - if keyCID == cid && keyPeerID == localPeerID { - // Delete this local label - labelKey := datastore.NewKey(result.Key) - if err := batch.Delete(ctx, labelKey); err != nil { - cleanupLogger.Warn("Failed to delete label key", "key", labelKey.String(), "error", err) - } else { - keysDeleted++ - - cleanupLogger.Debug("Scheduled orphaned label for deletion", "key", result.Key) - } - } - } - } - - // Commit the batch deletion - if err := batch.Commit(ctx); err != nil { - cleanupLogger.Error("Failed to commit orphaned label cleanup", "cid", cid, "error", err) - - return false - } - - if keysDeleted > 0 { - cleanupLogger.Debug("Successfully cleaned up orphaned labels", "cid", cid, "keysDeleted", keysDeleted) - } - - return keysDeleted > 0 -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "encoding/json" + "fmt" + "path" + "sync" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/routing/internal/p2p" + "github.com/agntcy/dir/server/routing/pubsub" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" +) + +var cleanupLogger = logging.Logger("routing/cleanup") + +// remoteLabelFilter identifies remote labels by checking if they lack a corresponding local record. +// Remote labels are those that don't have a matching "/records/CID" key in the datastore. 
+// +//nolint:containedctx +type remoteLabelFilter struct { + dstore types.Datastore + ctx context.Context + localPeerID string +} + +func (f *remoteLabelFilter) Filter(e query.Entry) bool { + // With enhanced keys, we can check PeerID directly from the key + // Key format: /skills/AI/CID123/Peer1 + keyPeerID := ExtractPeerIDFromKey(e.Key) + if keyPeerID == "" { + // Invalid key format, assume remote to be safe + return true + } + + // It's remote if the PeerID in the key is not our local peer + return keyPeerID != f.localPeerID +} + +// CleanupManager handles all background cleanup and republishing tasks for the routing system. +// This includes CID provider republishing, GossipSub label republishing, stale remote label cleanup, and orphaned record cleanup. +type CleanupManager struct { + dstore types.Datastore + storeAPI types.StoreAPI + server *p2p.Server + publishFunc pubsub.PublishEventHandler // Publishing callback (captures routeRemote state) +} + +// NewCleanupManager creates a new cleanup manager with the required dependencies. +// The publishFunc is injected from routeRemote.Publish to avoid circular dependencies +// while still providing access to DHT and GossipSub publishing logic. +// +// Parameters: +// - dstore: Datastore for label storage +// - storeAPI: Store API for record operations +// - server: P2P server for DHT operations +// - publishFunc: Callback for publishing (from routeRemote.Publish, see pubsub.PublishEventHandler) +func NewCleanupManager( + dstore types.Datastore, + storeAPI types.StoreAPI, + server *p2p.Server, + publishFunc pubsub.PublishEventHandler, +) *CleanupManager { + return &CleanupManager{ + dstore: dstore, + storeAPI: storeAPI, + server: server, + publishFunc: publishFunc, + } +} + +// StartLabelRepublishTask starts a background task that periodically republishes local +// CID provider announcements to keep content discoverable (provider records expire after ProviderRecordTTL). +// The wg parameter is used to track this goroutine in the parent's WaitGroup. +func (c *CleanupManager) StartLabelRepublishTask(ctx context.Context, wg *sync.WaitGroup) { + ticker := time.NewTicker(RepublishInterval) + + cleanupLogger.Info("Started CID provider republishing task", "interval", RepublishInterval) + + defer func() { + ticker.Stop() + wg.Done() + cleanupLogger.Debug("CID provider republishing task stopped") + }() + + for { + select { + case <-ctx.Done(): + cleanupLogger.Info("CID provider republishing task stopping (context cancelled)") + + return + case <-ticker.C: + c.republishLocalProviders(ctx) + } + } +} + +// StartRemoteLabelCleanupTask starts a background task that periodically cleans up stale remote labels. +// This is critical for the pull-based architecture to remove cached labels from offline or deleted remote content. +// The wg parameter is used to track this goroutine in the parent's WaitGroup. 
+func (c *CleanupManager) StartRemoteLabelCleanupTask(ctx context.Context, wg *sync.WaitGroup) { + ticker := time.NewTicker(CleanupInterval) + + cleanupLogger.Info("Starting remote label cleanup task", "interval", CleanupInterval) + + defer func() { + ticker.Stop() + wg.Done() + cleanupLogger.Debug("Remote label cleanup task stopped") + }() + + for { + select { + case <-ctx.Done(): + cleanupLogger.Info("Remote label cleanup task stopping (context cancelled)") + + return + case <-ticker.C: + if err := c.cleanupStaleRemoteLabels(ctx); err != nil { + cleanupLogger.Error("Failed to cleanup stale remote labels", "error", err) + } + } + } +} + +// republishLocalProviders republishes all local CID provider announcements and labels +// to ensure they remain discoverable. This maintains both DHT provider records and +// GossipSub label announcements for optimal network propagation. +func (c *CleanupManager) republishLocalProviders(ctx context.Context) { + cleanupLogger.Info("Starting CID provider and label republishing cycle") + + // Query all local records from the datastore + results, err := c.dstore.Query(ctx, query.Query{ + Prefix: "/records/", + }) + if err != nil { + cleanupLogger.Error("Failed to query local records for republishing", "error", err) + + return + } + defer results.Close() + + republishedCount := 0 + labelRepublishedCount := 0 + errorCount := 0 + + var orphanedCIDs []string + + for result := range results.Next() { + if result.Error != nil { + cleanupLogger.Warn("Error reading local record for republishing", "error", result.Error) + + continue + } + + // Extract CID from record key: /records/CID123 → CID123 + cidStr := path.Base(result.Key) + if cidStr == "" { + continue + } + + // Verify the record still exists in storage + ref := &corev1.RecordRef{Cid: cidStr} + + _, err := c.storeAPI.Lookup(ctx, ref) + if err != nil { + cleanupLogger.Warn("Record no longer exists in storage, marking as orphaned", "cid", cidStr, "error", err) + orphanedCIDs = append(orphanedCIDs, cidStr) + errorCount++ + + continue + } + + // Pull the record from storage for republishing + record, err := c.storeAPI.Pull(ctx, ref) + if err != nil { + cleanupLogger.Warn("Failed to pull record for republishing", + "cid", cidStr, + "error", err) + + errorCount++ + + continue + } + + // Wrap record with adapter for interface-based publishing + adapter := adapters.NewRecordAdapter(record) + + // Use injected publishing function (handles both DHT and GossipSub) + // This reuses routeRemote.Publish logic without circular dependency + if err := c.publishFunc(ctx, adapter); err != nil { + cleanupLogger.Warn("Failed to republish record to network", + "cid", cidStr, + "error", err) + + errorCount++ + + continue + } + + cleanupLogger.Debug("Successfully republished record to network", "cid", cidStr) + + republishedCount++ + labelRepublishedCount++ // Count label republishing (done inside publishFunc) + } + + // Clean up orphaned local records and their labels + if len(orphanedCIDs) > 0 { + cleanedCount := c.cleanupOrphanedLocalLabels(ctx, orphanedCIDs) + cleanupLogger.Info("Cleaned up orphaned local records", "count", cleanedCount) + } + + cleanupLogger.Info("Completed republishing cycle", + "dhtRepublished", republishedCount, + "gossipSubRepublished", labelRepublishedCount, + "errors", errorCount, + "orphaned", len(orphanedCIDs)) +} + +// cleanupStaleRemoteLabels removes remote labels that haven't been seen recently. 
+func (c *CleanupManager) cleanupStaleRemoteLabels(ctx context.Context) error { + localPeerID := c.server.Host().ID().String() + + cleanupLogger.Debug("Starting stale remote label cleanup") + + // Query all label keys with remote filter + // We'll query each namespace separately and combine results + var allResults []query.Result + + for _, namespace := range types.AllLabelTypes() { + nsResults, err := c.dstore.Query(ctx, query.Query{ + Prefix: namespace.Prefix(), + Filters: []query.Filter{ + &remoteLabelFilter{ + dstore: c.dstore, + ctx: ctx, + localPeerID: localPeerID, + }, + }, + }) + if err != nil { + cleanupLogger.Warn("Failed to query namespace", "namespace", namespace, "error", err) + + continue + } + + // Collect results from this namespace + for result := range nsResults.Next() { + allResults = append(allResults, result) + } + + nsResults.Close() + } + + var staleKeys []datastore.Key + + // Check each remote label for staleness + for _, result := range allResults { + if result.Error != nil { + cleanupLogger.Warn("Error reading label entry", "key", result.Key, "error", result.Error) + + continue + } + + // Parse enhanced key to get peer information + _, _, keyPeerID, err := ParseEnhancedLabelKey(result.Key) + if err != nil { + cleanupLogger.Warn("Failed to parse enhanced label key, marking for deletion", + "key", result.Key, "error", err) + + staleKeys = append(staleKeys, datastore.NewKey(result.Key)) + + continue + } + + var metadata types.LabelMetadata + if err := json.Unmarshal(result.Value, &metadata); err != nil { + cleanupLogger.Warn("Failed to parse label metadata, marking for deletion", + "key", result.Key, "error", err) + + staleKeys = append(staleKeys, datastore.NewKey(result.Key)) + + continue + } + + // Validate metadata before checking staleness + if err := metadata.Validate(); err != nil { + cleanupLogger.Warn("Invalid label metadata found during cleanup, marking for deletion", + "key", result.Key, "error", err) + + staleKeys = append(staleKeys, datastore.NewKey(result.Key)) + + continue + } + + // Check if label is stale using the IsStale method + if metadata.IsStale(MaxLabelAge) { + cleanupLogger.Debug("Found stale remote label", + "key", result.Key, "age", metadata.Age(), "peer", keyPeerID) + + staleKeys = append(staleKeys, datastore.NewKey(result.Key)) + } + } + + // Delete stale labels in batch + if len(staleKeys) > 0 { + batch, err := c.dstore.Batch(ctx) + if err != nil { + return fmt.Errorf("failed to create batch for cleanup: %w", err) + } + + for _, key := range staleKeys { + if err := batch.Delete(ctx, key); err != nil { + cleanupLogger.Warn("Failed to delete stale label", "key", key.String(), "error", err) + } + } + + if err := batch.Commit(ctx); err != nil { + return fmt.Errorf("failed to commit stale label cleanup: %w", err) + } + + cleanupLogger.Info("Cleaned up stale remote labels", "count", len(staleKeys)) + } else { + cleanupLogger.Debug("No stale remote labels found") + } + + return nil +} + +// cleanupOrphanedLocalLabels removes local records and labels for CIDs that no longer exist in storage. +func (c *CleanupManager) cleanupOrphanedLocalLabels(ctx context.Context, orphanedCIDs []string) int { + cleanedCount := 0 + + for _, cid := range orphanedCIDs { + if c.cleanupLabelsForCID(ctx, cid) { + cleanedCount++ + } + } + + return cleanedCount +} + +// cleanupLabelsForCID removes all local records and labels associated with a specific CID. 
+func (c *CleanupManager) cleanupLabelsForCID(ctx context.Context, cid string) bool { + batch, err := c.dstore.Batch(ctx) + if err != nil { + cleanupLogger.Error("Failed to create cleanup batch", "cid", cid, "error", err) + + return false + } + + keysDeleted := 0 + + // Remove the /records/ key + recordKey := datastore.NewKey("/records/" + cid) + if err := batch.Delete(ctx, recordKey); err != nil { + cleanupLogger.Warn("Failed to delete record key", "key", recordKey.String(), "error", err) + } else { + keysDeleted++ + } + + // Find and remove all label keys for this CID across all namespaces + localPeerID := c.server.Host().ID().String() + + for _, namespace := range types.AllLabelTypes() { + // Query labels in this namespace that match our CID + labelResults, err := c.dstore.Query(ctx, query.Query{ + Prefix: namespace.Prefix(), + }) + if err != nil { + cleanupLogger.Warn("Failed to query labels for cleanup", "namespace", namespace, "cid", cid, "error", err) + + continue + } + + defer labelResults.Close() + + for result := range labelResults.Next() { + // Parse enhanced key to get CID and PeerID + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(result.Key) + if err != nil { + cleanupLogger.Warn("Failed to parse enhanced label key during cleanup, deleting", + "key", result.Key, "error", err) + // Delete malformed keys + if err := batch.Delete(ctx, datastore.NewKey(result.Key)); err == nil { + keysDeleted++ + } + + continue + } + + // Check if this key matches our CID and is from local peer + if keyCID == cid && keyPeerID == localPeerID { + // Delete this local label + labelKey := datastore.NewKey(result.Key) + if err := batch.Delete(ctx, labelKey); err != nil { + cleanupLogger.Warn("Failed to delete label key", "key", labelKey.String(), "error", err) + } else { + keysDeleted++ + + cleanupLogger.Debug("Scheduled orphaned label for deletion", "key", result.Key) + } + } + } + } + + // Commit the batch deletion + if err := batch.Commit(ctx); err != nil { + cleanupLogger.Error("Failed to commit orphaned label cleanup", "cid", cid, "error", err) + + return false + } + + if keysDeleted > 0 { + cleanupLogger.Debug("Successfully cleaned up orphaned labels", "cid", cid, "keysDeleted", keysDeleted) + } + + return keysDeleted > 0 +} diff --git a/server/routing/config/config.go b/server/routing/config/config.go index 108f75bc7..f9b4d4b38 100644 --- a/server/routing/config/config.go +++ b/server/routing/config/config.go @@ -1,65 +1,65 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import "time" - -var ( - DefaultListenAddress = "/ip4/0.0.0.0/tcp/8999" - DefaultBootstrapPeers = []string{ - // TODO: once we deploy our bootstrap nodes, we should update this - } - - // GossipSub default (only enable/disable is configurable). - DefaultGossipSubEnabled = true -) - -type Config struct { - // Address to use for routing - ListenAddress string `json:"listen_address,omitempty" mapstructure:"listen_address"` - - // Address to use for sync operations - DirectoryAPIAddress string `json:"directory_api_address,omitempty" mapstructure:"directory_api_address"` - - // Peers to use for bootstrapping. - // We can choose between public and private peers. - BootstrapPeers []string `json:"bootstrap_peers,omitempty" mapstructure:"bootstrap_peers"` - - // Path to asymmetric private key - KeyPath string `json:"key_path,omitempty" mapstructure:"key_path"` - - // Path to the routing datastore. - // If empty, the routing data will be stored in memory. 
- // If not empty, this dir will be used to store the routing data on disk. - DatastoreDir string `json:"datastore_dir,omitempty" mapstructure:"datastore_dir"` - - // Refresh interval for DHT routing tables. - // If not set or zero, uses the default RefreshInterval constant. - // This is primarily used for testing with faster intervals. - RefreshInterval time.Duration `json:"refresh_interval,omitempty" mapstructure:"refresh_interval"` - - // GossipSub configuration for label announcements - GossipSub GossipSubConfig `json:"gossipsub,omitempty" mapstructure:"gossipsub"` -} - -// GossipSubConfig configures GossipSub-based label announcements. -// Protocol parameters (topic name, message size limits) are NOT configurable -// and are defined in server/routing/pubsub/constants.go to ensure network-wide -// compatibility. Only the enable/disable flag is configurable. -// -// Benefits when enabled: -// - Reaches ALL subscribed peers (not just k-closest in DHT) -// - Minimal bandwidth (~100B vs KB-MB for full record) -// - Fast propagation (~5-20ms vs ~100-500ms for DHT) -// - High cache hit rate (90%+ vs 30% with pull-based) -type GossipSubConfig struct { - // Enabled controls whether GossipSub label announcements are used. - // When true: Labels are announced via GossipSub (efficient, wide propagation) - // When false: Falls back to DHT+Pull mechanism (existing behavior) - // Default: true (recommended for production) - // - // Note: Protocol parameters (topic, message size) are hardcoded in - // server/routing/pubsub/constants.go for network compatibility. - Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import "time" + +var ( + DefaultListenAddress = "/ip4/0.0.0.0/tcp/8999" + DefaultBootstrapPeers = []string{ + // TODO: once we deploy our bootstrap nodes, we should update this + } + + // GossipSub default (only enable/disable is configurable). + DefaultGossipSubEnabled = true +) + +type Config struct { + // Address to use for routing + ListenAddress string `json:"listen_address,omitempty" mapstructure:"listen_address"` + + // Address to use for sync operations + DirectoryAPIAddress string `json:"directory_api_address,omitempty" mapstructure:"directory_api_address"` + + // Peers to use for bootstrapping. + // We can choose between public and private peers. + BootstrapPeers []string `json:"bootstrap_peers,omitempty" mapstructure:"bootstrap_peers"` + + // Path to asymmetric private key + KeyPath string `json:"key_path,omitempty" mapstructure:"key_path"` + + // Path to the routing datastore. + // If empty, the routing data will be stored in memory. + // If not empty, this dir will be used to store the routing data on disk. + DatastoreDir string `json:"datastore_dir,omitempty" mapstructure:"datastore_dir"` + + // Refresh interval for DHT routing tables. + // If not set or zero, uses the default RefreshInterval constant. + // This is primarily used for testing with faster intervals. + RefreshInterval time.Duration `json:"refresh_interval,omitempty" mapstructure:"refresh_interval"` + + // GossipSub configuration for label announcements + GossipSub GossipSubConfig `json:"gossipsub,omitempty" mapstructure:"gossipsub"` +} + +// GossipSubConfig configures GossipSub-based label announcements. 
+// Protocol parameters (topic name, message size limits) are NOT configurable +// and are defined in server/routing/pubsub/constants.go to ensure network-wide +// compatibility. Only the enable/disable flag is configurable. +// +// Benefits when enabled: +// - Reaches ALL subscribed peers (not just k-closest in DHT) +// - Minimal bandwidth (~100B vs KB-MB for full record) +// - Fast propagation (~5-20ms vs ~100-500ms for DHT) +// - High cache hit rate (90%+ vs 30% with pull-based) +type GossipSubConfig struct { + // Enabled controls whether GossipSub label announcements are used. + // When true: Labels are announced via GossipSub (efficient, wide propagation) + // When false: Falls back to DHT+Pull mechanism (existing behavior) + // Default: true (recommended for production) + // + // Note: Protocol parameters (topic, message size) are hardcoded in + // server/routing/pubsub/constants.go for network compatibility. + Enabled bool `json:"enabled,omitempty" mapstructure:"enabled"` +} diff --git a/server/routing/constants.go b/server/routing/constants.go index 0406661fc..1a07e7a98 100644 --- a/server/routing/constants.go +++ b/server/routing/constants.go @@ -1,55 +1,55 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import "time" - -// DHT and routing timing constants that should be used consistently across the codebase. -// These constants ensure proper coordination between DHT expiration, republishing, and cleanup tasks. -const ( - // RecordTTL defines how long DHT records persist before expiring. - // This is configured via dht.MaxRecordAge() and affects all PutValue operations. - // Default DHT TTL is 36h, but we use 48h for better network resilience. - RecordTTL = 48 * time.Hour - // RepublishInterval defines how often we republish CID provider announcements to prevent expiration. - // Provider records typically expire after 24h, but we use a longer interval for robustness. - // This ensures our content remains discoverable by triggering pull-based label caching. - RepublishInterval = 36 * time.Hour - // CleanupInterval defines how often we clean up stale announcements. - // This should match DHTRecordTTL to stay consistent with DHT behavior and prevent - // our local cache from having stale entries that no longer exist in the DHT. - CleanupInterval = 48 * time.Hour - // RefreshInterval defines how often DHT routing tables are refreshed. - // This is a shorter interval for maintaining network connectivity. - RefreshInterval = 30 * time.Second -) - -// Protocol constants for libp2p DHT and discovery. -const ( - // ProtocolPrefix is the prefix used for DHT protocol identification. - ProtocolPrefix = "dir" - - // ProtocolRendezvous is the rendezvous string used for peer discovery. - ProtocolRendezvous = "dir/connect" -) - -// Validation rules and limits. -const ( - // MaxHops defines the maximum number of hops allowed in distributed queries. - MaxHops = 20 - - // NotificationChannelSize defines the buffer size for announcement notifications. - NotificationChannelSize = 1000 - - // MaxLabelAge defines when remote label announcements are considered stale. - // Labels older than this will be cleaned up during periodic cleanup cycles. - MaxLabelAge = 72 * time.Hour - - // DefaultMinMatchScore defines the minimum allowed match score for production safety. - // Per proto specification: "If not set, it will return records that match at least one query". 
-	// Any value below this threshold is automatically corrected to this value.
-	DefaultMinMatchScore = 1
-)
-
-const ResultChannelBufferSize = 100
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package routing
+
+import "time"
+
+// DHT and routing timing constants that should be used consistently across the codebase.
+// These constants ensure proper coordination between DHT expiration, republishing, and cleanup tasks.
+const (
+	// RecordTTL defines how long DHT records persist before expiring.
+	// This is configured via dht.MaxRecordAge() and affects all PutValue operations.
+	// Default DHT TTL is 36h, but we use 48h for better network resilience.
+	RecordTTL = 48 * time.Hour
+	// RepublishInterval defines how often we republish CID provider announcements to prevent expiration.
+	// Provider records typically expire after 24h, but we use a longer interval for robustness.
+	// This ensures our content remains discoverable by triggering pull-based label caching.
+	RepublishInterval = 36 * time.Hour
+	// CleanupInterval defines how often we clean up stale announcements.
+	// This should match RecordTTL to stay consistent with DHT behavior and prevent
+	// our local cache from having stale entries that no longer exist in the DHT.
+	CleanupInterval = 48 * time.Hour
+	// RefreshInterval defines how often DHT routing tables are refreshed.
+	// This is a shorter interval for maintaining network connectivity.
+	RefreshInterval = 30 * time.Second
+)
+
+// Protocol constants for libp2p DHT and discovery.
+const (
+	// ProtocolPrefix is the prefix used for DHT protocol identification.
+	ProtocolPrefix = "dir"
+
+	// ProtocolRendezvous is the rendezvous string used for peer discovery.
+	ProtocolRendezvous = "dir/connect"
+)
+
+// Validation rules and limits.
+const (
+	// MaxHops defines the maximum number of hops allowed in distributed queries.
+	MaxHops = 20
+
+	// NotificationChannelSize defines the buffer size for announcement notifications.
+	NotificationChannelSize = 1000
+
+	// MaxLabelAge defines when remote label announcements are considered stale.
+	// Labels older than this will be cleaned up during periodic cleanup cycles.
+	MaxLabelAge = 72 * time.Hour
+
+	// DefaultMinMatchScore defines the minimum allowed match score for production safety.
+	// Per proto specification: "If not set, it will return records that match at least one query".
+	// Any value below this threshold is automatically corrected to this value.
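	// Sketch of the correction implied above (hypothetical handler code):
	//
	//	score := req.GetMinMatchScore()
	//	if score < DefaultMinMatchScore {
	//		score = DefaultMinMatchScore // corrected, not rejected
	//	}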
+ DefaultMinMatchScore = 1 +) + +const ResultChannelBufferSize = 100 diff --git a/server/routing/handler.go b/server/routing/handler.go index bbe01e900..6b5082b1c 100644 --- a/server/routing/handler.go +++ b/server/routing/handler.go @@ -1,107 +1,107 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-kad-dht/providers" - "github.com/libp2p/go-libp2p/core/peer" - mh "github.com/multiformats/go-multihash" -) - -var ( - _ providers.ProviderStore = &handler{} - handlerLogger = logging.Logger("routing/handler") -) - -type handler struct { - *providers.ProviderManager - hostID string - notifyCh chan<- *handlerSync -} - -type handlerSync struct { - Ref *corev1.RecordRef - Peer peer.AddrInfo -} - -func (h *handler) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error { - if err := h.handleAnnounce(ctx, key, prov); err != nil { - // log this error only - handlerLogger.Error("Failed to handle announce", "error", err) - } - - if err := h.ProviderManager.AddProvider(ctx, key, prov); err != nil { - return fmt.Errorf("failed to add provider: %w", err) - } - - return nil -} - -func (h *handler) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) { - providers, err := h.ProviderManager.GetProviders(ctx, key) - if err != nil { - return nil, fmt.Errorf("failed to get providers: %w", err) - } - - return providers, nil -} - -// handleAnnounce tries to parse the data from provider in order to update the local routing data -// about the content and peer. -// nolint:unparam -func (h *handler) handleAnnounce(ctx context.Context, key []byte, prov peer.AddrInfo) error { - keyStr := string(key) - handlerLogger.Debug("Received announcement event", "key", keyStr, "provider", prov) - - // validate if the provider is not the same as the host - if peer.ID(h.hostID) == prov.ID { - handlerLogger.Info("Ignoring announcement event from self", "provider", prov) - - return nil - } - - // All announcements are now treated as CID provider announcements - // Labels are discovered via pull-based mechanism when content is fetched - return h.handleCIDProviderAnnouncement(ctx, key, prov) -} - -// handleCIDProviderAnnouncement handles CID provider announcements (existing logic). 
-func (h *handler) handleCIDProviderAnnouncement(_ context.Context, key []byte, prov peer.AddrInfo) error { - // get ref cid from request - // if this fails, it may mean that it's not DIR-constructed CID - cast, err := mh.Cast(key) - if err != nil { - handlerLogger.Error("Failed to cast key to multihash", "error", err) - - return nil - } - - // create CID from multihash - ref := &corev1.RecordRef{ - Cid: cid.NewCidV1(1, cast).String(), - } - - // Validate that we have a non-empty CID - if ref.GetCid() == "" { - handlerLogger.Info("Ignoring announcement event for empty CID") - - return nil - } - - handlerLogger.Info("CID provider announcement event", "ref", ref, "provider", prov, "host", h.hostID) - - // notify the channel - h.notifyCh <- &handlerSync{ - Ref: ref, - Peer: prov, - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p/core/peer" + mh "github.com/multiformats/go-multihash" +) + +var ( + _ providers.ProviderStore = &handler{} + handlerLogger = logging.Logger("routing/handler") +) + +type handler struct { + *providers.ProviderManager + hostID string + notifyCh chan<- *handlerSync +} + +type handlerSync struct { + Ref *corev1.RecordRef + Peer peer.AddrInfo +} + +func (h *handler) AddProvider(ctx context.Context, key []byte, prov peer.AddrInfo) error { + if err := h.handleAnnounce(ctx, key, prov); err != nil { + // log this error only + handlerLogger.Error("Failed to handle announce", "error", err) + } + + if err := h.ProviderManager.AddProvider(ctx, key, prov); err != nil { + return fmt.Errorf("failed to add provider: %w", err) + } + + return nil +} + +func (h *handler) GetProviders(ctx context.Context, key []byte) ([]peer.AddrInfo, error) { + providers, err := h.ProviderManager.GetProviders(ctx, key) + if err != nil { + return nil, fmt.Errorf("failed to get providers: %w", err) + } + + return providers, nil +} + +// handleAnnounce tries to parse the data from provider in order to update the local routing data +// about the content and peer. +// nolint:unparam +func (h *handler) handleAnnounce(ctx context.Context, key []byte, prov peer.AddrInfo) error { + keyStr := string(key) + handlerLogger.Debug("Received announcement event", "key", keyStr, "provider", prov) + + // validate if the provider is not the same as the host + if peer.ID(h.hostID) == prov.ID { + handlerLogger.Info("Ignoring announcement event from self", "provider", prov) + + return nil + } + + // All announcements are now treated as CID provider announcements + // Labels are discovered via pull-based mechanism when content is fetched + return h.handleCIDProviderAnnouncement(ctx, key, prov) +} + +// handleCIDProviderAnnouncement handles CID provider announcements (existing logic). 
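// In miniature, the conversion performed below (same calls, assembled here
// for reference):
//
//	cast, err := mh.Cast(key)             // []byte -> multihash, validates encoding
//	ref := cid.NewCidV1(1, cast).String() // wrap as CIDv1 with the codec value used here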
+func (h *handler) handleCIDProviderAnnouncement(_ context.Context, key []byte, prov peer.AddrInfo) error { + // get ref cid from request + // if this fails, it may mean that it's not DIR-constructed CID + cast, err := mh.Cast(key) + if err != nil { + handlerLogger.Error("Failed to cast key to multihash", "error", err) + + return nil + } + + // create CID from multihash + ref := &corev1.RecordRef{ + Cid: cid.NewCidV1(1, cast).String(), + } + + // Validate that we have a non-empty CID + if ref.GetCid() == "" { + handlerLogger.Info("Ignoring announcement event for empty CID") + + return nil + } + + handlerLogger.Info("CID provider announcement event", "ref", ref, "provider", prov, "host", h.hostID) + + // notify the channel + h.notifyCh <- &handlerSync{ + Ref: ref, + Peer: prov, + } + + return nil +} diff --git a/server/routing/handler_test.go b/server/routing/handler_test.go index e628486f7..cae854f5d 100644 --- a/server/routing/handler_test.go +++ b/server/routing/handler_test.go @@ -1,73 +1,73 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:testifylint -package routing - -import ( - "testing" - "time" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" -) - -// Testing 2 nodes, A -> B -// stores and announces an record. -// A discovers it retrieves the key metadata from B. -func TestHandler(t *testing.T) { - // Test data - testRecord := corev1.New(&typesv1alpha0.Record{ - Name: "test-handler-agent", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, - }, - Locators: []*typesv1alpha0.Locator{ - {Type: "type1", Url: "url1"}, - }, - }) - testRef := &corev1.RecordRef{Cid: testRecord.GetCid()} - - // create demo network - firstNode := newTestServer(t, t.Context(), nil) - secondNode := newTestServer(t, t.Context(), firstNode.remote.server.P2pAddrs()) - - // wait for connection - time.Sleep(2 * time.Second) - <-firstNode.remote.server.DHT().RefreshRoutingTable() - <-secondNode.remote.server.DHT().RefreshRoutingTable() - - // publish the key on second node and wait on the first - cidStr := testRef.GetCid() - decodedCID, err := cid.Decode(cidStr) - assert.NoError(t, err) - - // push the data - _, err = secondNode.remote.storeAPI.Push(t.Context(), testRecord) - assert.NoError(t, err) - - // announce the key - err = secondNode.remote.server.DHT().Provide(t.Context(), decodedCID, true) - assert.NoError(t, err) - - // wait for sync - time.Sleep(2 * time.Second) - <-firstNode.remote.server.DHT().RefreshRoutingTable() - <-secondNode.remote.server.DHT().RefreshRoutingTable() - - // check on first - found := false - - peerCh := firstNode.remote.server.DHT().FindProvidersAsync(t.Context(), decodedCID, 1) - for peer := range peerCh { - if peer.ID == secondNode.remote.server.Host().ID() { - found = true - - break - } - } - - assert.True(t, found) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:testifylint +package routing + +import ( + "testing" + "time" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" +) + +// Testing 2 nodes, A -> B +// stores and announces an record. 
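// The DHT calls this test exercises, in miniature (nodeA/nodeB stand in for
// the two test servers created below):
//
//	_ = nodeB.DHT().Provide(ctx, decodedCID, true)               // B announces
//	peerCh := nodeA.DHT().FindProvidersAsync(ctx, decodedCID, 1) // A discovers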
+// A discovers it retrieves the key metadata from B. +func TestHandler(t *testing.T) { + // Test data + testRecord := corev1.New(&typesv1alpha0.Record{ + Name: "test-handler-agent", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, + }, + Locators: []*typesv1alpha0.Locator{ + {Type: "type1", Url: "url1"}, + }, + }) + testRef := &corev1.RecordRef{Cid: testRecord.GetCid()} + + // create demo network + firstNode := newTestServer(t, t.Context(), nil) + secondNode := newTestServer(t, t.Context(), firstNode.remote.server.P2pAddrs()) + + // wait for connection + time.Sleep(2 * time.Second) + <-firstNode.remote.server.DHT().RefreshRoutingTable() + <-secondNode.remote.server.DHT().RefreshRoutingTable() + + // publish the key on second node and wait on the first + cidStr := testRef.GetCid() + decodedCID, err := cid.Decode(cidStr) + assert.NoError(t, err) + + // push the data + _, err = secondNode.remote.storeAPI.Push(t.Context(), testRecord) + assert.NoError(t, err) + + // announce the key + err = secondNode.remote.server.DHT().Provide(t.Context(), decodedCID, true) + assert.NoError(t, err) + + // wait for sync + time.Sleep(2 * time.Second) + <-firstNode.remote.server.DHT().RefreshRoutingTable() + <-secondNode.remote.server.DHT().RefreshRoutingTable() + + // check on first + found := false + + peerCh := firstNode.remote.server.DHT().FindProvidersAsync(t.Context(), decodedCID, 1) + for peer := range peerCh { + if peer.ID == secondNode.remote.server.Host().ID() { + found = true + + break + } + } + + assert.True(t, found) +} diff --git a/server/routing/internal/p2p/constants.go b/server/routing/internal/p2p/constants.go index 7d3db9594..edc3b9f42 100644 --- a/server/routing/internal/p2p/constants.go +++ b/server/routing/internal/p2p/constants.go @@ -1,44 +1,44 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package p2p - -import "time" - -// Connection Manager constants for libp2p peer connection management. -// These constants ensure healthy peer connectivity while preventing resource exhaustion. -const ( - // ConnMgrLowWater is the minimum number of connections to maintain. - // Below this, the connection manager will not prune any peers. - // Value accounts for: DHT routing table (~20) + GossipSub mesh (~10) + buffer (~20). - ConnMgrLowWater = 50 - - // ConnMgrHighWater is the maximum number of connections before pruning starts. - // When this limit is reached, low-priority peers are pruned to bring count down. - // Provides headroom for: DHT discovery + mesh dynamics + temporary connections. - ConnMgrHighWater = 200 - - // ConnMgrGracePeriod is the duration new connections are protected from pruning. - // This gives new connections time to prove useful before being eligible for removal. - ConnMgrGracePeriod = 2 * time.Minute -) - -// Peer priority constants for Connection Manager tagging. -// Higher values indicate higher priority and are less likely to be pruned. -const ( - // PeerPriorityBootstrap is the priority for bootstrap peers. - // Bootstrap peers are also protected (never pruned) in addition to this high priority. - PeerPriorityBootstrap = 100 - - // PeerPriorityGossipSubMesh is the priority for GossipSub mesh peers. - // Mesh peers are critical for fast label propagation and should be kept. 
- PeerPriorityGossipSubMesh = 50 -) - -// MeshPeerTaggingInterval defines how often GossipSub mesh peers are re-tagged -// to protect them from Connection Manager pruning as mesh topology changes. -const MeshPeerTaggingInterval = 30 * time.Second - -// mDNS service name for local network peer discovery. -// This is used to identify DIR peers on the same LAN. -const MDNSServiceName = "agntcy-dir-local-discovery" +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package p2p + +import "time" + +// Connection Manager constants for libp2p peer connection management. +// These constants ensure healthy peer connectivity while preventing resource exhaustion. +const ( + // ConnMgrLowWater is the minimum number of connections to maintain. + // Below this, the connection manager will not prune any peers. + // Value accounts for: DHT routing table (~20) + GossipSub mesh (~10) + buffer (~20). + ConnMgrLowWater = 50 + + // ConnMgrHighWater is the maximum number of connections before pruning starts. + // When this limit is reached, low-priority peers are pruned to bring count down. + // Provides headroom for: DHT discovery + mesh dynamics + temporary connections. + ConnMgrHighWater = 200 + + // ConnMgrGracePeriod is the duration new connections are protected from pruning. + // This gives new connections time to prove useful before being eligible for removal. + ConnMgrGracePeriod = 2 * time.Minute +) + +// Peer priority constants for Connection Manager tagging. +// Higher values indicate higher priority and are less likely to be pruned. +const ( + // PeerPriorityBootstrap is the priority for bootstrap peers. + // Bootstrap peers are also protected (never pruned) in addition to this high priority. + PeerPriorityBootstrap = 100 + + // PeerPriorityGossipSubMesh is the priority for GossipSub mesh peers. + // Mesh peers are critical for fast label propagation and should be kept. + PeerPriorityGossipSubMesh = 50 +) + +// MeshPeerTaggingInterval defines how often GossipSub mesh peers are re-tagged +// to protect them from Connection Manager pruning as mesh topology changes. +const MeshPeerTaggingInterval = 30 * time.Second + +// mDNS service name for local network peer discovery. +// This is used to identify DIR peers on the same LAN. +const MDNSServiceName = "agntcy-dir-local-discovery" diff --git a/server/routing/internal/p2p/dht.go b/server/routing/internal/p2p/dht.go index b0eeb7bdf..4d1cb3161 100644 --- a/server/routing/internal/p2p/dht.go +++ b/server/routing/internal/p2p/dht.go @@ -1,84 +1,84 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package p2p - -import ( - "context" - "sync" - "time" - - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" -) - -// newDHT creates a DHT to be served over libp2p host. -// DHT will serve as a bootstrap peer if no bootstrap peers provided. -func newDHT(ctx context.Context, host host.Host, bootstrapPeers []peer.AddrInfo, refreshPeriod time.Duration, options ...dht.Option) (*dht.IpfsDHT, error) { - // If no bootstrap nodes provided, we are the bootstrap node. 
- if len(bootstrapPeers) == 0 { - options = append(options, dht.Mode(dht.ModeServer)) - } else { - options = append(options, dht.BootstrapPeers(bootstrapPeers...)) - } - - // Set refresh period - if refreshPeriod > 0 { - options = append(options, dht.RoutingTableRefreshPeriod(refreshPeriod)) - } - - // Create DHT - kdht, err := dht.New(ctx, host, options...) - if err != nil { - return nil, err //nolint:wrapcheck - } - - // Bootstrap DHT - if err = kdht.Bootstrap(ctx); err != nil { - return nil, err //nolint:wrapcheck - } - - // Sync with bootstrap nodes - var wg sync.WaitGroup - for _, p := range bootstrapPeers { - wg.Add(1) - - go func(p peer.AddrInfo) { - defer wg.Done() - - if err := host.Connect(ctx, p); err != nil { - logger.Error("Error while connecting to node", "node", p.ID, "error", err) - - return - } - - logger.Info("Successfully connected to bootstrap node", "node", p.ID) - }(p) - } - - wg.Wait() - - // Tag and protect bootstrap peers to prevent Connection Manager from pruning them. - // Bootstrap peers are critical for network entry and should never be disconnected. - if host.ConnManager() != nil { - for _, p := range bootstrapPeers { - // Check if we're actually connected (connection might have failed) - if host.Network().Connectedness(p.ID) == network.Connected { - // Tag with high priority - host.ConnManager().TagPeer(p.ID, "bootstrap", PeerPriorityBootstrap) - - // Protect (never disconnect) - host.ConnManager().Protect(p.ID, "bootstrap") - - logger.Info("Protected bootstrap peer", - "peer", p.ID.String(), - "tag", "bootstrap", - "priority", PeerPriorityBootstrap) - } - } - } - - return kdht, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package p2p + +import ( + "context" + "sync" + "time" + + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" +) + +// newDHT creates a DHT to be served over libp2p host. +// DHT will serve as a bootstrap peer if no bootstrap peers provided. +func newDHT(ctx context.Context, host host.Host, bootstrapPeers []peer.AddrInfo, refreshPeriod time.Duration, options ...dht.Option) (*dht.IpfsDHT, error) { + // If no bootstrap nodes provided, we are the bootstrap node. + if len(bootstrapPeers) == 0 { + options = append(options, dht.Mode(dht.ModeServer)) + } else { + options = append(options, dht.BootstrapPeers(bootstrapPeers...)) + } + + // Set refresh period + if refreshPeriod > 0 { + options = append(options, dht.RoutingTableRefreshPeriod(refreshPeriod)) + } + + // Create DHT + kdht, err := dht.New(ctx, host, options...) + if err != nil { + return nil, err //nolint:wrapcheck + } + + // Bootstrap DHT + if err = kdht.Bootstrap(ctx); err != nil { + return nil, err //nolint:wrapcheck + } + + // Sync with bootstrap nodes + var wg sync.WaitGroup + for _, p := range bootstrapPeers { + wg.Add(1) + + go func(p peer.AddrInfo) { + defer wg.Done() + + if err := host.Connect(ctx, p); err != nil { + logger.Error("Error while connecting to node", "node", p.ID, "error", err) + + return + } + + logger.Info("Successfully connected to bootstrap node", "node", p.ID) + }(p) + } + + wg.Wait() + + // Tag and protect bootstrap peers to prevent Connection Manager from pruning them. + // Bootstrap peers are critical for network entry and should never be disconnected. 
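	// Behavioral note: TagPeer only biases pruning order once ConnMgrHighWater
	// is hit, while Protect exempts a peer from pruning entirely; the bootstrap
	// peers below receive both.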
+ if host.ConnManager() != nil { + for _, p := range bootstrapPeers { + // Check if we're actually connected (connection might have failed) + if host.Network().Connectedness(p.ID) == network.Connected { + // Tag with high priority + host.ConnManager().TagPeer(p.ID, "bootstrap", PeerPriorityBootstrap) + + // Protect (never disconnect) + host.ConnManager().Protect(p.ID, "bootstrap") + + logger.Info("Protected bootstrap peer", + "peer", p.ID.String(), + "tag", "bootstrap", + "priority", PeerPriorityBootstrap) + } + } + } + + return kdht, nil +} diff --git a/server/routing/internal/p2p/host.go b/server/routing/internal/p2p/host.go index af7abc79e..18572a359 100644 --- a/server/routing/internal/p2p/host.go +++ b/server/routing/internal/p2p/host.go @@ -1,107 +1,107 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package p2p - -import ( - "fmt" - - "github.com/libp2p/go-libp2p" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/host" - connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr" - libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" - ma "github.com/multiformats/go-multiaddr" -) - -const ( - DirProtocol = "dir" - DirProtocolCode = 65535 -) - -// Add dir protocol to the host. -// -//nolint:mnd -func init() { - err := ma.AddProtocol(ma.Protocol{ - Name: DirProtocol, - Code: DirProtocolCode, - VCode: ma.CodeToVarint(DirProtocolCode), - Size: ma.LengthPrefixedVarSize, - Transcoder: ma.NewTranscoderFromFunctions( - // String to bytes encoder - func(s string) ([]byte, error) { - return []byte(s), nil - }, - // Bytes to string decoder - func(b []byte) (string, error) { - return string(b), nil - }, - // Validator (optional) - nil, - ), - }) - if err != nil { - panic(fmt.Errorf("failed to add dir protocol: %w", err)) - } -} - -// newHost creates a new host libp2p host. -func newHost(listenAddr, dirAPIAddr string, key crypto.PrivKey) (host.Host, error) { - // Create connection manager to limit and manage peer connections. - // This prevents resource exhaustion and enables smart peer pruning based on priority. - connMgr, err := connmgr.NewConnManager( - ConnMgrLowWater, // Minimum connections (DHT + GossipSub + buffer) - ConnMgrHighWater, // Maximum connections (prevents resource exhaustion) - connmgr.WithGracePeriod(ConnMgrGracePeriod), // Protect new connections - ) - if err != nil { - return nil, fmt.Errorf("failed to create p2p host connection manager: %w", err) - } - // Create host - host, err := libp2p.New( - // Add directory API address to the host address factory - libp2p.AddrsFactory( - func(addrs []ma.Multiaddr) []ma.Multiaddr { - // Only add the dir address if dirAPIAddr is not empty - if dirAPIAddr != "" { - dirAddr := ma.StringCast("/dir/" + dirAPIAddr) - - return append(addrs, dirAddr) - } - - return addrs - }, - ), - // Use the keypair we generated - libp2p.Identity(key), - // Multiple listen addresses - libp2p.ListenAddrStrings(listenAddr), - // support TLS connections - libp2p.Security(libp2ptls.ID, libp2ptls.New), - // support any other default transports (TCP) - libp2p.DefaultTransports, - // support any other default multiplexer - libp2p.DefaultMuxers, - // Let's prevent our peer from having too many - // connections by attaching a connection manager. - libp2p.ConnectionManager(connMgr), - // Enable hole punching to upgrade relay connections to direct. 
-	// When two NAT'd peers connect via relay, hole punching attempts to
-	// establish a direct connection through simultaneous dialing (DCUtR protocol).
-	// Success rate: ~70-80%. Falls back to relay if hole punching fails.
-	libp2p.EnableHolePunching(),
-	// Attempt to open ports using uPNP for NATed hosts.
-	libp2p.NATPortMap(),
-	// Enable AutoNAT service to help other peers detect if they are behind NAT.
-	// This is the server-side component that responds to NAT detection requests.
-	// Note: AutoNAT client (for detecting our own NAT status) runs automatically.
-	// This service is highly rate-limited and should not cause any performance issues.
-	libp2p.EnableNATService(),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create p2p host: %w", err)
-	}
-
-	return host, nil
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package p2p
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/crypto"
+	"github.com/libp2p/go-libp2p/core/host"
+	connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
+	libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+const (
+	DirProtocol = "dir"
+	DirProtocolCode = 65535
+)
+
+// Add dir protocol to the host.
+//
+//nolint:mnd
+func init() {
+	err := ma.AddProtocol(ma.Protocol{
+		Name: DirProtocol,
+		Code: DirProtocolCode,
+		VCode: ma.CodeToVarint(DirProtocolCode),
+		Size: ma.LengthPrefixedVarSize,
+		Transcoder: ma.NewTranscoderFromFunctions(
+			// String to bytes encoder
+			func(s string) ([]byte, error) {
+				return []byte(s), nil
+			},
+			// Bytes to string decoder
+			func(b []byte) (string, error) {
+				return string(b), nil
+			},
+			// Validator (optional)
+			nil,
+		),
+	})
+	if err != nil {
+		panic(fmt.Errorf("failed to add dir protocol: %w", err))
+	}
+}
+
+// newHost creates a new libp2p host.
+func newHost(listenAddr, dirAPIAddr string, key crypto.PrivKey) (host.Host, error) {
+	// Create connection manager to limit and manage peer connections.
+	// This prevents resource exhaustion and enables smart peer pruning based on priority.
+	connMgr, err := connmgr.NewConnManager(
+		ConnMgrLowWater,  // Minimum connections (DHT + GossipSub + buffer)
+		ConnMgrHighWater, // Maximum connections (prevents resource exhaustion)
+		connmgr.WithGracePeriod(ConnMgrGracePeriod), // Protect new connections
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create p2p host connection manager: %w", err)
+	}
+	// Create host
+	host, err := libp2p.New(
+		// Add directory API address to the host address factory
+		libp2p.AddrsFactory(
+			func(addrs []ma.Multiaddr) []ma.Multiaddr {
+				// Only add the dir address if dirAPIAddr is not empty
+				if dirAPIAddr != "" {
+					dirAddr := ma.StringCast("/dir/" + dirAPIAddr)
+
+					return append(addrs, dirAddr)
+				}
+
+				return addrs
+			},
+		),
+		// Use the keypair we generated
+		libp2p.Identity(key),
+		// Multiple listen addresses
+		libp2p.ListenAddrStrings(listenAddr),
+		// support TLS connections
+		libp2p.Security(libp2ptls.ID, libp2ptls.New),
+		// support any other default transports (TCP)
+		libp2p.DefaultTransports,
+		// support any other default multiplexer
+		libp2p.DefaultMuxers,
+		// Let's prevent our peer from having too many
+		// connections by attaching a connection manager.
+		libp2p.ConnectionManager(connMgr),
+		// Enable hole punching to upgrade relay connections to direct.
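	// Worth noting: DCUtR hole punching needs support on both peers and
	// commonly fails behind symmetric NATs; those connections stay relayed.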
+ // When two NAT'd peers connect via relay, hole punching attempts to + // establish a direct connection through simultaneous dialing (DCUtR protocol). + // Success rate: ~70-80%. Falls back to relay if hole punching fails. + libp2p.EnableHolePunching(), + // Attempt to open ports using uPNP for NATed hosts. + libp2p.NATPortMap(), + // Enable AutoNAT service to help other peers detect if they are behind NAT. + // This is the server-side component that responds to NAT detection requests. + // Note: AutoNAT client (for detecting our own NAT status) runs automatically. + // This service is highly rate-limited and should not cause any performance issues. + libp2p.EnableNATService(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create p2p host: %w", err) + } + + return host, nil +} diff --git a/server/routing/internal/p2p/mockrpc/rpc.go b/server/routing/internal/p2p/mockrpc/rpc.go index bc40b1639..ffd240eb5 100644 --- a/server/routing/internal/p2p/mockrpc/rpc.go +++ b/server/routing/internal/p2p/mockrpc/rpc.go @@ -1,168 +1,168 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mockrpc - -import ( - "context" - "fmt" - "time" - - "github.com/agntcy/dir/utils/logging" - rpc "github.com/libp2p/go-libp2p-gorpc" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" -) - -const ( - EchoService = "EchoRPCAPI" - EchoServiceFuncEcho = "Echo" -) - -var logger = logging.Logger("mockrpc") - -type EchoRPCAPI struct { - service *Service -} - -type Envelope struct { - Message string -} - -func (r *EchoRPCAPI) Echo(_ context.Context, in Envelope, out *Envelope) error { - *out = r.service.ReceiveEcho(in) - - return nil -} - -type Service struct { - rpcServer *rpc.Server - rpcClient *rpc.Client - host host.Host - protocol protocol.ID - listenCh chan<- string - ignored peerMap -} - -func Start(ctx context.Context, host host.Host, protocol protocol.ID, listenCh chan<- string, ignored []peer.AddrInfo) error { - service := &Service{ - host: host, - protocol: protocol, - listenCh: listenCh, - ignored: newPeerMap(append(peer.AddrInfosToIDs(ignored), host.ID())), - } - - err := service.SetupRPC() - if err != nil { - return err - } - - // send dummy message - go service.StartMessaging(ctx) - - return nil -} - -func (s *Service) StartMessaging(ctx context.Context) { - ticker := time.NewTicker(time.Second * 1) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - s.Echo(ctx, "Message: Hello from "+s.host.ID().String()) - } - } -} - -func (s *Service) SetupRPC() error { - echoRPCAPI := EchoRPCAPI{service: s} - - s.rpcServer = rpc.NewServer(s.host, s.protocol) - - err := s.rpcServer.Register(&echoRPCAPI) - if err != nil { - return err //nolint:wrapcheck - } - - s.rpcClient = rpc.NewClientWithServer(s.host, s.protocol, s.rpcServer) - - return nil -} - -func (s *Service) Echo(ctx context.Context, message string) { - peers := filterPeers(s.host.Peerstore().Peers(), s.ignored) - replies := make([]*Envelope, len(peers)) - - // Send message to all peers - errs := s.rpcClient.MultiCall( - newCtxsN(ctx, len(peers)), - peers, - EchoService, - EchoServiceFuncEcho, - Envelope{Message: message}, - copyEnvelopesToIfaces(replies), - ) - - // Check responses from peers - for i, err := range errs { - if err != nil { - logger.Error("Error calling Echo", "peer", peers[i].String(), "error", err) - } else { - logger.Info("Echoed", "peer", 
peers[i].String(), "message", replies[i].Message) - } - } -} - -func (s *Service) ReceiveEcho(e Envelope) Envelope { - msg := fmt.Sprintf("Peer %s echoing: %s", s.host.ID(), e.Message) - s.listenCh <- msg - - return Envelope{Message: msg} -} - -func newCtxsN(ctx context.Context, n int) []context.Context { - ctxs := make([]context.Context, 0, n) - for range n { - ctxs = append(ctxs, ctx) - } - - return ctxs -} - -func copyEnvelopesToIfaces(in []*Envelope) []interface{} { - ifaces := make([]interface{}, len(in)) - - for i := range in { - in[i] = &Envelope{} - ifaces[i] = in[i] - } - - return ifaces -} - -type peerMap map[peer.ID]struct{} - -func newPeerMap(peers peer.IDSlice) peerMap { - peerMap := peerMap{} - for _, peer := range peers { - peerMap[peer] = struct{}{} - } - - return peerMap -} - -func filterPeers(peers peer.IDSlice, ignored peerMap) peer.IDSlice { - var filtered peer.IDSlice - - for _, p := range peers { - if _, exists := ignored[p]; !exists { - filtered = append(filtered, p) - } - } - - return filtered -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mockrpc + +import ( + "context" + "fmt" + "time" + + "github.com/agntcy/dir/utils/logging" + rpc "github.com/libp2p/go-libp2p-gorpc" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +const ( + EchoService = "EchoRPCAPI" + EchoServiceFuncEcho = "Echo" +) + +var logger = logging.Logger("mockrpc") + +type EchoRPCAPI struct { + service *Service +} + +type Envelope struct { + Message string +} + +func (r *EchoRPCAPI) Echo(_ context.Context, in Envelope, out *Envelope) error { + *out = r.service.ReceiveEcho(in) + + return nil +} + +type Service struct { + rpcServer *rpc.Server + rpcClient *rpc.Client + host host.Host + protocol protocol.ID + listenCh chan<- string + ignored peerMap +} + +func Start(ctx context.Context, host host.Host, protocol protocol.ID, listenCh chan<- string, ignored []peer.AddrInfo) error { + service := &Service{ + host: host, + protocol: protocol, + listenCh: listenCh, + ignored: newPeerMap(append(peer.AddrInfosToIDs(ignored), host.ID())), + } + + err := service.SetupRPC() + if err != nil { + return err + } + + // send dummy message + go service.StartMessaging(ctx) + + return nil +} + +func (s *Service) StartMessaging(ctx context.Context) { + ticker := time.NewTicker(time.Second * 1) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + s.Echo(ctx, "Message: Hello from "+s.host.ID().String()) + } + } +} + +func (s *Service) SetupRPC() error { + echoRPCAPI := EchoRPCAPI{service: s} + + s.rpcServer = rpc.NewServer(s.host, s.protocol) + + err := s.rpcServer.Register(&echoRPCAPI) + if err != nil { + return err //nolint:wrapcheck + } + + s.rpcClient = rpc.NewClientWithServer(s.host, s.protocol, s.rpcServer) + + return nil +} + +func (s *Service) Echo(ctx context.Context, message string) { + peers := filterPeers(s.host.Peerstore().Peers(), s.ignored) + replies := make([]*Envelope, len(peers)) + + // Send message to all peers + errs := s.rpcClient.MultiCall( + newCtxsN(ctx, len(peers)), + peers, + EchoService, + EchoServiceFuncEcho, + Envelope{Message: message}, + copyEnvelopesToIfaces(replies), + ) + + // Check responses from peers + for i, err := range errs { + if err != nil { + logger.Error("Error calling Echo", "peer", peers[i].String(), "error", err) + } else { + logger.Info("Echoed", "peer", peers[i].String(), "message", 
replies[i].Message) + } + } +} + +func (s *Service) ReceiveEcho(e Envelope) Envelope { + msg := fmt.Sprintf("Peer %s echoing: %s", s.host.ID(), e.Message) + s.listenCh <- msg + + return Envelope{Message: msg} +} + +func newCtxsN(ctx context.Context, n int) []context.Context { + ctxs := make([]context.Context, 0, n) + for range n { + ctxs = append(ctxs, ctx) + } + + return ctxs +} + +func copyEnvelopesToIfaces(in []*Envelope) []interface{} { + ifaces := make([]interface{}, len(in)) + + for i := range in { + in[i] = &Envelope{} + ifaces[i] = in[i] + } + + return ifaces +} + +type peerMap map[peer.ID]struct{} + +func newPeerMap(peers peer.IDSlice) peerMap { + peerMap := peerMap{} + for _, peer := range peers { + peerMap[peer] = struct{}{} + } + + return peerMap +} + +func filterPeers(peers peer.IDSlice, ignored peerMap) peer.IDSlice { + var filtered peer.IDSlice + + for _, p := range peers { + if _, exists := ignored[p]; !exists { + filtered = append(filtered, p) + } + } + + return filtered +} diff --git a/server/routing/internal/p2p/mockstream/stream.go b/server/routing/internal/p2p/mockstream/stream.go index 265085039..e26a24bff 100644 --- a/server/routing/internal/p2p/mockstream/stream.go +++ b/server/routing/internal/p2p/mockstream/stream.go @@ -1,77 +1,77 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package mockstream - -import ( - "bufio" - "context" - - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/protocol" -) - -func HandleStream(ctx context.Context, listenCh chan<- string) func(s network.Stream) { - return func(s network.Stream) { - // Create a buffer stream for non-blocking read and write. - rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) - - go readData(ctx, rw, listenCh, s.Close) - go writeData(ctx, rw) - - go func() { - <-ctx.Done() - s.Close() - }() - } -} - -func StartDataStream(ctx context.Context, h host.Host, protoc string, listenCh chan<- string) { - for { - select { - case <-ctx.Done(): - return - - default: - for _, p := range h.Peerstore().Peers() { - s, err := h.NewStream(ctx, p, protocol.ID(protoc)) - if err != nil { - continue - } - - rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) - go readData(ctx, rw, listenCh, s.Close) - go writeData(ctx, rw) - } - } - } -} - -func readData(ctx context.Context, rw *bufio.ReadWriter, listenCh chan<- string, closeFn func() error) { - for { - select { - case <-ctx.Done(): - return - default: - str, _ := rw.ReadString('\n') - listenCh <- str - - _ = closeFn() - - return - } - } -} - -func writeData(ctx context.Context, rw *bufio.ReadWriter) { - for { - select { - case <-ctx.Done(): - return - default: - _, _ = rw.WriteString("hello world\n") - _ = rw.Flush() - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package mockstream + +import ( + "bufio" + "context" + + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func HandleStream(ctx context.Context, listenCh chan<- string) func(s network.Stream) { + return func(s network.Stream) { + // Create a buffer stream for non-blocking read and write. 
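	// Typical wiring for this handler, for reference (the protocol ID string
	// is an assumed example; registration happens in the caller):
	//
	//	h.SetStreamHandler(protocol.ID("/dir/mock/1.0.0"), HandleStream(ctx, listenCh))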
+ rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + + go readData(ctx, rw, listenCh, s.Close) + go writeData(ctx, rw) + + go func() { + <-ctx.Done() + s.Close() + }() + } +} + +func StartDataStream(ctx context.Context, h host.Host, protoc string, listenCh chan<- string) { + for { + select { + case <-ctx.Done(): + return + + default: + for _, p := range h.Peerstore().Peers() { + s, err := h.NewStream(ctx, p, protocol.ID(protoc)) + if err != nil { + continue + } + + rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + go readData(ctx, rw, listenCh, s.Close) + go writeData(ctx, rw) + } + } + } +} + +func readData(ctx context.Context, rw *bufio.ReadWriter, listenCh chan<- string, closeFn func() error) { + for { + select { + case <-ctx.Done(): + return + default: + str, _ := rw.ReadString('\n') + listenCh <- str + + _ = closeFn() + + return + } + } +} + +func writeData(ctx context.Context, rw *bufio.ReadWriter) { + for { + select { + case <-ctx.Done(): + return + default: + _, _ = rw.WriteString("hello world\n") + _ = rw.Flush() + } + } +} diff --git a/server/routing/internal/p2p/options.go b/server/routing/internal/p2p/options.go index 1dbc67998..ffa58b67e 100644 --- a/server/routing/internal/p2p/options.go +++ b/server/routing/internal/p2p/options.go @@ -1,184 +1,184 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package p2p - -import ( - "crypto/ed25519" - "crypto/rand" - "errors" - "fmt" - "os" - "time" - - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-kad-dht/providers" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "golang.org/x/crypto/ssh" -) - -type APIRegistrer func(host.Host) error - -type options struct { - Key crypto.PrivKey - ListenAddress string - DirectoryAPIAddress string - BootstrapPeers []peer.AddrInfo - RefreshInterval time.Duration - Randevous string - APIRegistrer APIRegistrer - ProviderStore providers.ProviderStore - DHTCustomOpts func(host.Host) ([]dht.Option, error) -} - -type Option func(*options) error - -func WithRandevous(randevous string) Option { - return func(opts *options) error { - opts.Randevous = randevous - - return nil - } -} - -func WithIdentityKey(key crypto.PrivKey) Option { - return func(opts *options) error { - opts.Key = key - - return nil - } -} - -func WithIdentityKeyPath(keyPath string) Option { - return func(opts *options) error { - // If path is not set, skip - if keyPath == "" { - return nil - } - - // Read data - keyBytes, err := os.ReadFile(keyPath) - if err != nil { - return fmt.Errorf("failed to read key: %w", err) - } - - // Parse the private key - key, err := ssh.ParseRawPrivateKey(keyBytes) - if err != nil { - return fmt.Errorf("failed to parse private key: %w", err) - } - - // Try to convert to ED25519 private key - ed25519Key, ok := key.(ed25519.PrivateKey) - if !ok { - return errors.New("key is not an ED25519 private key") - } - - // Generate random key - generatedKey, err := crypto.UnmarshalEd25519PrivateKey(ed25519Key) - if err != nil { - return fmt.Errorf("failed to unmarshal identity key: %w", err) - } - - // set key - opts.Key = generatedKey - - return nil - } -} - -func WithListenAddress(addr string) Option { - return func(opts *options) error { - opts.ListenAddress = addr - - return nil - } -} - -func WithDirectoryAPIAddress(addr string) Option { - return func(opts *options) error { - opts.DirectoryAPIAddress = addr - - return 
nil - } -} - -func WithBootstrapAddrs(addrs []string) Option { - return func(opts *options) error { - peerInfos := make([]peer.AddrInfo, len(addrs)) - - for i, addr := range addrs { - peerinfo, err := peer.AddrInfoFromString(addr) - if err != nil { - return fmt.Errorf("invalid bootstrap addr: %w", err) - } - - peerInfos[i] = *peerinfo - } - - opts.BootstrapPeers = peerInfos - - return nil - } -} - -func WithBootstrapPeers(peers []peer.AddrInfo) Option { - return func(opts *options) error { - opts.BootstrapPeers = peers - - return nil - } -} - -func WithRefreshInterval(period time.Duration) Option { - return func(opts *options) error { - opts.RefreshInterval = period - - return nil - } -} - -// API can only be registreded for non-bootstrap nodes. -func WithAPIRegistrer(reg APIRegistrer) Option { - return func(opts *options) error { - opts.APIRegistrer = reg - - return nil - } -} - -// WithCustomDHTOpts sets custom config for DHT. -// NOTE: this is app-specific, be careful when using! -func WithCustomDHTOpts(dhtOptFactory func(host.Host) ([]dht.Option, error)) Option { - return func(opts *options) error { - opts.DHTCustomOpts = dhtOptFactory - - return nil - } -} - -func withRandomIdentity() Option { - return func(opts *options) error { - // Do not generate random identity if we already have the key - if opts.Key != nil { - return nil - } - - // Generate random key - generatedKey, _, err := crypto.GenerateKeyPairWithReader( - crypto.Ed25519, // Select your key type. Ed25519 are nice short - -1, // Select key length when possible (i.e. RSA). - rand.Reader, // Always generate a random ID - ) - if err != nil { - return fmt.Errorf("failed to create identity key: %w", err) - } - - // set key - opts.Key = generatedKey - - return nil - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package p2p + +import ( + "crypto/ed25519" + "crypto/rand" + "errors" + "fmt" + "os" + "time" + + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/providers" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/crypto/ssh" +) + +type APIRegistrer func(host.Host) error + +type options struct { + Key crypto.PrivKey + ListenAddress string + DirectoryAPIAddress string + BootstrapPeers []peer.AddrInfo + RefreshInterval time.Duration + Randevous string + APIRegistrer APIRegistrer + ProviderStore providers.ProviderStore + DHTCustomOpts func(host.Host) ([]dht.Option, error) +} + +type Option func(*options) error + +func WithRandevous(randevous string) Option { + return func(opts *options) error { + opts.Randevous = randevous + + return nil + } +} + +func WithIdentityKey(key crypto.PrivKey) Option { + return func(opts *options) error { + opts.Key = key + + return nil + } +} + +func WithIdentityKeyPath(keyPath string) Option { + return func(opts *options) error { + // If path is not set, skip + if keyPath == "" { + return nil + } + + // Read data + keyBytes, err := os.ReadFile(keyPath) + if err != nil { + return fmt.Errorf("failed to read key: %w", err) + } + + // Parse the private key + key, err := ssh.ParseRawPrivateKey(keyBytes) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + + // Try to convert to ED25519 private key + ed25519Key, ok := key.(ed25519.PrivateKey) + if !ok { + return errors.New("key is not an ED25519 private key") + } + + // Generate random key + generatedKey, err := 
crypto.UnmarshalEd25519PrivateKey(ed25519Key) + if err != nil { + return fmt.Errorf("failed to unmarshal identity key: %w", err) + } + + // set key + opts.Key = generatedKey + + return nil + } +} + +func WithListenAddress(addr string) Option { + return func(opts *options) error { + opts.ListenAddress = addr + + return nil + } +} + +func WithDirectoryAPIAddress(addr string) Option { + return func(opts *options) error { + opts.DirectoryAPIAddress = addr + + return nil + } +} + +func WithBootstrapAddrs(addrs []string) Option { + return func(opts *options) error { + peerInfos := make([]peer.AddrInfo, len(addrs)) + + for i, addr := range addrs { + peerinfo, err := peer.AddrInfoFromString(addr) + if err != nil { + return fmt.Errorf("invalid bootstrap addr: %w", err) + } + + peerInfos[i] = *peerinfo + } + + opts.BootstrapPeers = peerInfos + + return nil + } +} + +func WithBootstrapPeers(peers []peer.AddrInfo) Option { + return func(opts *options) error { + opts.BootstrapPeers = peers + + return nil + } +} + +func WithRefreshInterval(period time.Duration) Option { + return func(opts *options) error { + opts.RefreshInterval = period + + return nil + } +} + +// API can only be registreded for non-bootstrap nodes. +func WithAPIRegistrer(reg APIRegistrer) Option { + return func(opts *options) error { + opts.APIRegistrer = reg + + return nil + } +} + +// WithCustomDHTOpts sets custom config for DHT. +// NOTE: this is app-specific, be careful when using! +func WithCustomDHTOpts(dhtOptFactory func(host.Host) ([]dht.Option, error)) Option { + return func(opts *options) error { + opts.DHTCustomOpts = dhtOptFactory + + return nil + } +} + +func withRandomIdentity() Option { + return func(opts *options) error { + // Do not generate random identity if we already have the key + if opts.Key != nil { + return nil + } + + // Generate random key + generatedKey, _, err := crypto.GenerateKeyPairWithReader( + crypto.Ed25519, // Select your key type. Ed25519 are nice short + -1, // Select key length when possible (i.e. RSA). + rand.Reader, // Always generate a random ID + ) + if err != nil { + return fmt.Errorf("failed to create identity key: %w", err) + } + + // set key + opts.Key = generatedKey + + return nil + } +} diff --git a/server/routing/internal/p2p/server.go b/server/routing/internal/p2p/server.go index 75a848896..b4f0f3314 100644 --- a/server/routing/internal/p2p/server.go +++ b/server/routing/internal/p2p/server.go @@ -1,321 +1,321 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package p2p - -import ( - "context" - "fmt" - - "github.com/agntcy/dir/utils/logging" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/discovery/mdns" - discovery "github.com/libp2p/go-libp2p/p2p/discovery/routing" - "github.com/libp2p/go-libp2p/p2p/host/autorelay" -) - -var logger = logging.Logger("p2p") - -type Server struct { - opts *options - host host.Host - dht *dht.IpfsDHT - closeFn func() -} - -// New constructs a new p2p server. -func New(ctx context.Context, opts ...Option) (*Server, error) { - logger.Debug("Creating new p2p server", "opts", opts) - - // Load options - options := &options{} - for _, opt := range append(opts, withRandomIdentity()) { - if err := opt(options); err != nil { - return nil, err - } - } - - // Start in the background. - // Wait for ready status message before returning. 
- status := <-start(ctx, options) - if status.Err != nil { - return nil, fmt.Errorf("failed while starting services: %w", status.Err) - } - - server := &Server{ - opts: options, - host: status.Host, - dht: status.DHT, - closeFn: status.Close, - } - - logger.Debug("P2P server created", "host", server.host.ID(), "addresses", server.P2pAddrs()) - - return server, nil -} - -// Info returns the addresses at which we can reach this server. -func (s *Server) Info() *peer.AddrInfo { - return &peer.AddrInfo{ - ID: s.host.ID(), - Addrs: s.host.Addrs(), - } -} - -// Returns p2p specific addresses as addrinfos. -func (s *Server) P2pInfo() []peer.AddrInfo { - var p2pInfos []peer.AddrInfo //nolint:prealloc - - for _, addr := range s.P2pAddrs() { - p2pInfo, _ := peer.AddrInfoFromString(addr) - p2pInfos = append(p2pInfos, *p2pInfo) - } - - return p2pInfos -} - -// Returns p2p specific addresses as strings. -func (s *Server) P2pAddrs() []string { - var p2pAddrs []string //nolint:prealloc - for _, addr := range s.host.Addrs() { - p2pAddrs = append(p2pAddrs, fmt.Sprintf("%s/p2p/%s", addr.String(), s.host.ID().String())) - } - - return p2pAddrs -} - -func (s *Server) Host() host.Host { - return s.host -} - -func (s *Server) DHT() *dht.IpfsDHT { - return s.dht -} - -func (s *Server) Key() crypto.PrivKey { - return s.host.Peerstore().PrivKey(s.host.ID()) -} - -// Close stops running services. -func (s *Server) Close() { - s.closeFn() -} - -type status struct { - Err error - Host host.Host - DHT *dht.IpfsDHT - Close func() -} - -// start starts all routing related services. -// This function runs until ctx is closed. -// -// TODO: maybe limit how long we should wait for status channel -// via contexts. -func start(ctx context.Context, opts *options) <-chan status { - statusCh := make(chan status) - - go func() { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Create host - host, err := newHost(opts.ListenAddress, opts.DirectoryAPIAddress, opts.Key) - if err != nil { - statusCh <- status{Err: err} - - return - } - - defer host.Close() - - logger.Debug("Host created", "id", host.ID(), "addresses", host.Addrs()) - - // Enable mDNS for local network peer discovery - setupMDNS(host) - - // Create DHT - var customDhtOpts []dht.Option - if opts.DHTCustomOpts != nil { - customDhtOpts, err = opts.DHTCustomOpts(host) - if err != nil { - statusCh <- status{Err: err} - - return - } - } - - kdht, err := newDHT(ctx, host, opts.BootstrapPeers, opts.RefreshInterval, customDhtOpts...) - if err != nil { - statusCh <- status{Err: err} - - return - } - defer kdht.Close() - - // Enable AutoRelay with DHT as peer source for finding relay candidates. - // AutoRelay makes NAT'd peers reachable by establishing relay circuits. - // The DHT routing table is queried to find potential relay peers. - if err := setupAutoRelay(host, kdht); err != nil { - logger.Warn("Failed to setup AutoRelay", "error", err) - } - - // Advertise to rendezvous for initial peer discovery. - // Peer discovery is now handled automatically by: - // - DHT: Bootstrap() connects to bootstrap peers at startup - // - DHT: RoutingTableRefreshPeriod() maintains routing table (every 30s) - // - GossipSub: Mesh maintenance with peer exchange (if enabled) - // - Connection Manager: Maintains healthy connection count (50-200) - // - // The custom discover() polling loop has been removed as it was redundant - // with DHT's built-in peer discovery and caused excessive polling (60/min). 
- if opts.Randevous != "" { - routingDiscovery := discovery.NewRoutingDiscovery(kdht) - - _, err := routingDiscovery.Advertise(ctx, opts.Randevous) - if err != nil { - logger.Warn("Failed to advertise to rendezvous", - "rendezvous", opts.Randevous, - "error", err) - } else { - logger.Info("Advertised to rendezvous (discovery handled by DHT)", - "rendezvous", opts.Randevous) - } - } - // Register services. Only available on non-bootstrap nodes. - if opts.APIRegistrer != nil && len(opts.BootstrapPeers) > 0 { - err := opts.APIRegistrer(host) - if err != nil { - statusCh <- status{Err: err} - - return - } - } - - // Run until context expiry - logger.Debug("Host and DHT created, running routing services", "host", host.ID(), "addresses", host.Addrs()) - - for _, peer := range opts.BootstrapPeers { - for _, addr := range peer.Addrs { - host.Peerstore().AddAddr(peer.ID, addr, 0) - } - } - - <-kdht.RefreshRoutingTable() - - // At this point, we are done. - // Notify listener that we are ready. - statusCh <- status{ - Host: host, - DHT: kdht, - Close: func() { - cancel() - host.Close() - kdht.Close() - }, - } - - // Wait for context to close - <-ctx.Done() - }() - - return statusCh -} - -// setupAutoRelay enables AutoRelay with DHT as the peer source for finding relay candidates. -// AutoRelay provides guaranteed connectivity for NAT'd peers by establishing relay circuits. -// The DHT routing table is used to discover potential relay peers (public nodes). -func setupAutoRelay(h host.Host, kdht *dht.IpfsDHT) error { - // Create a peer source function that queries DHT for relay candidates - peerSource := func(ctx context.Context, numPeers int) <-chan peer.AddrInfo { - peerChan := make(chan peer.AddrInfo) - - go func() { - defer close(peerChan) - - // Get peers from DHT routing table - // These are likely good relay candidates (public, well-connected) - routingTable := kdht.RoutingTable() - peers := routingTable.ListPeers() - - // Send up to numPeers candidates - count := 0 - for _, p := range peers { - if count >= numPeers { - break - } - - // Get peer's address info from peerstore - addrs := h.Peerstore().Addrs(p) - if len(addrs) > 0 { - select { - case peerChan <- peer.AddrInfo{ID: p, Addrs: addrs}: - count++ - case <-ctx.Done(): - return - } - } - } - - logger.Debug("Provided relay candidates from DHT", - "requested", numPeers, - "provided", count) - }() - - return peerChan - } - - // Enable AutoRelay with DHT-based peer source - _, err := autorelay.NewAutoRelay(h, autorelay.WithPeerSource(peerSource)) - if err != nil { - return fmt.Errorf("failed to enable AutoRelay: %w", err) - } - - logger.Info("AutoRelay enabled with DHT peer source") - - return nil -} - -// mdnsNotifee handles mDNS peer discovery events. -type mdnsNotifee struct { - host host.Host -} - -// HandlePeerFound is called when mDNS discovers a peer on the local network. -func (n *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) { - // Connect to discovered local peer - if err := n.host.Connect(context.Background(), pi); err != nil { - logger.Debug("Failed to connect to mDNS discovered peer", - "peer", pi.ID, - "error", err) - - return - } - - logger.Info("Connected to local peer via mDNS", - "peer", pi.ID, - "addrs", pi.Addrs) -} - -// setupMDNS enables mDNS discovery for local network peers. -// Peers on the same LAN will discover each other in < 1 second without bootstrap nodes. -// This is useful for development, testing, and enterprise LAN deployments. 
-func setupMDNS(h host.Host) { - notifee := &mdnsNotifee{host: h} - - service := mdns.NewMdnsService(h, MDNSServiceName, notifee) - if err := service.Start(); err != nil { - logger.Warn("Failed to start mDNS discovery", - "service", MDNSServiceName, - "error", err) - - return - } - - logger.Info("mDNS local discovery enabled", - "service", MDNSServiceName) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package p2p + +import ( + "context" + "fmt" + + "github.com/agntcy/dir/utils/logging" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/discovery/mdns" + discovery "github.com/libp2p/go-libp2p/p2p/discovery/routing" + "github.com/libp2p/go-libp2p/p2p/host/autorelay" +) + +var logger = logging.Logger("p2p") + +type Server struct { + opts *options + host host.Host + dht *dht.IpfsDHT + closeFn func() +} + +// New constructs a new p2p server. +func New(ctx context.Context, opts ...Option) (*Server, error) { + logger.Debug("Creating new p2p server", "opts", opts) + + // Load options + options := &options{} + for _, opt := range append(opts, withRandomIdentity()) { + if err := opt(options); err != nil { + return nil, err + } + } + + // Start in the background. + // Wait for ready status message before returning. + status := <-start(ctx, options) + if status.Err != nil { + return nil, fmt.Errorf("failed while starting services: %w", status.Err) + } + + server := &Server{ + opts: options, + host: status.Host, + dht: status.DHT, + closeFn: status.Close, + } + + logger.Debug("P2P server created", "host", server.host.ID(), "addresses", server.P2pAddrs()) + + return server, nil +} + +// Info returns the addresses at which we can reach this server. +func (s *Server) Info() *peer.AddrInfo { + return &peer.AddrInfo{ + ID: s.host.ID(), + Addrs: s.host.Addrs(), + } +} + +// Returns p2p specific addresses as addrinfos. +func (s *Server) P2pInfo() []peer.AddrInfo { + var p2pInfos []peer.AddrInfo //nolint:prealloc + + for _, addr := range s.P2pAddrs() { + p2pInfo, _ := peer.AddrInfoFromString(addr) + p2pInfos = append(p2pInfos, *p2pInfo) + } + + return p2pInfos +} + +// Returns p2p specific addresses as strings. +func (s *Server) P2pAddrs() []string { + var p2pAddrs []string //nolint:prealloc + for _, addr := range s.host.Addrs() { + p2pAddrs = append(p2pAddrs, fmt.Sprintf("%s/p2p/%s", addr.String(), s.host.ID().String())) + } + + return p2pAddrs +} + +func (s *Server) Host() host.Host { + return s.host +} + +func (s *Server) DHT() *dht.IpfsDHT { + return s.dht +} + +func (s *Server) Key() crypto.PrivKey { + return s.host.Peerstore().PrivKey(s.host.ID()) +} + +// Close stops running services. +func (s *Server) Close() { + s.closeFn() +} + +type status struct { + Err error + Host host.Host + DHT *dht.IpfsDHT + Close func() +} + +// start starts all routing related services. +// This function runs until ctx is closed. +// +// TODO: maybe limit how long we should wait for status channel +// via contexts. 
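+//
+// The returned channel yields a single status value; a sketch of the
+// consumer side (this mirrors how New above uses it):
+//
+//	st := <-start(ctx, options)
+//	if st.Err != nil {
+//		return nil, st.Err
+//	}
+//	defer st.Close()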
+func start(ctx context.Context, opts *options) <-chan status { + statusCh := make(chan status) + + go func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create host + host, err := newHost(opts.ListenAddress, opts.DirectoryAPIAddress, opts.Key) + if err != nil { + statusCh <- status{Err: err} + + return + } + + defer host.Close() + + logger.Debug("Host created", "id", host.ID(), "addresses", host.Addrs()) + + // Enable mDNS for local network peer discovery + setupMDNS(host) + + // Create DHT + var customDhtOpts []dht.Option + if opts.DHTCustomOpts != nil { + customDhtOpts, err = opts.DHTCustomOpts(host) + if err != nil { + statusCh <- status{Err: err} + + return + } + } + + kdht, err := newDHT(ctx, host, opts.BootstrapPeers, opts.RefreshInterval, customDhtOpts...) + if err != nil { + statusCh <- status{Err: err} + + return + } + defer kdht.Close() + + // Enable AutoRelay with DHT as peer source for finding relay candidates. + // AutoRelay makes NAT'd peers reachable by establishing relay circuits. + // The DHT routing table is queried to find potential relay peers. + if err := setupAutoRelay(host, kdht); err != nil { + logger.Warn("Failed to setup AutoRelay", "error", err) + } + + // Advertise to rendezvous for initial peer discovery. + // Peer discovery is now handled automatically by: + // - DHT: Bootstrap() connects to bootstrap peers at startup + // - DHT: RoutingTableRefreshPeriod() maintains routing table (every 30s) + // - GossipSub: Mesh maintenance with peer exchange (if enabled) + // - Connection Manager: Maintains healthy connection count (50-200) + // + // The custom discover() polling loop has been removed as it was redundant + // with DHT's built-in peer discovery and caused excessive polling (60/min). + if opts.Randevous != "" { + routingDiscovery := discovery.NewRoutingDiscovery(kdht) + + _, err := routingDiscovery.Advertise(ctx, opts.Randevous) + if err != nil { + logger.Warn("Failed to advertise to rendezvous", + "rendezvous", opts.Randevous, + "error", err) + } else { + logger.Info("Advertised to rendezvous (discovery handled by DHT)", + "rendezvous", opts.Randevous) + } + } + // Register services. Only available on non-bootstrap nodes. + if opts.APIRegistrer != nil && len(opts.BootstrapPeers) > 0 { + err := opts.APIRegistrer(host) + if err != nil { + statusCh <- status{Err: err} + + return + } + } + + // Run until context expiry + logger.Debug("Host and DHT created, running routing services", "host", host.ID(), "addresses", host.Addrs()) + + for _, peer := range opts.BootstrapPeers { + for _, addr := range peer.Addrs { + host.Peerstore().AddAddr(peer.ID, addr, 0) + } + } + + <-kdht.RefreshRoutingTable() + + // At this point, we are done. + // Notify listener that we are ready. + statusCh <- status{ + Host: host, + DHT: kdht, + Close: func() { + cancel() + host.Close() + kdht.Close() + }, + } + + // Wait for context to close + <-ctx.Done() + }() + + return statusCh +} + +// setupAutoRelay enables AutoRelay with DHT as the peer source for finding relay candidates. +// AutoRelay provides guaranteed connectivity for NAT'd peers by establishing relay circuits. +// The DHT routing table is used to discover potential relay peers (public nodes). 
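+//
+// Illustrative shape of the peer-source contract (AutoRelay pulls candidates
+// from the returned channel; this sketch is not part of the function below):
+//
+//	ch := peerSource(ctx, 3) // AutoRelay asks for up to 3 candidates
+//	for pi := range ch {
+//		_ = pi // each pi is a peer.AddrInfo taken from the DHT routing table
+//	}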
+func setupAutoRelay(h host.Host, kdht *dht.IpfsDHT) error { + // Create a peer source function that queries DHT for relay candidates + peerSource := func(ctx context.Context, numPeers int) <-chan peer.AddrInfo { + peerChan := make(chan peer.AddrInfo) + + go func() { + defer close(peerChan) + + // Get peers from DHT routing table + // These are likely good relay candidates (public, well-connected) + routingTable := kdht.RoutingTable() + peers := routingTable.ListPeers() + + // Send up to numPeers candidates + count := 0 + for _, p := range peers { + if count >= numPeers { + break + } + + // Get peer's address info from peerstore + addrs := h.Peerstore().Addrs(p) + if len(addrs) > 0 { + select { + case peerChan <- peer.AddrInfo{ID: p, Addrs: addrs}: + count++ + case <-ctx.Done(): + return + } + } + } + + logger.Debug("Provided relay candidates from DHT", + "requested", numPeers, + "provided", count) + }() + + return peerChan + } + + // Enable AutoRelay with DHT-based peer source + _, err := autorelay.NewAutoRelay(h, autorelay.WithPeerSource(peerSource)) + if err != nil { + return fmt.Errorf("failed to enable AutoRelay: %w", err) + } + + logger.Info("AutoRelay enabled with DHT peer source") + + return nil +} + +// mdnsNotifee handles mDNS peer discovery events. +type mdnsNotifee struct { + host host.Host +} + +// HandlePeerFound is called when mDNS discovers a peer on the local network. +func (n *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) { + // Connect to discovered local peer + if err := n.host.Connect(context.Background(), pi); err != nil { + logger.Debug("Failed to connect to mDNS discovered peer", + "peer", pi.ID, + "error", err) + + return + } + + logger.Info("Connected to local peer via mDNS", + "peer", pi.ID, + "addrs", pi.Addrs) +} + +// setupMDNS enables mDNS discovery for local network peers. +// Peers on the same LAN will discover each other in < 1 second without bootstrap nodes. +// This is useful for development, testing, and enterprise LAN deployments. +func setupMDNS(h host.Host) { + notifee := &mdnsNotifee{host: h} + + service := mdns.NewMdnsService(h, MDNSServiceName, notifee) + if err := service.Start(); err != nil { + logger.Warn("Failed to start mDNS discovery", + "service", MDNSServiceName, + "error", err) + + return + } + + logger.Info("mDNS local discovery enabled", + "service", MDNSServiceName) +} diff --git a/server/routing/label_utils.go b/server/routing/label_utils.go index 757ff4aca..667d739aa 100644 --- a/server/routing/label_utils.go +++ b/server/routing/label_utils.go @@ -1,87 +1,87 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "errors" - "fmt" - "strings" - - "github.com/agntcy/dir/server/types" -) - -// Key manipulation utilities for routing operations. -// These functions handle the enhanced label key format: /namespace/value/CID/PeerID - -// Example: Label("/skills/AI/ML") → "/skills/AI/ML/CID123/Peer1". -func BuildEnhancedLabelKey(label types.Label, cid, peerID string) string { - return fmt.Sprintf("%s/%s/%s", label.String(), cid, peerID) -} - -// Example: "/skills/AI/ML/CID123/Peer1" → (Label("/skills/AI/ML"), "CID123", "Peer1", nil). -func ParseEnhancedLabelKey(key string) (types.Label, string, string, error) { - labelStr, cid, peerID, err := parseEnhancedLabelKeyInternal(key) - if err != nil { - return types.Label(""), "", "", err - } - - return types.Label(labelStr), cid, peerID, nil -} - -// parseEnhancedLabelKeyInternal contains the actual parsing logic. 
-// This is used internally by ParseEnhancedLabelKey. -func parseEnhancedLabelKeyInternal(key string) (string, string, string, error) { - if !strings.HasPrefix(key, "/") { - return "", "", "", errors.New("key must start with /") - } - - parts := strings.Split(key, "/") - if len(parts) < types.MinLabelKeyParts { - return "", "", "", errors.New("key must have at least namespace/path/CID/PeerID") - } - - // Extract PeerID (last part) and CID (second to last part) - peerID := parts[len(parts)-1] - cid := parts[len(parts)-2] - - // Extract label (everything except the last two parts) - labelParts := parts[1 : len(parts)-2] // Skip empty first part and last two parts - label := "/" + strings.Join(labelParts, "/") - - return label, cid, peerID, nil -} - -// ExtractPeerIDFromKey extracts just the PeerID from a self-descriptive key. -func ExtractPeerIDFromKey(key string) string { - parts := strings.Split(key, "/") - if len(parts) < types.MinLabelKeyParts { - return "" - } - - return parts[len(parts)-1] -} - -// IsValidLabelKey checks if a key starts with any valid label type prefix. -// Returns true if the key starts with /skills/, /domains/, /features/, or /locators/. -func IsValidLabelKey(key string) bool { - for _, labelType := range types.AllLabelTypes() { - if strings.HasPrefix(key, labelType.Prefix()) { - return true - } - } - - return false -} - -// GetLabelTypeFromKey extracts the label type from a key. -// Returns the label type and true if found, or LabelTypeUnknown and false if not found. -func GetLabelTypeFromKey(key string) (types.LabelType, bool) { - for _, labelType := range types.AllLabelTypes() { - if strings.HasPrefix(key, labelType.Prefix()) { - return labelType, true - } - } - - return types.LabelTypeUnknown, false -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "errors" + "fmt" + "strings" + + "github.com/agntcy/dir/server/types" +) + +// Key manipulation utilities for routing operations. +// These functions handle the enhanced label key format: /namespace/value/CID/PeerID + +// Example: Label("/skills/AI/ML") → "/skills/AI/ML/CID123/Peer1". +func BuildEnhancedLabelKey(label types.Label, cid, peerID string) string { + return fmt.Sprintf("%s/%s/%s", label.String(), cid, peerID) +} + +// Example: "/skills/AI/ML/CID123/Peer1" → (Label("/skills/AI/ML"), "CID123", "Peer1", nil). +func ParseEnhancedLabelKey(key string) (types.Label, string, string, error) { + labelStr, cid, peerID, err := parseEnhancedLabelKeyInternal(key) + if err != nil { + return types.Label(""), "", "", err + } + + return types.Label(labelStr), cid, peerID, nil +} + +// parseEnhancedLabelKeyInternal contains the actual parsing logic. +// This is used internally by ParseEnhancedLabelKey. 
+func parseEnhancedLabelKeyInternal(key string) (string, string, string, error) { + if !strings.HasPrefix(key, "/") { + return "", "", "", errors.New("key must start with /") + } + + parts := strings.Split(key, "/") + if len(parts) < types.MinLabelKeyParts { + return "", "", "", errors.New("key must have at least namespace/path/CID/PeerID") + } + + // Extract PeerID (last part) and CID (second to last part) + peerID := parts[len(parts)-1] + cid := parts[len(parts)-2] + + // Extract label (everything except the last two parts) + labelParts := parts[1 : len(parts)-2] // Skip empty first part and last two parts + label := "/" + strings.Join(labelParts, "/") + + return label, cid, peerID, nil +} + +// ExtractPeerIDFromKey extracts just the PeerID from a self-descriptive key. +func ExtractPeerIDFromKey(key string) string { + parts := strings.Split(key, "/") + if len(parts) < types.MinLabelKeyParts { + return "" + } + + return parts[len(parts)-1] +} + +// IsValidLabelKey checks if a key starts with any valid label type prefix. +// Returns true if the key starts with /skills/, /domains/, /features/, or /locators/. +func IsValidLabelKey(key string) bool { + for _, labelType := range types.AllLabelTypes() { + if strings.HasPrefix(key, labelType.Prefix()) { + return true + } + } + + return false +} + +// GetLabelTypeFromKey extracts the label type from a key. +// Returns the label type and true if found, or LabelTypeUnknown and false if not found. +func GetLabelTypeFromKey(key string) (types.LabelType, bool) { + for _, labelType := range types.AllLabelTypes() { + if strings.HasPrefix(key, labelType.Prefix()) { + return labelType, true + } + } + + return types.LabelTypeUnknown, false +} diff --git a/server/routing/label_utils_test.go b/server/routing/label_utils_test.go index fa818cf7b..79766a019 100644 --- a/server/routing/label_utils_test.go +++ b/server/routing/label_utils_test.go @@ -1,424 +1,424 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "testing" - - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestBuildEnhancedLabelKey(t *testing.T) { - testCases := []struct { - name string - label types.Label - cid string - peerID string - expected string - }{ - { - name: "skill_label", - label: types.Label("/skills/AI/ML"), - cid: "CID123", - peerID: "Peer1", - expected: "/skills/AI/ML/CID123/Peer1", - }, - { - name: "domain_label", - label: types.Label("/domains/research"), - cid: "CID456", - peerID: "Peer2", - expected: "/domains/research/CID456/Peer2", - }, - { - name: "module_label", - label: types.Label("/modules/runtime/model"), - cid: "CID789", - peerID: "Peer3", - expected: "/modules/runtime/model/CID789/Peer3", - }, - { - name: "locator_label", - label: types.Label("/locators/docker-image"), - cid: "CID999", - peerID: "Peer4", - expected: "/locators/docker-image/CID999/Peer4", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := BuildEnhancedLabelKey(tc.label, tc.cid, tc.peerID) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestParseEnhancedLabelKey(t *testing.T) { - testCases := []struct { - name string - key string - expectedLabel types.Label - expectedCID string - expectedPeer string - expectError bool - errorMsg string - }{ - { - name: "valid_skill_key", - key: "/skills/AI/ML/CID123/Peer1", - expectedLabel: types.Label("/skills/AI/ML"), - expectedCID: "CID123", - 
expectedPeer: "Peer1", - expectError: false, - }, - { - name: "valid_domain_key", - key: "/domains/research/healthcare/CID456/Peer2", - expectedLabel: types.Label("/domains/research/healthcare"), - expectedCID: "CID456", - expectedPeer: "Peer2", - expectError: false, - }, - { - name: "valid_module_key", - key: "/modules/runtime/model/CID789/Peer3", - expectedLabel: types.Label("/modules/runtime/model"), - expectedCID: "CID789", - expectedPeer: "Peer3", - expectError: false, - }, - { - name: "invalid_no_leading_slash", - key: "skills/AI/ML/CID123/Peer1", - expectError: true, - errorMsg: "key must start with /", - }, - { - name: "invalid_too_few_parts", - key: "/skills/AI", - expectError: true, - errorMsg: "key must have at least namespace/path/CID/PeerID", - }, - { - name: "minimal_valid_key", - key: "/skills/AI/CID123/Peer1", - expectedLabel: types.Label("/skills/AI"), - expectedCID: "CID123", - expectedPeer: "Peer1", - expectError: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - label, cid, peerID, err := ParseEnhancedLabelKey(tc.key) - - if tc.expectError { - require.Error(t, err) - assert.Contains(t, err.Error(), tc.errorMsg) - assert.Equal(t, types.Label(""), label) - assert.Empty(t, cid) - assert.Empty(t, peerID) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedLabel, label) - assert.Equal(t, tc.expectedCID, cid) - assert.Equal(t, tc.expectedPeer, peerID) - } - }) - } -} - -func TestExtractPeerIDFromKey(t *testing.T) { - testCases := []struct { - name string - key string - expectedPeer string - }{ - { - name: "valid_key", - key: "/skills/AI/ML/CID123/Peer1", - expectedPeer: "Peer1", - }, - { - name: "complex_label", - key: "/domains/research/healthcare/informatics/CID456/Peer2", - expectedPeer: "Peer2", - }, - { - name: "too_few_parts", - key: "/skills/AI", - expectedPeer: "", - }, - { - name: "empty_key", - key: "", - expectedPeer: "", - }, - { - name: "single_slash", - key: "/", - expectedPeer: "", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := ExtractPeerIDFromKey(tc.key) - assert.Equal(t, tc.expectedPeer, result) - }) - } -} - -func TestIsValidLabelKey(t *testing.T) { - testCases := []struct { - name string - key string - expected bool - }{ - // Valid keys - { - name: "valid_skill_key", - key: "/skills/AI/ML/CID123/Peer1", - expected: true, - }, - { - name: "valid_domain_key", - key: "/domains/research/CID123/Peer1", - expected: true, - }, - { - name: "valid_module_key", - key: "/modules/runtime/CID123/Peer1", - expected: true, - }, - { - name: "valid_locator_key", - key: "/locators/docker-image/CID123/Peer1", - expected: true, - }, - // Invalid keys - { - name: "invalid_namespace", - key: "/invalid/test/CID123/Peer1", - expected: false, - }, - { - name: "records_namespace", - key: "/records/CID123", - expected: false, - }, - { - name: "no_leading_slash", - key: "skills/AI/CID123/Peer1", - expected: false, - }, - { - name: "empty_key", - key: "", - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := IsValidLabelKey(tc.key) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetLabelTypeFromKey(t *testing.T) { - testCases := []struct { - name string - key string - expectedType types.LabelType - expectedOK bool - }{ - { - name: "skill_key", - key: "/skills/AI/ML/CID123/Peer1", - expectedType: types.LabelTypeSkill, - expectedOK: true, - }, - { - name: "domain_key", - key: "/domains/research/CID123/Peer1", - 
expectedType: types.LabelTypeDomain, - expectedOK: true, - }, - { - name: "module_key", - key: "/modules/runtime/CID123/Peer1", - expectedType: types.LabelTypeModule, - expectedOK: true, - }, - { - name: "locator_key", - key: "/locators/docker-image/CID123/Peer1", - expectedType: types.LabelTypeLocator, - expectedOK: true, - }, - { - name: "invalid_key", - key: "/invalid/test/CID123/Peer1", - expectedType: types.LabelTypeUnknown, - expectedOK: false, - }, - { - name: "records_key", - key: "/records/CID123", - expectedType: types.LabelTypeUnknown, - expectedOK: false, - }, - { - name: "empty_key", - key: "", - expectedType: types.LabelTypeUnknown, - expectedOK: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - labelType, ok := GetLabelTypeFromKey(tc.key) - assert.Equal(t, tc.expectedType, labelType) - assert.Equal(t, tc.expectedOK, ok) - }) - } -} - -func TestParseEnhancedLabelKeyInternal(t *testing.T) { - testCases := []struct { - name string - key string - expectedLabel string - expectedCID string - expectedPeer string - expectError bool - errorMsg string - }{ - { - name: "valid_simple_key", - key: "/skills/AI/CID123/Peer1", - expectedLabel: "/skills/AI", - expectedCID: "CID123", - expectedPeer: "Peer1", - expectError: false, - }, - { - name: "valid_complex_key", - key: "/modules/runtime/model/security/CID456/Peer2", - expectedLabel: "/modules/runtime/model/security", - expectedCID: "CID456", - expectedPeer: "Peer2", - expectError: false, - }, - { - name: "no_leading_slash", - key: "skills/AI/CID123/Peer1", - expectError: true, - errorMsg: "key must start with /", - }, - { - name: "too_few_parts", - key: "/skills/AI", - expectError: true, - errorMsg: "key must have at least namespace/path/CID/PeerID", - }, - { - name: "exactly_min_parts", - key: "/skills/AI/CID123/Peer1", - expectedLabel: "/skills/AI", - expectedCID: "CID123", - expectedPeer: "Peer1", - expectError: false, - }, - { - name: "empty_key", - key: "", - expectError: true, - errorMsg: "key must start with /", - }, - { - name: "only_slash", - key: "/", - expectError: true, - errorMsg: "key must have at least namespace/path/CID/PeerID", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - label, cid, peerID, err := parseEnhancedLabelKeyInternal(tc.key) - - if tc.expectError { - require.Error(t, err) - assert.Contains(t, err.Error(), tc.errorMsg) - assert.Empty(t, label) - assert.Empty(t, cid) - assert.Empty(t, peerID) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedLabel, label) - assert.Equal(t, tc.expectedCID, cid) - assert.Equal(t, tc.expectedPeer, peerID) - } - }) - } -} - -func TestParseEnhancedLabelKey_RoundTrip(t *testing.T) { - // Test that BuildEnhancedLabelKey and ParseEnhancedLabelKey are inverse operations - testCases := []struct { - label types.Label - cid string - peerID string - }{ - {types.Label("/skills/AI/ML"), "CID123", "Peer1"}, - {types.Label("/domains/research"), "CID456", "Peer2"}, - {types.Label("/modules/runtime/model/security"), "CID789", "Peer3"}, - {types.Label("/locators/docker-image"), "CID999", "Peer4"}, - } - - for _, tc := range testCases { - t.Run(tc.label.String(), func(t *testing.T) { - // Build key - key := BuildEnhancedLabelKey(tc.label, tc.cid, tc.peerID) - - // Parse it back - parsedLabel, parsedCID, parsedPeer, err := ParseEnhancedLabelKey(key) - - require.NoError(t, err) - assert.Equal(t, tc.label, parsedLabel) - assert.Equal(t, tc.cid, parsedCID) - assert.Equal(t, tc.peerID, parsedPeer) - }) - } -} 
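-
-// The benchmarks below can be run in isolation with the standard Go tooling,
-// for example (illustrative invocation):
-//
-//	go test -run '^$' -bench 'EnhancedLabelKey' ./server/routing/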
- -func BenchmarkBuildEnhancedLabelKey(b *testing.B) { - label := types.Label("/skills/AI/ML") - cid := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" - peerID := "12D3KooWBhvJH9k6u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8" - - b.ResetTimer() - - for range b.N { - _ = BuildEnhancedLabelKey(label, cid, peerID) - } -} - -func BenchmarkParseEnhancedLabelKey(b *testing.B) { - key := "/skills/AI/ML/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/12D3KooWBhvJH9k6u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8" - - b.ResetTimer() - - for range b.N { - _, _, _, _ = ParseEnhancedLabelKey(key) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "testing" + + "github.com/agntcy/dir/server/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBuildEnhancedLabelKey(t *testing.T) { + testCases := []struct { + name string + label types.Label + cid string + peerID string + expected string + }{ + { + name: "skill_label", + label: types.Label("/skills/AI/ML"), + cid: "CID123", + peerID: "Peer1", + expected: "/skills/AI/ML/CID123/Peer1", + }, + { + name: "domain_label", + label: types.Label("/domains/research"), + cid: "CID456", + peerID: "Peer2", + expected: "/domains/research/CID456/Peer2", + }, + { + name: "module_label", + label: types.Label("/modules/runtime/model"), + cid: "CID789", + peerID: "Peer3", + expected: "/modules/runtime/model/CID789/Peer3", + }, + { + name: "locator_label", + label: types.Label("/locators/docker-image"), + cid: "CID999", + peerID: "Peer4", + expected: "/locators/docker-image/CID999/Peer4", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := BuildEnhancedLabelKey(tc.label, tc.cid, tc.peerID) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestParseEnhancedLabelKey(t *testing.T) { + testCases := []struct { + name string + key string + expectedLabel types.Label + expectedCID string + expectedPeer string + expectError bool + errorMsg string + }{ + { + name: "valid_skill_key", + key: "/skills/AI/ML/CID123/Peer1", + expectedLabel: types.Label("/skills/AI/ML"), + expectedCID: "CID123", + expectedPeer: "Peer1", + expectError: false, + }, + { + name: "valid_domain_key", + key: "/domains/research/healthcare/CID456/Peer2", + expectedLabel: types.Label("/domains/research/healthcare"), + expectedCID: "CID456", + expectedPeer: "Peer2", + expectError: false, + }, + { + name: "valid_module_key", + key: "/modules/runtime/model/CID789/Peer3", + expectedLabel: types.Label("/modules/runtime/model"), + expectedCID: "CID789", + expectedPeer: "Peer3", + expectError: false, + }, + { + name: "invalid_no_leading_slash", + key: "skills/AI/ML/CID123/Peer1", + expectError: true, + errorMsg: "key must start with /", + }, + { + name: "invalid_too_few_parts", + key: "/skills/AI", + expectError: true, + errorMsg: "key must have at least namespace/path/CID/PeerID", + }, + { + name: "minimal_valid_key", + key: "/skills/AI/CID123/Peer1", + expectedLabel: types.Label("/skills/AI"), + expectedCID: "CID123", + expectedPeer: "Peer1", + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + label, cid, peerID, err := ParseEnhancedLabelKey(tc.key) + + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorMsg) + assert.Equal(t, types.Label(""), label) + assert.Empty(t, cid) + assert.Empty(t, peerID) + } else { + require.NoError(t, err) + 
assert.Equal(t, tc.expectedLabel, label) + assert.Equal(t, tc.expectedCID, cid) + assert.Equal(t, tc.expectedPeer, peerID) + } + }) + } +} + +func TestExtractPeerIDFromKey(t *testing.T) { + testCases := []struct { + name string + key string + expectedPeer string + }{ + { + name: "valid_key", + key: "/skills/AI/ML/CID123/Peer1", + expectedPeer: "Peer1", + }, + { + name: "complex_label", + key: "/domains/research/healthcare/informatics/CID456/Peer2", + expectedPeer: "Peer2", + }, + { + name: "too_few_parts", + key: "/skills/AI", + expectedPeer: "", + }, + { + name: "empty_key", + key: "", + expectedPeer: "", + }, + { + name: "single_slash", + key: "/", + expectedPeer: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := ExtractPeerIDFromKey(tc.key) + assert.Equal(t, tc.expectedPeer, result) + }) + } +} + +func TestIsValidLabelKey(t *testing.T) { + testCases := []struct { + name string + key string + expected bool + }{ + // Valid keys + { + name: "valid_skill_key", + key: "/skills/AI/ML/CID123/Peer1", + expected: true, + }, + { + name: "valid_domain_key", + key: "/domains/research/CID123/Peer1", + expected: true, + }, + { + name: "valid_module_key", + key: "/modules/runtime/CID123/Peer1", + expected: true, + }, + { + name: "valid_locator_key", + key: "/locators/docker-image/CID123/Peer1", + expected: true, + }, + // Invalid keys + { + name: "invalid_namespace", + key: "/invalid/test/CID123/Peer1", + expected: false, + }, + { + name: "records_namespace", + key: "/records/CID123", + expected: false, + }, + { + name: "no_leading_slash", + key: "skills/AI/CID123/Peer1", + expected: false, + }, + { + name: "empty_key", + key: "", + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := IsValidLabelKey(tc.key) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestGetLabelTypeFromKey(t *testing.T) { + testCases := []struct { + name string + key string + expectedType types.LabelType + expectedOK bool + }{ + { + name: "skill_key", + key: "/skills/AI/ML/CID123/Peer1", + expectedType: types.LabelTypeSkill, + expectedOK: true, + }, + { + name: "domain_key", + key: "/domains/research/CID123/Peer1", + expectedType: types.LabelTypeDomain, + expectedOK: true, + }, + { + name: "module_key", + key: "/modules/runtime/CID123/Peer1", + expectedType: types.LabelTypeModule, + expectedOK: true, + }, + { + name: "locator_key", + key: "/locators/docker-image/CID123/Peer1", + expectedType: types.LabelTypeLocator, + expectedOK: true, + }, + { + name: "invalid_key", + key: "/invalid/test/CID123/Peer1", + expectedType: types.LabelTypeUnknown, + expectedOK: false, + }, + { + name: "records_key", + key: "/records/CID123", + expectedType: types.LabelTypeUnknown, + expectedOK: false, + }, + { + name: "empty_key", + key: "", + expectedType: types.LabelTypeUnknown, + expectedOK: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + labelType, ok := GetLabelTypeFromKey(tc.key) + assert.Equal(t, tc.expectedType, labelType) + assert.Equal(t, tc.expectedOK, ok) + }) + } +} + +func TestParseEnhancedLabelKeyInternal(t *testing.T) { + testCases := []struct { + name string + key string + expectedLabel string + expectedCID string + expectedPeer string + expectError bool + errorMsg string + }{ + { + name: "valid_simple_key", + key: "/skills/AI/CID123/Peer1", + expectedLabel: "/skills/AI", + expectedCID: "CID123", + expectedPeer: "Peer1", + expectError: false, + }, + { + name: 
"valid_complex_key", + key: "/modules/runtime/model/security/CID456/Peer2", + expectedLabel: "/modules/runtime/model/security", + expectedCID: "CID456", + expectedPeer: "Peer2", + expectError: false, + }, + { + name: "no_leading_slash", + key: "skills/AI/CID123/Peer1", + expectError: true, + errorMsg: "key must start with /", + }, + { + name: "too_few_parts", + key: "/skills/AI", + expectError: true, + errorMsg: "key must have at least namespace/path/CID/PeerID", + }, + { + name: "exactly_min_parts", + key: "/skills/AI/CID123/Peer1", + expectedLabel: "/skills/AI", + expectedCID: "CID123", + expectedPeer: "Peer1", + expectError: false, + }, + { + name: "empty_key", + key: "", + expectError: true, + errorMsg: "key must start with /", + }, + { + name: "only_slash", + key: "/", + expectError: true, + errorMsg: "key must have at least namespace/path/CID/PeerID", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + label, cid, peerID, err := parseEnhancedLabelKeyInternal(tc.key) + + if tc.expectError { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.errorMsg) + assert.Empty(t, label) + assert.Empty(t, cid) + assert.Empty(t, peerID) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedLabel, label) + assert.Equal(t, tc.expectedCID, cid) + assert.Equal(t, tc.expectedPeer, peerID) + } + }) + } +} + +func TestParseEnhancedLabelKey_RoundTrip(t *testing.T) { + // Test that BuildEnhancedLabelKey and ParseEnhancedLabelKey are inverse operations + testCases := []struct { + label types.Label + cid string + peerID string + }{ + {types.Label("/skills/AI/ML"), "CID123", "Peer1"}, + {types.Label("/domains/research"), "CID456", "Peer2"}, + {types.Label("/modules/runtime/model/security"), "CID789", "Peer3"}, + {types.Label("/locators/docker-image"), "CID999", "Peer4"}, + } + + for _, tc := range testCases { + t.Run(tc.label.String(), func(t *testing.T) { + // Build key + key := BuildEnhancedLabelKey(tc.label, tc.cid, tc.peerID) + + // Parse it back + parsedLabel, parsedCID, parsedPeer, err := ParseEnhancedLabelKey(key) + + require.NoError(t, err) + assert.Equal(t, tc.label, parsedLabel) + assert.Equal(t, tc.cid, parsedCID) + assert.Equal(t, tc.peerID, parsedPeer) + }) + } +} + +func BenchmarkBuildEnhancedLabelKey(b *testing.B) { + label := types.Label("/skills/AI/ML") + cid := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" + peerID := "12D3KooWBhvJH9k6u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8" + + b.ResetTimer() + + for range b.N { + _ = BuildEnhancedLabelKey(label, cid, peerID) + } +} + +func BenchmarkParseEnhancedLabelKey(b *testing.B) { + key := "/skills/AI/ML/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/12D3KooWBhvJH9k6u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8Z8u7S5Q8" + + b.ResetTimer() + + for range b.N { + _, _, _, _ = ParseEnhancedLabelKey(key) + } +} diff --git a/server/routing/metrics.go b/server/routing/metrics.go index c06be11f7..8c9e9cd94 100644 --- a/server/routing/metrics.go +++ b/server/routing/metrics.go @@ -1,117 +1,117 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package routing provides label frequency metrics for operational monitoring. -// -// The Metrics system tracks how many records are associated with each label -// (skills, domains, features) on the local peer. 
This data is persisted to -// the datastore and can be used for: -// -// - Operational monitoring and dashboards -// - Debugging label distribution issues -// - Future query optimization features -// - Administrative APIs and tooling -// -// Metrics are automatically maintained during Publish/Unpublish operations -// and stored at the "/metrics" datastore key in JSON format. -package routing - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/agntcy/dir/server/types" - "github.com/ipfs/go-datastore" -) - -// LabelMetric represents the frequency count for a specific label. -type LabelMetric struct { - Name string `json:"name"` // Full label name (e.g., "/skills/AI/ML", "/domains/research") - Total uint64 `json:"total"` // Number of local records that have this label -} - -// Metrics tracks label frequency distribution for operational monitoring. -// This provides visibility into what types of records this peer is providing -// and can be used for debugging, monitoring, and future optimization features. -type Metrics struct { - Data map[string]LabelMetric `json:"data"` // Map of label name → frequency count -} - -func (m *Metrics) increment(label types.Label) { - labelStr := label.String() - if _, ok := m.Data[labelStr]; !ok { - m.Data[labelStr] = LabelMetric{ - Name: labelStr, - Total: 0, - } - } - - m.Data[labelStr] = LabelMetric{ - Name: labelStr, - Total: m.Data[labelStr].Total + 1, - } -} - -func (m *Metrics) decrement(label types.Label) { - labelStr := label.String() - if _, ok := m.Data[labelStr]; !ok { - return - } - - currentTotal := m.Data[labelStr].Total - if currentTotal > 0 { - m.Data[labelStr] = LabelMetric{ - Name: labelStr, - Total: currentTotal - 1, - } - } - - // Remove the label from the map if the total is zero. - if m.Data[labelStr].Total == 0 { - delete(m.Data, labelStr) - } -} - -// NOTE: counts() method removed as it's no longer used in the new List API -// The new ListResponse doesn't include label_counts field for simplicity - -// NOTE: labels() method removed as it's no longer used in the new List API -// The new List API doesn't return peer statistics for empty requests - -func (m *Metrics) update(ctx context.Context, dstore types.Datastore) error { - data, err := json.Marshal(m) - if err != nil { - return fmt.Errorf("failed to marshal metrics data: %w", err) - } - - err = dstore.Put(ctx, datastore.NewKey("/metrics"), data) - if err != nil { - return fmt.Errorf("failed to update metrics data: %w", err) - } - - return nil -} - -func loadMetrics(ctx context.Context, dstore types.Datastore) (*Metrics, error) { - // Fetch metrics data - data, err := dstore.Get(ctx, datastore.NewKey("/metrics")) - if err != nil { - if errors.Is(err, datastore.ErrNotFound) { - return &Metrics{ - Data: make(map[string]LabelMetric), - }, nil - } - - return nil, fmt.Errorf("failed to update metrics data: %w", err) - } - - // Parse existing metrics data - var metrics Metrics - if err := json.Unmarshal(data, &metrics); err != nil { - return nil, fmt.Errorf("failed to unmarshal metrics data: %w", err) - } - - return &metrics, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package routing provides label frequency metrics for operational monitoring. +// +// The Metrics system tracks how many records are associated with each label +// (skills, domains, features) on the local peer. 
This data is persisted to
+// the datastore and can be used for:
+//
+//   - Operational monitoring and dashboards
+//   - Debugging label distribution issues
+//   - Future query optimization features
+//   - Administrative APIs and tooling
+//
+// Metrics are automatically maintained during Publish/Unpublish operations
+// and stored at the "/metrics" datastore key in JSON format.
+package routing
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/agntcy/dir/server/types"
+	"github.com/ipfs/go-datastore"
+)
+
+// LabelMetric represents the frequency count for a specific label.
+type LabelMetric struct {
+	Name  string `json:"name"`  // Full label name (e.g., "/skills/AI/ML", "/domains/research")
+	Total uint64 `json:"total"` // Number of local records that have this label
+}
+
+// Metrics tracks label frequency distribution for operational monitoring.
+// This provides visibility into what types of records this peer is providing
+// and can be used for debugging, monitoring, and future optimization features.
+type Metrics struct {
+	Data map[string]LabelMetric `json:"data"` // Map of label name → frequency count
+}
+
+func (m *Metrics) increment(label types.Label) {
+	labelStr := label.String()
+	if _, ok := m.Data[labelStr]; !ok {
+		m.Data[labelStr] = LabelMetric{
+			Name:  labelStr,
+			Total: 0,
+		}
+	}
+
+	m.Data[labelStr] = LabelMetric{
+		Name:  labelStr,
+		Total: m.Data[labelStr].Total + 1,
+	}
+}
+
+func (m *Metrics) decrement(label types.Label) {
+	labelStr := label.String()
+	if _, ok := m.Data[labelStr]; !ok {
+		return
+	}
+
+	currentTotal := m.Data[labelStr].Total
+	if currentTotal > 0 {
+		m.Data[labelStr] = LabelMetric{
+			Name:  labelStr,
+			Total: currentTotal - 1,
+		}
+	}
+
+	// Remove the label from the map if the total is zero.
+	if m.Data[labelStr].Total == 0 {
+		delete(m.Data, labelStr)
+	}
+}
+
+// NOTE: counts() method removed as it's no longer used in the new List API
+// The new ListResponse doesn't include label_counts field for simplicity
+
+// NOTE: labels() method removed as it's no longer used in the new List API
+// The new List API doesn't return peer statistics for empty requests
+
+func (m *Metrics) update(ctx context.Context, dstore types.Datastore) error {
+	data, err := json.Marshal(m)
+	if err != nil {
+		return fmt.Errorf("failed to marshal metrics data: %w", err)
+	}
+
+	err = dstore.Put(ctx, datastore.NewKey("/metrics"), data)
+	if err != nil {
+		return fmt.Errorf("failed to update metrics data: %w", err)
+	}
+
+	return nil
+}
+
+func loadMetrics(ctx context.Context, dstore types.Datastore) (*Metrics, error) {
+	// Fetch metrics data
+	data, err := dstore.Get(ctx, datastore.NewKey("/metrics"))
+	if err != nil {
+		if errors.Is(err, datastore.ErrNotFound) {
+			return &Metrics{
+				Data: make(map[string]LabelMetric),
+			}, nil
+		}
+
+		return nil, fmt.Errorf("failed to fetch metrics data: %w", err)
+	}
+
+	// Parse existing metrics data
+	var metrics Metrics
+	if err := json.Unmarshal(data, &metrics); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal metrics data: %w", err)
+	}
+
+	return &metrics, nil
+}
diff --git a/server/routing/pubsub/constants.go b/server/routing/pubsub/constants.go
index 1244f230d..bcd3c1fd5 100644
--- a/server/routing/pubsub/constants.go
+++ b/server/routing/pubsub/constants.go
@@ -1,32 +1,32 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-package pubsub
-
-// Protocol constants for GossipSub label announcements.
-// These values are INTENTIONALLY NOT CONFIGURABLE to ensure network-wide compatibility. -// All peers must use the same values to communicate properly. -// -// Rationale: -// - Different topics → peers can't discover each other's labels -// - Different message sizes → messages may be rejected -// - Different label limits → validation inconsistencies -// -// If protocol changes are needed, increment the topic version (e.g., "dir/labels/v2") -// and coordinate the upgrade across all peers. -const ( - // TopicLabels is the GossipSub topic for label announcements. - // All peers must subscribe to the same topic to communicate. - // Versioned to allow future protocol changes (e.g., "dir/labels/v2"). - TopicLabels = "dir/labels/v1" - - // MaxMessageSize is the maximum size of label announcement messages. - // This prevents abuse and ensures all peers can process messages. - // 10KB allows ~100 labels with reasonable overhead. - MaxMessageSize = 10 * 1024 // 10KB - - // MaxLabelsPerAnnouncement is the maximum number of labels per announcement. - // This prevents abuse from malicious peers. - // 100 labels is generous for typical records. - MaxLabelsPerAnnouncement = 100 -) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pubsub + +// Protocol constants for GossipSub label announcements. +// These values are INTENTIONALLY NOT CONFIGURABLE to ensure network-wide compatibility. +// All peers must use the same values to communicate properly. +// +// Rationale: +// - Different topics → peers can't discover each other's labels +// - Different message sizes → messages may be rejected +// - Different label limits → validation inconsistencies +// +// If protocol changes are needed, increment the topic version (e.g., "dir/labels/v2") +// and coordinate the upgrade across all peers. +const ( + // TopicLabels is the GossipSub topic for label announcements. + // All peers must subscribe to the same topic to communicate. + // Versioned to allow future protocol changes (e.g., "dir/labels/v2"). + TopicLabels = "dir/labels/v1" + + // MaxMessageSize is the maximum size of label announcement messages. + // This prevents abuse and ensures all peers can process messages. + // 10KB allows ~100 labels with reasonable overhead. + MaxMessageSize = 10 * 1024 // 10KB + + // MaxLabelsPerAnnouncement is the maximum number of labels per announcement. + // This prevents abuse from malicious peers. + // 100 labels is generous for typical records. + MaxLabelsPerAnnouncement = 100 +) diff --git a/server/routing/pubsub/events.go b/server/routing/pubsub/events.go index c1137cc2b..f39a6a82a 100644 --- a/server/routing/pubsub/events.go +++ b/server/routing/pubsub/events.go @@ -1,134 +1,134 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pubsub - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/agntcy/dir/server/types" -) - -// PublishEventHandler is a callback function type for handling record publication events. -// This is used for dependency injection to allow components to trigger publishing -// operations without creating circular dependencies. 
-// -// The handler should: -// - Accept a types.Record interface (caller must wrap concrete types with adapters) -// - Announce the record to DHT -// - Publish the record's labels via GossipSub -// - Handle errors appropriately -// -// Example usage: -// -// // In routing_remote.go: -// cleanupManager := NewCleanupManager(..., routeAPI.Publish) -// -// // In cleanup_tasks.go: -// type CleanupManager struct { -// publishFunc pubsub.PublishEventHandler -// } -type PublishEventHandler func(context.Context, types.Record) error - -// RecordPublishEvent is the wire format for record publication announcements via GossipSub. -// This is a minimal structure optimized for network efficiency. -// -// Protocol parameters: See constants.go for TopicLabels, MaxMessageSize, etc. -// These are intentionally NOT configurable to ensure network-wide compatibility. -// -// Security Note: -// - PeerID is NOT included in the wire format to prevent spoofing -// - Instead, the authenticated sender (msg.ReceivedFrom) is passed separately to handlers -// - This ensures only cryptographically verified peer IDs are used for storage -// -// Conversion to storage format: -// - Wire: RecordPublishEvent with []string labels -// - Handler receives: authenticated PeerID from libp2p transport -// - Storage: Enhanced keys (/skills/AI/CID/PeerID) with types.LabelMetadata -// -// Example wire format: -// -// { -// "cid": "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", -// "labels": ["/skills/AI/ML", "/domains/research", "/modules/tensorflow"], -// "timestamp": "2025-10-01T10:00:00Z" -// } -type RecordPublishEvent struct { - // CID is the content identifier of the record. - // This uniquely identifies the record being announced. - CID string `json:"cid"` - - // Labels is the list of label strings extracted from the record. - // Format: namespace-prefixed paths (e.g., "/skills/AI/ML") - // These will be converted to types.Label type upon receipt. - Labels []string `json:"labels"` - - // Timestamp is when this announcement was created. - // This becomes the types.LabelMetadata.Timestamp field. - Timestamp time.Time `json:"timestamp"` -} - -// Validate checks if the event is well-formed and safe to process. -// This prevents malformed or malicious events from being processed. -// -// Note: PeerID validation is intentionally omitted as it's provided -// separately by the authenticated libp2p transport layer (msg.ReceivedFrom). -func (e *RecordPublishEvent) Validate() error { - if e.CID == "" { - return errors.New("missing CID") - } - - if len(e.Labels) == 0 { - return errors.New("no labels provided") - } - - if len(e.Labels) > MaxLabelsPerAnnouncement { - return errors.New("too many labels") - } - - if e.Timestamp.IsZero() { - return errors.New("missing timestamp") - } - - return nil -} - -// Marshal serializes the event to JSON for network transmission. -func (e *RecordPublishEvent) Marshal() ([]byte, error) { - data, err := json.Marshal(e) - if err != nil { - return nil, fmt.Errorf("failed to marshal record publish event: %w", err) - } - - // Validate size to prevent oversized messages - if len(data) > MaxMessageSize { - return nil, errors.New("event exceeds maximum size") - } - - return data, nil -} - -// UnmarshalRecordPublishEvent deserializes and validates a record publish event. -// This is the entry point for processing received GossipSub messages. 
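-//
-// Illustrative receive path (this mirrors how the subscriber loop uses it):
-//
-//	event, err := UnmarshalRecordPublishEvent(msg.Data)
-//	if err != nil {
-//		return // drop malformed or oversized announcements
-//	}
-//	// event.CID and event.Labels are validated and safe to process here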
-func UnmarshalRecordPublishEvent(data []byte) (*RecordPublishEvent, error) { - // Check size before unmarshaling to prevent resource exhaustion - if len(data) > MaxMessageSize { - return nil, errors.New("event exceeds maximum size") - } - - var event RecordPublishEvent - if err := json.Unmarshal(data, &event); err != nil { - return nil, fmt.Errorf("failed to unmarshal record publish event: %w", err) - } - - // Validate after unmarshaling to ensure well-formed data - if err := event.Validate(); err != nil { - return nil, err - } - - return &event, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pubsub + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/agntcy/dir/server/types" +) + +// PublishEventHandler is a callback function type for handling record publication events. +// This is used for dependency injection to allow components to trigger publishing +// operations without creating circular dependencies. +// +// The handler should: +// - Accept a types.Record interface (caller must wrap concrete types with adapters) +// - Announce the record to DHT +// - Publish the record's labels via GossipSub +// - Handle errors appropriately +// +// Example usage: +// +// // In routing_remote.go: +// cleanupManager := NewCleanupManager(..., routeAPI.Publish) +// +// // In cleanup_tasks.go: +// type CleanupManager struct { +// publishFunc pubsub.PublishEventHandler +// } +type PublishEventHandler func(context.Context, types.Record) error + +// RecordPublishEvent is the wire format for record publication announcements via GossipSub. +// This is a minimal structure optimized for network efficiency. +// +// Protocol parameters: See constants.go for TopicLabels, MaxMessageSize, etc. +// These are intentionally NOT configurable to ensure network-wide compatibility. +// +// Security Note: +// - PeerID is NOT included in the wire format to prevent spoofing +// - Instead, the authenticated sender (msg.ReceivedFrom) is passed separately to handlers +// - This ensures only cryptographically verified peer IDs are used for storage +// +// Conversion to storage format: +// - Wire: RecordPublishEvent with []string labels +// - Handler receives: authenticated PeerID from libp2p transport +// - Storage: Enhanced keys (/skills/AI/CID/PeerID) with types.LabelMetadata +// +// Example wire format: +// +// { +// "cid": "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", +// "labels": ["/skills/AI/ML", "/domains/research", "/modules/tensorflow"], +// "timestamp": "2025-10-01T10:00:00Z" +// } +type RecordPublishEvent struct { + // CID is the content identifier of the record. + // This uniquely identifies the record being announced. + CID string `json:"cid"` + + // Labels is the list of label strings extracted from the record. + // Format: namespace-prefixed paths (e.g., "/skills/AI/ML") + // These will be converted to types.Label type upon receipt. + Labels []string `json:"labels"` + + // Timestamp is when this announcement was created. + // This becomes the types.LabelMetadata.Timestamp field. + Timestamp time.Time `json:"timestamp"` +} + +// Validate checks if the event is well-formed and safe to process. +// This prevents malformed or malicious events from being processed. +// +// Note: PeerID validation is intentionally omitted as it's provided +// separately by the authenticated libp2p transport layer (msg.ReceivedFrom). 
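+//
+// For example (hypothetical values; the CID is taken from the wire-format
+// example above), this event passes validation:
+//
+//	ev := &RecordPublishEvent{
+//		CID:       "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi",
+//		Labels:    []string{"/skills/AI/ML"},
+//		Timestamp: time.Now(),
+//	}
+//	err := ev.Validate() // nil: CID, labels, and timestamp are all present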
+func (e *RecordPublishEvent) Validate() error { + if e.CID == "" { + return errors.New("missing CID") + } + + if len(e.Labels) == 0 { + return errors.New("no labels provided") + } + + if len(e.Labels) > MaxLabelsPerAnnouncement { + return errors.New("too many labels") + } + + if e.Timestamp.IsZero() { + return errors.New("missing timestamp") + } + + return nil +} + +// Marshal serializes the event to JSON for network transmission. +func (e *RecordPublishEvent) Marshal() ([]byte, error) { + data, err := json.Marshal(e) + if err != nil { + return nil, fmt.Errorf("failed to marshal record publish event: %w", err) + } + + // Validate size to prevent oversized messages + if len(data) > MaxMessageSize { + return nil, errors.New("event exceeds maximum size") + } + + return data, nil +} + +// UnmarshalRecordPublishEvent deserializes and validates a record publish event. +// This is the entry point for processing received GossipSub messages. +func UnmarshalRecordPublishEvent(data []byte) (*RecordPublishEvent, error) { + // Check size before unmarshaling to prevent resource exhaustion + if len(data) > MaxMessageSize { + return nil, errors.New("event exceeds maximum size") + } + + var event RecordPublishEvent + if err := json.Unmarshal(data, &event); err != nil { + return nil, fmt.Errorf("failed to unmarshal record publish event: %w", err) + } + + // Validate after unmarshaling to ensure well-formed data + if err := event.Validate(); err != nil { + return nil, err + } + + return &event, nil +} diff --git a/server/routing/pubsub/manager.go b/server/routing/pubsub/manager.go index c96acf4c8..ee4e53e48 100644 --- a/server/routing/pubsub/manager.go +++ b/server/routing/pubsub/manager.go @@ -1,375 +1,375 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package pubsub - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/agntcy/dir/server/routing/internal/p2p" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/host" -) - -var logger = logging.Logger("routing/pubsub") - -// Manager handles GossipSub operations for label announcements. -// It provides efficient label propagation across the network without -// requiring peers to pull entire records. -// -// Architecture: -// - Publisher: Announces labels when storing records -// - Subscriber: Receives and caches labels from remote peers -// - Integration: Works alongside DHT for resilient discovery -// -// Performance: -// - Propagation: ~5-20ms (vs DHT's ~100-500ms) -// - Bandwidth: ~100B per announcement (vs KB-MB for full record pull) -// - Reach: ALL subscribed peers (vs DHT's k-closest peers) -type Manager struct { - ctx context.Context //nolint:containedctx // Needed for long-running message handler goroutine - host host.Host - pubsub *pubsub.PubSub - topic *pubsub.Topic - sub *pubsub.Subscription - localPeerID string - topicName string // Topic name (protocol constant) - - // Callback invoked when record publish event is received. - // Parameters: - // - context.Context: Operation context - // - string: Authenticated peer ID (from msg.ReceivedFrom, cryptographically verified) - // - *RecordPublishEvent: The announcement payload - onRecordPublishEvent func(context.Context, string, *RecordPublishEvent) -} - -// New creates a new GossipSub manager for label announcements. 
-// This initializes the GossipSub router, joins the labels topic, and -// starts the message handler goroutine. -// -// Protocol parameters (TopicLabels, MaxMessageSize) are defined in constants.go -// and are intentionally NOT configurable to ensure network-wide compatibility. -// -// Parameters: -// - ctx: Context for lifecycle management -// - h: libp2p host for network operations -// -// Returns: -// - *Manager: Initialized manager ready for use -// - error: If GossipSub setup fails -func New(ctx context.Context, h host.Host) (*Manager, error) { - // Create GossipSub with protocol-defined settings - ps, err := pubsub.NewGossipSub( - ctx, - h, - // Enable peer exchange for better peer discovery - pubsub.WithPeerExchange(true), - // Limit message size to protocol-defined maximum - pubsub.WithMaxMessageSize(MaxMessageSize), - ) - if err != nil { - return nil, fmt.Errorf("failed to create gossipsub: %w", err) - } - - // Join the protocol-defined topic - topic, err := ps.Join(TopicLabels) - if err != nil { - return nil, fmt.Errorf("failed to join labels topic %q: %w", TopicLabels, err) - } - - // Subscribe to receive label announcements - sub, err := topic.Subscribe() - if err != nil { - return nil, fmt.Errorf("failed to subscribe to labels topic %q: %w", TopicLabels, err) - } - - manager := &Manager{ - ctx: ctx, - host: h, - pubsub: ps, - topic: topic, - sub: sub, - localPeerID: h.ID().String(), - topicName: TopicLabels, - } - - // Start message handler goroutine - go manager.handleMessages() - - logger.Info("GossipSub manager initialized", - "topic", TopicLabels, - "maxMessageSize", MaxMessageSize, - "peerID", manager.localPeerID) - - return manager, nil -} - -// PublishRecord announces a record's labels to the network. -// This is called when a record is stored locally and should be -// discoverable by remote peers. -// -// Flow: -// 1. Extract CID and labels from record -// 2. Convert types.Label to wire format ([]string) -// 3. Create and validate RecordPublishEvent -// 4. Publish to GossipSub topic -// 5. GossipSub mesh propagates to all subscribed peers -// -// Parameters: -// - ctx: Context for operation timeout/cancellation -// - record: The record interface (caller must wrap concrete types with adapter) -// -// Returns: -// - error: If validation or publishing fails -// -// Note: This is non-blocking. GossipSub handles propagation asynchronously. 
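-//
-// Illustrative call site (hypothetical; the concrete record wrapper type
-// depends on the caller):
-//
-//	if err := manager.PublishRecord(ctx, record); err != nil {
-//		logger.Warn("Failed to announce record via GossipSub", "error", err)
-//	}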
-func (m *Manager) PublishRecord(ctx context.Context, record types.Record) error { - if record == nil { - return errors.New("record is nil") - } - - // Extract CID from record - cid := record.GetCid() - if cid == "" { - return errors.New("record has no CID") - } - - // Extract labels from record (uses shared label extraction logic) - labelList := types.GetLabelsFromRecord(record) - if len(labelList) == 0 { - // No labels to publish (not an error, just nothing to do) - logger.Debug("Record has no labels, skipping GossipSub announcement", "cid", cid) - - return nil - } - - // Convert types.Label to strings for wire format - labelStrings := make([]string, len(labelList)) - for i, label := range labelList { - labelStrings[i] = label.String() - } - - // Create announcement with current timestamp - // Note: PeerID is not included in the wire format - recipients use - // the authenticated msg.ReceivedFrom from libp2p transport layer - announcement := &RecordPublishEvent{ - CID: cid, - Labels: labelStrings, - Timestamp: time.Now(), - } - - // Validate before publishing to catch issues early - if err := announcement.Validate(); err != nil { - return fmt.Errorf("invalid announcement: %w", err) - } - - // Serialize to JSON - data, err := announcement.Marshal() - if err != nil { - return fmt.Errorf("failed to marshal announcement: %w", err) - } - - // Publish to GossipSub topic - if err := m.topic.Publish(ctx, data); err != nil { - return fmt.Errorf("failed to publish announcement: %w", err) - } - - logger.Info("Published record announcement", - "cid", cid, - "labels", len(labelList), - "topicPeers", len(m.topic.ListPeers()), - "size", len(data)) - - return nil -} - -// SetOnRecordPublishEvent sets the callback for received record publication events. -// This callback is invoked for each valid announcement received from remote peers. -// -// The callback receives: -// - ctx: Operation context -// - authenticatedPeerID: The peer's ID from msg.ReceivedFrom (cryptographically verified) -// - event: The announcement payload -// -// The callback should: -// - Convert wire format ([]string) to labels.Label -// - Build enhanced keys using BuildEnhancedLabelKey() with authenticatedPeerID -// - Store labels.LabelMetadata in datastore -// -// Security Note: Always use authenticatedPeerID (not any ID from the event payload) -// as it's verified by libp2p's cryptographic transport layer. -// -// Example: -// -// manager.SetOnRecordPublishEvent(func(ctx context.Context, authenticatedPeerID string, event *RecordPublishEvent) { -// for _, labelStr := range event.Labels { -// label := labels.Label(labelStr) -// key := BuildEnhancedLabelKey(label, event.CID, authenticatedPeerID) -// // ... store in datastore ... -// } -// }) -func (m *Manager) SetOnRecordPublishEvent(fn func(context.Context, string, *RecordPublishEvent)) { - m.onRecordPublishEvent = fn -} - -// handleMessages is the main message processing loop. -// It runs in a goroutine and processes all incoming label announcements. -// -// Flow: -// 1. Wait for next message from subscription -// 2. Skip own messages (already cached locally) -// 3. Unmarshal and validate announcement -// 4. Invoke callback for processing -// -// Error handling: -// - Context cancellation: Normal shutdown, exit loop -// - Subscription cancelled: Normal shutdown, exit loop -// - Invalid messages: Log warning, continue processing -// - Unmarshal errors: Log warning, continue processing -// -// This goroutine runs for the lifetime of the Manager. 
-func (m *Manager) handleMessages() { - for { - msg, err := m.sub.Next(m.ctx) - if err != nil { - // Check if context was cancelled (normal shutdown) - if m.ctx.Err() != nil { - logger.Debug("Message handler stopping", "reason", "context_cancelled") - - return - } - - // Check if subscription was cancelled (happens during shutdown) - // This prevents error spam during graceful shutdown - if errors.Is(err, context.Canceled) || err.Error() == "subscription cancelled" { - logger.Debug("Message handler stopping", "reason", "subscription_cancelled") - - return - } - - // Log error but continue processing - logger.Error("Error reading from labels topic", "error", err) - - continue - } - - // Skip our own messages (we already cached labels locally) - if msg.ReceivedFrom == m.host.ID() { - continue - } - - // Parse and validate announcement - announcement, err := UnmarshalRecordPublishEvent(msg.Data) - if err != nil { - logger.Warn("Received invalid label announcement", - "from", msg.ReceivedFrom, - "error", err, - "size", len(msg.Data)) - - continue - } - - // Extract authenticated peer ID from libp2p transport layer - // This is cryptographically verified and cannot be spoofed - authenticatedPeerID := msg.ReceivedFrom.String() - - logger.Debug("Received label announcement", - "from", authenticatedPeerID, - "cid", announcement.CID, - "labels", len(announcement.Labels)) - - // Invoke callback with authenticated peer ID - if m.onRecordPublishEvent != nil { - // Pass authenticated peer ID as separate parameter for security - m.onRecordPublishEvent(m.ctx, authenticatedPeerID, announcement) - } - } -} - -// GetTopicPeers returns the list of peers subscribed to the labels topic. -// This is useful for monitoring network connectivity and debugging. -// -// Returns: -// - []string: List of peer IDs (as strings) -func (m *Manager) GetTopicPeers() []string { - peers := m.topic.ListPeers() - peerIDs := make([]string, len(peers)) - - for i, p := range peers { - peerIDs[i] = p.String() - } - - return peerIDs -} - -// GetMeshPeerCount returns the number of peers in the GossipSub mesh. -// This is used for readiness checks to ensure the mesh is formed. -func (m *Manager) GetMeshPeerCount() int { - return len(m.topic.ListPeers()) -} - -// Close stops the GossipSub manager and releases resources. -// This should be called during shutdown to clean up gracefully. -// -// Flow: -// 1. Cancel subscription (stops handleMessages goroutine) -// 2. Leave topic -// 3. Release resources -// -// Returns: -// - error: If cleanup fails (rare) -func (m *Manager) Close() error { - m.sub.Cancel() - - if err := m.topic.Close(); err != nil { - return fmt.Errorf("failed to close gossipsub topic: %w", err) - } - - return nil -} - -// TagMeshPeers tags all current GossipSub mesh peers with high priority -// to prevent them from being pruned by the Connection Manager. -// -// Mesh peers are critical for fast label propagation (5-20ms delivery). -// If mesh peers are pruned, the mesh must rebuild, causing temporary -// degradation in GossipSub performance. 
-// -// This method should be called: -// - After GossipSub initialization -// - Periodically (e.g., every 30 seconds) as mesh changes -// - Or in response to mesh events (advanced) -// -// Priority: 50 points (high, but below bootstrap's 100) -// -// Safety: -// - Safe to call even if Connection Manager is nil (no-op) -// - Safe to call when mesh is empty (no-op) -// - Safe to call multiple times (re-tagging is harmless) -func (m *Manager) TagMeshPeers() { - if m == nil || m.host.ConnManager() == nil { - return // No-op if manager or connection manager not available - } - - peers := m.topic.ListPeers() - - if len(peers) == 0 { - logger.Debug("No mesh peers to tag") - - return - } - - for _, p := range peers { - m.host.ConnManager().TagPeer(p, "gossipsub-mesh", p2p.PeerPriorityGossipSubMesh) - } - - logger.Debug("Tagged GossipSub mesh peers", - "count", len(peers), - "priority", p2p.PeerPriorityGossipSubMesh, - "topic", m.topicName) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package pubsub + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/agntcy/dir/server/routing/internal/p2p" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/libp2p/go-libp2p/core/host" +) + +var logger = logging.Logger("routing/pubsub") + +// Manager handles GossipSub operations for label announcements. +// It provides efficient label propagation across the network without +// requiring peers to pull entire records. +// +// Architecture: +// - Publisher: Announces labels when storing records +// - Subscriber: Receives and caches labels from remote peers +// - Integration: Works alongside DHT for resilient discovery +// +// Performance: +// - Propagation: ~5-20ms (vs DHT's ~100-500ms) +// - Bandwidth: ~100B per announcement (vs KB-MB for full record pull) +// - Reach: ALL subscribed peers (vs DHT's k-closest peers) +type Manager struct { + ctx context.Context //nolint:containedctx // Needed for long-running message handler goroutine + host host.Host + pubsub *pubsub.PubSub + topic *pubsub.Topic + sub *pubsub.Subscription + localPeerID string + topicName string // Topic name (protocol constant) + + // Callback invoked when record publish event is received. + // Parameters: + // - context.Context: Operation context + // - string: Authenticated peer ID (from msg.ReceivedFrom, cryptographically verified) + // - *RecordPublishEvent: The announcement payload + onRecordPublishEvent func(context.Context, string, *RecordPublishEvent) +} + +// New creates a new GossipSub manager for label announcements. +// This initializes the GossipSub router, joins the labels topic, and +// starts the message handler goroutine. +// +// Protocol parameters (TopicLabels, MaxMessageSize) are defined in constants.go +// and are intentionally NOT configurable to ensure network-wide compatibility. 
+// +// Parameters: +// - ctx: Context for lifecycle management +// - h: libp2p host for network operations +// +// Returns: +// - *Manager: Initialized manager ready for use +// - error: If GossipSub setup fails +func New(ctx context.Context, h host.Host) (*Manager, error) { + // Create GossipSub with protocol-defined settings + ps, err := pubsub.NewGossipSub( + ctx, + h, + // Enable peer exchange for better peer discovery + pubsub.WithPeerExchange(true), + // Limit message size to protocol-defined maximum + pubsub.WithMaxMessageSize(MaxMessageSize), + ) + if err != nil { + return nil, fmt.Errorf("failed to create gossipsub: %w", err) + } + + // Join the protocol-defined topic + topic, err := ps.Join(TopicLabels) + if err != nil { + return nil, fmt.Errorf("failed to join labels topic %q: %w", TopicLabels, err) + } + + // Subscribe to receive label announcements + sub, err := topic.Subscribe() + if err != nil { + return nil, fmt.Errorf("failed to subscribe to labels topic %q: %w", TopicLabels, err) + } + + manager := &Manager{ + ctx: ctx, + host: h, + pubsub: ps, + topic: topic, + sub: sub, + localPeerID: h.ID().String(), + topicName: TopicLabels, + } + + // Start message handler goroutine + go manager.handleMessages() + + logger.Info("GossipSub manager initialized", + "topic", TopicLabels, + "maxMessageSize", MaxMessageSize, + "peerID", manager.localPeerID) + + return manager, nil +} + +// PublishRecord announces a record's labels to the network. +// This is called when a record is stored locally and should be +// discoverable by remote peers. +// +// Flow: +// 1. Extract CID and labels from record +// 2. Convert types.Label to wire format ([]string) +// 3. Create and validate RecordPublishEvent +// 4. Publish to GossipSub topic +// 5. GossipSub mesh propagates to all subscribed peers +// +// Parameters: +// - ctx: Context for operation timeout/cancellation +// - record: The record interface (caller must wrap concrete types with adapter) +// +// Returns: +// - error: If validation or publishing fails +// +// Note: This is non-blocking. GossipSub handles propagation asynchronously. 
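For illustration, here is a caller-side sketch of wiring the manager into a node and announcing a record. Only New, PublishRecord, and Close come from this file; the import path, the host options, and the record value are assumptions, and error handling is abbreviated.

package main

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p"
	// Hypothetical import path; the package's module path is not shown in this diff.
	lblpubsub "github.com/agntcy/dir/server/routing/pubsub"
)

func main() {
	ctx := context.Background()

	// Default libp2p host; a production deployment would pass options.
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}

	mgr, err := lblpubsub.New(ctx, h)
	if err != nil {
		log.Fatal(err)
	}
	defer mgr.Close()

	// rec would be any types.Record implementation with a CID and labels;
	// its construction is application-specific and omitted here:
	// _ = mgr.PublishRecord(ctx, rec)
}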
+func (m *Manager) PublishRecord(ctx context.Context, record types.Record) error { + if record == nil { + return errors.New("record is nil") + } + + // Extract CID from record + cid := record.GetCid() + if cid == "" { + return errors.New("record has no CID") + } + + // Extract labels from record (uses shared label extraction logic) + labelList := types.GetLabelsFromRecord(record) + if len(labelList) == 0 { + // No labels to publish (not an error, just nothing to do) + logger.Debug("Record has no labels, skipping GossipSub announcement", "cid", cid) + + return nil + } + + // Convert types.Label to strings for wire format + labelStrings := make([]string, len(labelList)) + for i, label := range labelList { + labelStrings[i] = label.String() + } + + // Create announcement with current timestamp + // Note: PeerID is not included in the wire format - recipients use + // the authenticated msg.ReceivedFrom from libp2p transport layer + announcement := &RecordPublishEvent{ + CID: cid, + Labels: labelStrings, + Timestamp: time.Now(), + } + + // Validate before publishing to catch issues early + if err := announcement.Validate(); err != nil { + return fmt.Errorf("invalid announcement: %w", err) + } + + // Serialize to JSON + data, err := announcement.Marshal() + if err != nil { + return fmt.Errorf("failed to marshal announcement: %w", err) + } + + // Publish to GossipSub topic + if err := m.topic.Publish(ctx, data); err != nil { + return fmt.Errorf("failed to publish announcement: %w", err) + } + + logger.Info("Published record announcement", + "cid", cid, + "labels", len(labelList), + "topicPeers", len(m.topic.ListPeers()), + "size", len(data)) + + return nil +} + +// SetOnRecordPublishEvent sets the callback for received record publication events. +// This callback is invoked for each valid announcement received from remote peers. +// +// The callback receives: +// - ctx: Operation context +// - authenticatedPeerID: The peer's ID from msg.ReceivedFrom (cryptographically verified) +// - event: The announcement payload +// +// The callback should: +// - Convert wire format ([]string) to labels.Label +// - Build enhanced keys using BuildEnhancedLabelKey() with authenticatedPeerID +// - Store labels.LabelMetadata in datastore +// +// Security Note: Always use authenticatedPeerID (not any ID from the event payload) +// as it's verified by libp2p's cryptographic transport layer. +// +// Example: +// +// manager.SetOnRecordPublishEvent(func(ctx context.Context, authenticatedPeerID string, event *RecordPublishEvent) { +// for _, labelStr := range event.Labels { +// label := labels.Label(labelStr) +// key := BuildEnhancedLabelKey(label, event.CID, authenticatedPeerID) +// // ... store in datastore ... +// } +// }) +func (m *Manager) SetOnRecordPublishEvent(fn func(context.Context, string, *RecordPublishEvent)) { + m.onRecordPublishEvent = fn +} + +// handleMessages is the main message processing loop. +// It runs in a goroutine and processes all incoming label announcements. +// +// Flow: +// 1. Wait for next message from subscription +// 2. Skip own messages (already cached locally) +// 3. Unmarshal and validate announcement +// 4. Invoke callback for processing +// +// Error handling: +// - Context cancellation: Normal shutdown, exit loop +// - Subscription cancelled: Normal shutdown, exit loop +// - Invalid messages: Log warning, continue processing +// - Unmarshal errors: Log warning, continue processing +// +// This goroutine runs for the lifetime of the Manager. 
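To make the wire format concrete: each announcement is a small JSON document, which is where the roughly 100-bytes-per-announcement figure comes from. A same-package sketch follows; the JSON field names are assumed from the Go struct fields, since the struct tags are not shown in this diff, and the CID is a placeholder.

// Same-package illustration (not part of the diff).
func exampleAnnouncement() ([]byte, error) {
	announcement := &RecordPublishEvent{
		CID:       "bafy...", // placeholder CID
		Labels:    []string{"/skills/AI", "/domains/healthcare"}, // invented labels
		Timestamp: time.Now(),
	}

	// With the assumed field names, the serialized form is roughly:
	// {"CID":"bafy...","Labels":["/skills/AI","/domains/healthcare"],"Timestamp":"2025-01-02T15:04:05Z"}
	return announcement.Marshal()
}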
+func (m *Manager) handleMessages() { + for { + msg, err := m.sub.Next(m.ctx) + if err != nil { + // Check if context was cancelled (normal shutdown) + if m.ctx.Err() != nil { + logger.Debug("Message handler stopping", "reason", "context_cancelled") + + return + } + + // Check if subscription was cancelled (happens during shutdown) + // This prevents error spam during graceful shutdown + if errors.Is(err, context.Canceled) || err.Error() == "subscription cancelled" { + logger.Debug("Message handler stopping", "reason", "subscription_cancelled") + + return + } + + // Log error but continue processing + logger.Error("Error reading from labels topic", "error", err) + + continue + } + + // Skip our own messages (we already cached labels locally) + if msg.ReceivedFrom == m.host.ID() { + continue + } + + // Parse and validate announcement + announcement, err := UnmarshalRecordPublishEvent(msg.Data) + if err != nil { + logger.Warn("Received invalid label announcement", + "from", msg.ReceivedFrom, + "error", err, + "size", len(msg.Data)) + + continue + } + + // Extract authenticated peer ID from libp2p transport layer + // This is cryptographically verified and cannot be spoofed + authenticatedPeerID := msg.ReceivedFrom.String() + + logger.Debug("Received label announcement", + "from", authenticatedPeerID, + "cid", announcement.CID, + "labels", len(announcement.Labels)) + + // Invoke callback with authenticated peer ID + if m.onRecordPublishEvent != nil { + // Pass authenticated peer ID as separate parameter for security + m.onRecordPublishEvent(m.ctx, authenticatedPeerID, announcement) + } + } +} + +// GetTopicPeers returns the list of peers subscribed to the labels topic. +// This is useful for monitoring network connectivity and debugging. +// +// Returns: +// - []string: List of peer IDs (as strings) +func (m *Manager) GetTopicPeers() []string { + peers := m.topic.ListPeers() + peerIDs := make([]string, len(peers)) + + for i, p := range peers { + peerIDs[i] = p.String() + } + + return peerIDs +} + +// GetMeshPeerCount returns the number of peers in the GossipSub mesh. +// This is used for readiness checks to ensure the mesh is formed. +func (m *Manager) GetMeshPeerCount() int { + return len(m.topic.ListPeers()) +} + +// Close stops the GossipSub manager and releases resources. +// This should be called during shutdown to clean up gracefully. +// +// Flow: +// 1. Cancel subscription (stops handleMessages goroutine) +// 2. Leave topic +// 3. Release resources +// +// Returns: +// - error: If cleanup fails (rare) +func (m *Manager) Close() error { + m.sub.Cancel() + + if err := m.topic.Close(); err != nil { + return fmt.Errorf("failed to close gossipsub topic: %w", err) + } + + return nil +} + +// TagMeshPeers tags all current GossipSub mesh peers with high priority +// to prevent them from being pruned by the Connection Manager. +// +// Mesh peers are critical for fast label propagation (5-20ms delivery). +// If mesh peers are pruned, the mesh must rebuild, causing temporary +// degradation in GossipSub performance. 
+// +// This method should be called: +// - After GossipSub initialization +// - Periodically (e.g., every 30 seconds) as mesh changes +// - Or in response to mesh events (advanced) +// +// Priority: 50 points (high, but below bootstrap's 100) +// +// Safety: +// - Safe to call even if Connection Manager is nil (no-op) +// - Safe to call when mesh is empty (no-op) +// - Safe to call multiple times (re-tagging is harmless) +func (m *Manager) TagMeshPeers() { + if m == nil || m.host.ConnManager() == nil { + return // No-op if manager or connection manager not available + } + + peers := m.topic.ListPeers() + + if len(peers) == 0 { + logger.Debug("No mesh peers to tag") + + return + } + + for _, p := range peers { + m.host.ConnManager().TagPeer(p, "gossipsub-mesh", p2p.PeerPriorityGossipSubMesh) + } + + logger.Debug("Tagged GossipSub mesh peers", + "count", len(peers), + "priority", p2p.PeerPriorityGossipSubMesh, + "topic", m.topicName) +} diff --git a/server/routing/query_matching.go b/server/routing/query_matching.go index 17757f449..b7fc60073 100644 --- a/server/routing/query_matching.go +++ b/server/routing/query_matching.go @@ -1,185 +1,185 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "strings" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" -) - -var queryLogger = logging.Logger("routing/query") - -// LabelRetriever function type for injecting different label retrieval strategies. -// This allows us to use the same query matching logic for both local and remote scenarios -// while keeping the label retrieval implementation separate. -type LabelRetriever func(ctx context.Context, cid string) []types.Label - -// MatchesAllQueries checks if a record matches ALL provided queries using injected label retrieval. -// This implements AND logic - all queries must match for the record to be considered a match. -// -// Parameters: -// - ctx: Context for the operation -// - cid: The CID of the record to check -// - queries: List of queries that must ALL match (AND relationship) -// - labelRetriever: Function to retrieve labels for the given CID -// -// Returns true if all queries match, false otherwise. -func MatchesAllQueries( - ctx context.Context, - cid string, - queries []*routingv1.RecordQuery, - labelRetriever LabelRetriever, -) bool { - if len(queries) == 0 { - return true // No filters = match everything - } - - // Use the injected label retrieval strategy - labels := labelRetriever(ctx, cid) - - // ALL queries must match (AND relationship) - for _, query := range queries { - if !QueryMatchesLabels(query, labels) { - return false - } - } - - return true -} - -// QueryMatchesLabels checks if a single query matches against a list of labels. -// This function contains the unified logic for all query types, resolving the -// differences between local and remote implementations. 
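Returning briefly to the pubsub manager above: one way to honor the "call periodically" guidance for TagMeshPeers is a small ticker goroutine owned by whoever created the Manager. A minimal same-package sketch; the 30-second interval follows the method's comment, but the goroutine ownership is otherwise an assumption.

// Same-package illustration (not part of the diff).
func runMeshTagging(ctx context.Context, mgr *Manager) {
	// Tag once right after initialization, then re-tag as the mesh
	// changes; re-tagging is harmless per the method's contract.
	mgr.TagMeshPeers()

	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			mgr.TagMeshPeers()
		}
	}
}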
-// -//nolint:gocognit,cyclop // Complex but necessary logic for handling all query types with exact and prefix matching -func QueryMatchesLabels(query *routingv1.RecordQuery, labelList []types.Label) bool { - if query == nil { - return false - } - - switch query.GetType() { - case routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL: - // Check if any skill label matches the query - targetSkill := types.LabelTypeSkill.Prefix() + query.GetValue() - - for _, label := range labelList { - // Type-safe filtering: only check skill labels - if label.Type() != types.LabelTypeSkill { - continue - } - - labelStr := label.String() - // Exact match: /skills/category1/class1 matches "category1/class1" - if labelStr == targetSkill { - return true - } - // Prefix match: /skills/category2/class2 matches "category2" - if strings.HasPrefix(labelStr, targetSkill+"/") { - return true - } - } - - return false - - case routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR: - // Unified locator handling - use proper namespace prefix (fixing remote implementation) - targetLocator := types.LabelTypeLocator.Prefix() + query.GetValue() - - for _, label := range labelList { - // Type-safe filtering: only check locator labels - if label.Type() != types.LabelTypeLocator { - continue - } - - // Exact match: /locators/docker-image matches "docker-image" - if label.String() == targetLocator { - return true - } - } - - return false - - case routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN: - // Check if any domain label matches the query - targetDomain := types.LabelTypeDomain.Prefix() + query.GetValue() - - for _, label := range labelList { - // Type-safe filtering: only check domain labels - if label.Type() != types.LabelTypeDomain { - continue - } - - labelStr := label.String() - // Exact match: /domains/research matches "research" - if labelStr == targetDomain { - return true - } - // Prefix match: /domains/research/subfield matches "research" - if strings.HasPrefix(labelStr, targetDomain+"/") { - return true - } - } - - return false - - case routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE: - // Check if any module label matches the query - targetModule := types.LabelTypeModule.Prefix() + query.GetValue() - - for _, label := range labelList { - // Type-safe filtering: only check module labels - if label.Type() != types.LabelTypeModule { - continue - } - - labelStr := label.String() - // Exact match: /modules/runtime/model matches "runtime/model" - if labelStr == targetModule { - return true - } - // Prefix match: /modules/runtime/model/security matches "runtime/model" - if strings.HasPrefix(labelStr, targetModule+"/") { - return true - } - } - - return false - - case routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED: - // Unspecified queries match everything - return true - - default: - queryLogger.Warn("Unknown query type", "type", query.GetType()) - - return false - } -} - -// GetMatchingQueries returns the queries that match against a specific label key. -// This is used primarily for calculating match scores in Search operations. 
-func GetMatchingQueries(labelKey string, queries []*routingv1.RecordQuery) []*routingv1.RecordQuery { - var matchingQueries []*routingv1.RecordQuery - - // Extract label from the enhanced key - label, _, _, err := ParseEnhancedLabelKey(labelKey) - if err != nil { - queryLogger.Warn("Failed to parse enhanced label key for query matching", "key", labelKey, "error", err) - - return matchingQueries - } - - // Check which queries this label satisfies - for _, query := range queries { - if QueryMatchesLabels(query, []types.Label{label}) { - matchingQueries = append(matchingQueries, query) - } - } - - return matchingQueries -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "strings" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" +) + +var queryLogger = logging.Logger("routing/query") + +// LabelRetriever function type for injecting different label retrieval strategies. +// This allows us to use the same query matching logic for both local and remote scenarios +// while keeping the label retrieval implementation separate. +type LabelRetriever func(ctx context.Context, cid string) []types.Label + +// MatchesAllQueries checks if a record matches ALL provided queries using injected label retrieval. +// This implements AND logic - all queries must match for the record to be considered a match. +// +// Parameters: +// - ctx: Context for the operation +// - cid: The CID of the record to check +// - queries: List of queries that must ALL match (AND relationship) +// - labelRetriever: Function to retrieve labels for the given CID +// +// Returns true if all queries match, false otherwise. +func MatchesAllQueries( + ctx context.Context, + cid string, + queries []*routingv1.RecordQuery, + labelRetriever LabelRetriever, +) bool { + if len(queries) == 0 { + return true // No filters = match everything + } + + // Use the injected label retrieval strategy + labels := labelRetriever(ctx, cid) + + // ALL queries must match (AND relationship) + for _, query := range queries { + if !QueryMatchesLabels(query, labels) { + return false + } + } + + return true +} + +// QueryMatchesLabels checks if a single query matches against a list of labels. +// This function contains the unified logic for all query types, resolving the +// differences between local and remote implementations. 
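As a concrete illustration of these semantics (AND across queries; exact or hierarchical-prefix match within a namespace), here is a same-package sketch combining the two entry points; the CID and label values are invented.

// Same-package illustration (not part of the diff).
func exampleMatch(ctx context.Context) bool {
	retriever := func(_ context.Context, _ string) []types.Label {
		return []types.Label{
			types.Label("/skills/AI/ML"),
			types.Label("/locators/docker-image"),
		}
	}

	queries := []*routingv1.RecordQuery{
		// Prefix match: /skills/AI/ML satisfies the broader "AI" query.
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "AI"},
		// Exact match: locator queries do not use prefix matching.
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, Value: "docker-image"},
	}

	// Returns true: both queries match, satisfying the AND relationship.
	return MatchesAllQueries(ctx, "example-cid", queries, retriever)
}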
+// +//nolint:gocognit,cyclop // Complex but necessary logic for handling all query types with exact and prefix matching +func QueryMatchesLabels(query *routingv1.RecordQuery, labelList []types.Label) bool { + if query == nil { + return false + } + + switch query.GetType() { + case routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL: + // Check if any skill label matches the query + targetSkill := types.LabelTypeSkill.Prefix() + query.GetValue() + + for _, label := range labelList { + // Type-safe filtering: only check skill labels + if label.Type() != types.LabelTypeSkill { + continue + } + + labelStr := label.String() + // Exact match: /skills/category1/class1 matches "category1/class1" + if labelStr == targetSkill { + return true + } + // Prefix match: /skills/category2/class2 matches "category2" + if strings.HasPrefix(labelStr, targetSkill+"/") { + return true + } + } + + return false + + case routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR: + // Unified locator handling - use proper namespace prefix (fixing remote implementation) + targetLocator := types.LabelTypeLocator.Prefix() + query.GetValue() + + for _, label := range labelList { + // Type-safe filtering: only check locator labels + if label.Type() != types.LabelTypeLocator { + continue + } + + // Exact match: /locators/docker-image matches "docker-image" + if label.String() == targetLocator { + return true + } + } + + return false + + case routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN: + // Check if any domain label matches the query + targetDomain := types.LabelTypeDomain.Prefix() + query.GetValue() + + for _, label := range labelList { + // Type-safe filtering: only check domain labels + if label.Type() != types.LabelTypeDomain { + continue + } + + labelStr := label.String() + // Exact match: /domains/research matches "research" + if labelStr == targetDomain { + return true + } + // Prefix match: /domains/research/subfield matches "research" + if strings.HasPrefix(labelStr, targetDomain+"/") { + return true + } + } + + return false + + case routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE: + // Check if any module label matches the query + targetModule := types.LabelTypeModule.Prefix() + query.GetValue() + + for _, label := range labelList { + // Type-safe filtering: only check module labels + if label.Type() != types.LabelTypeModule { + continue + } + + labelStr := label.String() + // Exact match: /modules/runtime/model matches "runtime/model" + if labelStr == targetModule { + return true + } + // Prefix match: /modules/runtime/model/security matches "runtime/model" + if strings.HasPrefix(labelStr, targetModule+"/") { + return true + } + } + + return false + + case routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED: + // Unspecified queries match everything + return true + + default: + queryLogger.Warn("Unknown query type", "type", query.GetType()) + + return false + } +} + +// GetMatchingQueries returns the queries that match against a specific label key. +// This is used primarily for calculating match scores in Search operations. 
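The enhanced key layout (/namespace/label/cid/peerID, as exercised by the tests below) makes this concrete. A same-package sketch, reusing a key from the test data:

// Same-package illustration (not part of the diff).
func exampleMatchingQueries() []*routingv1.RecordQuery {
	key := "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1"

	queries := []*routingv1.RecordQuery{
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "AI"},
		{Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, Value: "healthcare"},
	}

	// Only the skill query is satisfied by the /skills/AI label parsed
	// from the key, so the returned slice has length 1.
	return GetMatchingQueries(key, queries)
}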
+func GetMatchingQueries(labelKey string, queries []*routingv1.RecordQuery) []*routingv1.RecordQuery { + var matchingQueries []*routingv1.RecordQuery + + // Extract label from the enhanced key + label, _, _, err := ParseEnhancedLabelKey(labelKey) + if err != nil { + queryLogger.Warn("Failed to parse enhanced label key for query matching", "key", labelKey, "error", err) + + return matchingQueries + } + + // Check which queries this label satisfies + for _, query := range queries { + if QueryMatchesLabels(query, []types.Label{label}) { + matchingQueries = append(matchingQueries, query) + } + } + + return matchingQueries +} diff --git a/server/routing/query_matching_test.go b/server/routing/query_matching_test.go index 7edaf436d..38066c087 100644 --- a/server/routing/query_matching_test.go +++ b/server/routing/query_matching_test.go @@ -1,536 +1,536 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "testing" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" -) - -func TestQueryMatchesLabels(t *testing.T) { - testCases := []struct { - name string - query *routingv1.RecordQuery - labels []types.Label - expected bool - }{ - // Skill queries - { - name: "skill_exact_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - labels: []types.Label{types.Label("/skills/AI"), types.Label("/skills/web-development")}, - expected: true, - }, - { - name: "skill_prefix_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - labels: []types.Label{types.Label("/skills/AI/ML"), types.Label("/skills/web-development")}, - expected: true, - }, - { - name: "skill_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "blockchain", - }, - labels: []types.Label{types.Label("/skills/AI"), types.Label("/skills/web-development")}, - expected: false, - }, - { - name: "skill_partial_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI/ML/deep-learning", - }, - labels: []types.Label{types.Label("/skills/AI/ML"), types.Label("/skills/web-development")}, - expected: false, - }, - - // Locator queries - { - name: "locator_exact_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: "docker-image", - }, - labels: []types.Label{types.Label("/locators/docker-image"), types.Label("/skills/AI")}, - expected: true, - }, - { - name: "locator_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: "git-repo", - }, - labels: []types.Label{types.Label("/locators/docker-image"), types.Label("/skills/AI")}, - expected: false, - }, - - // Domain queries - { - name: "domain_exact_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "healthcare", - }, - labels: []types.Label{types.Label("/domains/healthcare"), types.Label("/skills/AI")}, - expected: true, - }, - { - name: "domain_prefix_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "healthcare", - }, - labels: []types.Label{types.Label("/domains/healthcare/diagnostics"), types.Label("/skills/AI")}, - expected: true, - }, - { - name: 
"domain_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "finance", - }, - labels: []types.Label{types.Label("/domains/healthcare"), types.Label("/skills/AI")}, - expected: false, - }, - { - name: "domain_partial_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "healthcare/diagnostics/radiology", - }, - labels: []types.Label{types.Label("/domains/healthcare/diagnostics"), types.Label("/skills/AI")}, - expected: false, - }, - - // Module queries - { - name: "module_exact_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime/model", - }, - labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, - expected: true, - }, - { - name: "module_prefix_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime", - }, - labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, - expected: true, - }, - { - name: "module_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "security", - }, - labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, - expected: false, - }, - { - name: "module_partial_no_match", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime/model/python/3.9", - }, - labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, - expected: false, - }, - - // Unspecified queries - { - name: "unspecified_always_matches", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED, - Value: "anything", - }, - labels: []types.Label{types.Label("/skills/AI")}, - expected: true, - }, - { - name: "unspecified_matches_empty_labels", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED, - Value: "anything", - }, - labels: []types.Label{}, - expected: true, - }, - - // Edge cases - { - name: "empty_labels", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - labels: []types.Label{}, - expected: false, - }, - { - name: "case_sensitive_skill", - query: &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "ai", // lowercase - }, - labels: []types.Label{types.Label("/skills/AI")}, // uppercase - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := QueryMatchesLabels(tc.query, tc.labels) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestMatchesAllQueries(t *testing.T) { - ctx := t.Context() - testCID := "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" - - // Mock label retriever that returns predefined labels - mockLabelRetriever := func(_ context.Context, cid string) []types.Label { - if cid == testCID { - return []types.Label{ - types.Label("/skills/AI"), - types.Label("/skills/AI/ML"), - types.Label("/domains/technology"), - types.Label("/modules/runtime/model"), - types.Label("/locators/docker-image"), - } - } - - return []types.Label{} - } - - testCases := []struct { - name string - cid string - queries []*routingv1.RecordQuery - expected bool - }{ - { - name: "no_queries_matches_all", - cid: testCID, - queries: []*routingv1.RecordQuery{}, - 
expected: true, - }, - { - name: "single_matching_query", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - }, - expected: true, - }, - { - name: "single_non_matching_query", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "blockchain", - }, - }, - expected: false, - }, - { - name: "multiple_matching_queries_and_logic", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: "docker-image", - }, - }, - expected: true, - }, - { - name: "mixed_matching_and_non_matching_queries", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", // matches - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "blockchain", // doesn't match - }, - }, - expected: false, // AND logic - all must match - }, - { - name: "domain_query_matches", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "technology", - }, - }, - expected: true, - }, - { - name: "module_query_matches", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime/model", - }, - }, - expected: true, - }, - { - name: "all_query_types_match", - cid: testCID, - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "technology", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime/model", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: "docker-image", - }, - }, - expected: true, // All should match - }, - { - name: "unknown_cid", - cid: "unknown-cid", - queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - }, - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := MatchesAllQueries(ctx, tc.cid, tc.queries, mockLabelRetriever) - assert.Equal(t, tc.expected, result) - }) - } -} - -func TestGetMatchingQueries(t *testing.T) { - testQueries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "web-development", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - Value: "healthcare", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - Value: "runtime/model", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - Value: "docker-image", - }, - } - - testCases := []struct { - name string - labelKey string - expectedMatches int - expectedQueryType routingv1.RecordQueryType - }{ - { - name: "skill_ai_matches", - labelKey: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 1, - expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - }, - { - name: "skill_web_dev_matches", - labelKey: "/skills/web-development/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 1, - expectedQueryType: 
routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - }, - { - name: "locator_matches", - labelKey: "/locators/docker-image/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 1, - expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, - }, - { - name: "domain_matches", - labelKey: "/domains/healthcare/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 1, - expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, - }, - { - name: "module_matches", - labelKey: "/modules/runtime/model/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 1, - expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, - }, - { - name: "no_matches", - labelKey: "/skills/blockchain/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", - expectedMatches: 0, - }, - { - name: "malformed_key", - labelKey: "/invalid-key", - expectedMatches: 0, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - matches := GetMatchingQueries(tc.labelKey, testQueries) - assert.Len(t, matches, tc.expectedMatches) - - if tc.expectedMatches > 0 { - assert.Equal(t, tc.expectedQueryType, matches[0].GetType()) - } - }) - } -} - -func TestQueryMatchingEdgeCases(t *testing.T) { - t.Run("nil_query", func(t *testing.T) { - // This should not panic - result := QueryMatchesLabels(nil, []types.Label{types.Label("/skills/AI")}) - assert.False(t, result) - }) - - t.Run("unknown_query_type", func(t *testing.T) { - query := &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType(999), // Unknown type - Value: "test", - } - result := QueryMatchesLabels(query, []types.Label{types.Label("/skills/AI")}) - assert.False(t, result) - }) - - t.Run("empty_query_value", func(t *testing.T) { - query := &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "", - } - result := QueryMatchesLabels(query, []types.Label{types.Label("/skills/")}) - assert.True(t, result) // Empty value matches "/skills/" prefix - }) - - t.Run("nil_labels", func(t *testing.T) { - query := &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - } - result := QueryMatchesLabels(query, nil) - assert.False(t, result) - }) -} - -// Test the integration between MatchesAllQueries and QueryMatchesLabels. 
-func TestQueryMatchingIntegration(t *testing.T) { - ctx := t.Context() - - // Test with a more complex label retriever - complexLabelRetriever := func(_ context.Context, cid string) []types.Label { - switch cid { - case "ai-record": - return []types.Label{ - types.Label("/skills/AI"), - types.Label("/skills/AI/ML"), - types.Label("/skills/AI/NLP"), - } - case "web-record": - return []types.Label{ - types.Label("/skills/web-development"), - types.Label("/skills/javascript"), - types.Label("/locators/git-repo"), - } - case "mixed-record": - return []types.Label{ - types.Label("/skills/AI"), - types.Label("/skills/web-development"), - types.Label("/domains/healthcare"), - types.Label("/modules/runtime/model"), - types.Label("/locators/docker-image"), - } - default: - return []types.Label{} - } - } - - t.Run("complex_and_logic_test", func(t *testing.T) { - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "web-development", - }, - } - - // Only mixed-record should match both queries - assert.True(t, MatchesAllQueries(ctx, "mixed-record", queries, complexLabelRetriever)) - assert.False(t, MatchesAllQueries(ctx, "ai-record", queries, complexLabelRetriever)) - assert.False(t, MatchesAllQueries(ctx, "web-record", queries, complexLabelRetriever)) - }) - - t.Run("hierarchical_skill_matching", func(t *testing.T) { - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI/ML", - }, - } - - // Should match records with AI/ML or more specific skills - assert.True(t, MatchesAllQueries(ctx, "ai-record", queries, complexLabelRetriever)) - assert.False(t, MatchesAllQueries(ctx, "web-record", queries, complexLabelRetriever)) - assert.False(t, MatchesAllQueries(ctx, "mixed-record", queries, complexLabelRetriever)) // Only has /skills/AI, not AI/ML - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "testing" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + "github.com/stretchr/testify/assert" +) + +func TestQueryMatchesLabels(t *testing.T) { + testCases := []struct { + name string + query *routingv1.RecordQuery + labels []types.Label + expected bool + }{ + // Skill queries + { + name: "skill_exact_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + labels: []types.Label{types.Label("/skills/AI"), types.Label("/skills/web-development")}, + expected: true, + }, + { + name: "skill_prefix_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + labels: []types.Label{types.Label("/skills/AI/ML"), types.Label("/skills/web-development")}, + expected: true, + }, + { + name: "skill_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "blockchain", + }, + labels: []types.Label{types.Label("/skills/AI"), types.Label("/skills/web-development")}, + expected: false, + }, + { + name: "skill_partial_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI/ML/deep-learning", + }, + labels: []types.Label{types.Label("/skills/AI/ML"), types.Label("/skills/web-development")}, + expected: false, + }, + + // Locator queries + { + name: 
"locator_exact_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: "docker-image", + }, + labels: []types.Label{types.Label("/locators/docker-image"), types.Label("/skills/AI")}, + expected: true, + }, + { + name: "locator_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: "git-repo", + }, + labels: []types.Label{types.Label("/locators/docker-image"), types.Label("/skills/AI")}, + expected: false, + }, + + // Domain queries + { + name: "domain_exact_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "healthcare", + }, + labels: []types.Label{types.Label("/domains/healthcare"), types.Label("/skills/AI")}, + expected: true, + }, + { + name: "domain_prefix_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "healthcare", + }, + labels: []types.Label{types.Label("/domains/healthcare/diagnostics"), types.Label("/skills/AI")}, + expected: true, + }, + { + name: "domain_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "finance", + }, + labels: []types.Label{types.Label("/domains/healthcare"), types.Label("/skills/AI")}, + expected: false, + }, + { + name: "domain_partial_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "healthcare/diagnostics/radiology", + }, + labels: []types.Label{types.Label("/domains/healthcare/diagnostics"), types.Label("/skills/AI")}, + expected: false, + }, + + // Module queries + { + name: "module_exact_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime/model", + }, + labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, + expected: true, + }, + { + name: "module_prefix_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime", + }, + labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, + expected: true, + }, + { + name: "module_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "security", + }, + labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, + expected: false, + }, + { + name: "module_partial_no_match", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime/model/python/3.9", + }, + labels: []types.Label{types.Label("/modules/runtime/model"), types.Label("/skills/AI")}, + expected: false, + }, + + // Unspecified queries + { + name: "unspecified_always_matches", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED, + Value: "anything", + }, + labels: []types.Label{types.Label("/skills/AI")}, + expected: true, + }, + { + name: "unspecified_matches_empty_labels", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_UNSPECIFIED, + Value: "anything", + }, + labels: []types.Label{}, + expected: true, + }, + + // Edge cases + { + name: "empty_labels", + query: &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + labels: []types.Label{}, + expected: false, + }, + { + name: "case_sensitive_skill", + query: &routingv1.RecordQuery{ + Type: 
routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "ai", // lowercase + }, + labels: []types.Label{types.Label("/skills/AI")}, // uppercase + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := QueryMatchesLabels(tc.query, tc.labels) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestMatchesAllQueries(t *testing.T) { + ctx := t.Context() + testCID := "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" + + // Mock label retriever that returns predefined labels + mockLabelRetriever := func(_ context.Context, cid string) []types.Label { + if cid == testCID { + return []types.Label{ + types.Label("/skills/AI"), + types.Label("/skills/AI/ML"), + types.Label("/domains/technology"), + types.Label("/modules/runtime/model"), + types.Label("/locators/docker-image"), + } + } + + return []types.Label{} + } + + testCases := []struct { + name string + cid string + queries []*routingv1.RecordQuery + expected bool + }{ + { + name: "no_queries_matches_all", + cid: testCID, + queries: []*routingv1.RecordQuery{}, + expected: true, + }, + { + name: "single_matching_query", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + }, + expected: true, + }, + { + name: "single_non_matching_query", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "blockchain", + }, + }, + expected: false, + }, + { + name: "multiple_matching_queries_and_logic", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: "docker-image", + }, + }, + expected: true, + }, + { + name: "mixed_matching_and_non_matching_queries", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", // matches + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "blockchain", // doesn't match + }, + }, + expected: false, // AND logic - all must match + }, + { + name: "domain_query_matches", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "technology", + }, + }, + expected: true, + }, + { + name: "module_query_matches", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime/model", + }, + }, + expected: true, + }, + { + name: "all_query_types_match", + cid: testCID, + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "technology", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime/model", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: "docker-image", + }, + }, + expected: true, // All should match + }, + { + name: "unknown_cid", + cid: "unknown-cid", + queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + }, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := MatchesAllQueries(ctx, tc.cid, tc.queries, mockLabelRetriever) + assert.Equal(t, tc.expected, result) + }) + } +} + +func 
TestGetMatchingQueries(t *testing.T) { + testQueries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "web-development", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + Value: "healthcare", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + Value: "runtime/model", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + Value: "docker-image", + }, + } + + testCases := []struct { + name string + labelKey string + expectedMatches int + expectedQueryType routingv1.RecordQueryType + }{ + { + name: "skill_ai_matches", + labelKey: "/skills/AI/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 1, + expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + }, + { + name: "skill_web_dev_matches", + labelKey: "/skills/web-development/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 1, + expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + }, + { + name: "locator_matches", + labelKey: "/locators/docker-image/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 1, + expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_LOCATOR, + }, + { + name: "domain_matches", + labelKey: "/domains/healthcare/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 1, + expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_DOMAIN, + }, + { + name: "module_matches", + labelKey: "/modules/runtime/model/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 1, + expectedQueryType: routingv1.RecordQueryType_RECORD_QUERY_TYPE_MODULE, + }, + { + name: "no_matches", + labelKey: "/skills/blockchain/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi/peer1", + expectedMatches: 0, + }, + { + name: "malformed_key", + labelKey: "/invalid-key", + expectedMatches: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + matches := GetMatchingQueries(tc.labelKey, testQueries) + assert.Len(t, matches, tc.expectedMatches) + + if tc.expectedMatches > 0 { + assert.Equal(t, tc.expectedQueryType, matches[0].GetType()) + } + }) + } +} + +func TestQueryMatchingEdgeCases(t *testing.T) { + t.Run("nil_query", func(t *testing.T) { + // This should not panic + result := QueryMatchesLabels(nil, []types.Label{types.Label("/skills/AI")}) + assert.False(t, result) + }) + + t.Run("unknown_query_type", func(t *testing.T) { + query := &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType(999), // Unknown type + Value: "test", + } + result := QueryMatchesLabels(query, []types.Label{types.Label("/skills/AI")}) + assert.False(t, result) + }) + + t.Run("empty_query_value", func(t *testing.T) { + query := &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "", + } + result := QueryMatchesLabels(query, []types.Label{types.Label("/skills/")}) + assert.True(t, result) // Empty value matches "/skills/" prefix + }) + + t.Run("nil_labels", func(t *testing.T) { + query := &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + } + result := QueryMatchesLabels(query, nil) + assert.False(t, result) + }) +} + +// Test the integration between MatchesAllQueries and QueryMatchesLabels. 
+func TestQueryMatchingIntegration(t *testing.T) { + ctx := t.Context() + + // Test with a more complex label retriever + complexLabelRetriever := func(_ context.Context, cid string) []types.Label { + switch cid { + case "ai-record": + return []types.Label{ + types.Label("/skills/AI"), + types.Label("/skills/AI/ML"), + types.Label("/skills/AI/NLP"), + } + case "web-record": + return []types.Label{ + types.Label("/skills/web-development"), + types.Label("/skills/javascript"), + types.Label("/locators/git-repo"), + } + case "mixed-record": + return []types.Label{ + types.Label("/skills/AI"), + types.Label("/skills/web-development"), + types.Label("/domains/healthcare"), + types.Label("/modules/runtime/model"), + types.Label("/locators/docker-image"), + } + default: + return []types.Label{} + } + } + + t.Run("complex_and_logic_test", func(t *testing.T) { + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "web-development", + }, + } + + // Only mixed-record should match both queries + assert.True(t, MatchesAllQueries(ctx, "mixed-record", queries, complexLabelRetriever)) + assert.False(t, MatchesAllQueries(ctx, "ai-record", queries, complexLabelRetriever)) + assert.False(t, MatchesAllQueries(ctx, "web-record", queries, complexLabelRetriever)) + }) + + t.Run("hierarchical_skill_matching", func(t *testing.T) { + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI/ML", + }, + } + + // Should match records with AI/ML or more specific skills + assert.True(t, MatchesAllQueries(ctx, "ai-record", queries, complexLabelRetriever)) + assert.False(t, MatchesAllQueries(ctx, "web-record", queries, complexLabelRetriever)) + assert.False(t, MatchesAllQueries(ctx, "mixed-record", queries, complexLabelRetriever)) // Only has /skills/AI, not AI/ML + }) +} diff --git a/server/routing/routing.go b/server/routing/routing.go index 2ee4433cf..c583e08da 100644 --- a/server/routing/routing.go +++ b/server/routing/routing.go @@ -1,164 +1,164 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package routing provides distributed content routing capabilities for the dir system. -// It implements both local and remote routing strategies with automatic cleanup of stale data. -// -// The routing system consists of: -// - Local routing: Fast queries against local datastore -// - Remote routing: DHT-based discovery across the network -// - Cleanup service: Automatic removal of stale labels and orphaned records -// -// Label metadata is stored in JSON format with timestamps for lifecycle management. -package routing - -import ( - "context" - "fmt" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/types" - "google.golang.org/grpc/status" -) - -type route struct { - local *routeLocal - remote *routeRemote - eventBus *events.SafeEventBus -} - -// hasPeersInRoutingTable checks if we have any peers in the DHT routing table. -// This determines whether we can perform network operations or should fall back to local-only mode. 
-func (r *route) hasPeersInRoutingTable() bool { - if r.remote == nil || r.remote.server == nil { - return false - } - - return r.remote.server.DHT().RoutingTable().Size() > 0 -} - -func New(ctx context.Context, store types.StoreAPI, opts types.APIOptions) (types.RoutingAPI, error) { - // Create main router - mainRounter := &route{ - eventBus: opts.EventBus(), - } - - // Create routing datastore - var dsOpts []datastore.Option - if dstoreDir := opts.Config().Routing.DatastoreDir; dstoreDir != "" { - dsOpts = append(dsOpts, datastore.WithFsProvider(dstoreDir)) - } - - dstore, err := datastore.New(dsOpts...) - if err != nil { - return nil, fmt.Errorf("failed to create routing datastore: %w", err) - } - - // Create remote router first to get the peer ID - mainRounter.remote, err = newRemote(ctx, store, dstore, opts) - if err != nil { - return nil, fmt.Errorf("failed to create remote routing: %w", err) - } - - // Get local peer ID from the remote server host - localPeerID := mainRounter.remote.server.Host().ID().String() - - // Create local router with peer ID - mainRounter.local = newLocal(store, dstore, localPeerID) - - return mainRounter, nil -} - -func (r *route) Publish(ctx context.Context, record types.Record) error { - // Always publish data locally for archival/querying - err := r.local.Publish(ctx, record) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to publish locally: %s", st.Message()) - } - - // Only publish to network if peers are available - if r.hasPeersInRoutingTable() { - err = r.remote.Publish(ctx, record) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to publish to the network: %s", st.Message()) - } - } - - // Emit RECORD_PUBLISHED event after successful publication - labels := types.GetLabelsFromRecord(record) - labelStrings := make([]string, len(labels)) - - for i, label := range labels { - labelStrings[i] = label.String() - } - - r.eventBus.RecordPublished(record.GetCid(), labelStrings) - - return nil -} - -func (r *route) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) { - // List is always local-only - it returns records that this peer is currently providing - // This operation does not interact with the network (per proto comment) - return r.local.List(ctx, req) -} - -func (r *route) Search(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) { - // Search is always remote-only - it returns records from other peers using cached announcements - // This operation queries locally cached remote announcements from DHT - return r.remote.Search(ctx, req) -} - -func (r *route) Unpublish(ctx context.Context, record types.Record) error { - err := r.local.Unpublish(ctx, record) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to unpublish locally: %s", st.Message()) - } - - // Emit RECORD_UNPUBLISHED event after successful unpublication - r.eventBus.RecordUnpublished(record.GetCid()) - - // no need to explicitly handle unpublishing from the network - // TODO clarify if network sync trigger is needed here - return nil -} - -// Stop stops the routing services and releases resources. -// This should be called during server shutdown to clean up gracefully. 
-func (r *route) Stop() error {
-	// Stop remote routing (includes GossipSub and p2p server)
-	if r.remote != nil {
-		if err := r.remote.Stop(); err != nil {
-			return fmt.Errorf("failed to stop remote routing: %w", err)
-		}
-	}
-
-	return nil
-}
-
-// IsReady checks if the routing subsystem is ready to serve traffic.
-func (r *route) IsReady(ctx context.Context) bool {
-	// Check if local list request is successful
-	_, err := r.local.List(ctx, &routingv1.ListRequest{})
-	if err != nil {
-		localLogger.Debug("Routing not ready: local list request failed", "error", err)
-
-		return false
-	}
-
-	if r.remote == nil {
-		remoteLogger.Debug("Routing not ready: remote router is nil")
-
-		return false
-	}
-
-	return r.remote.IsReady(ctx)
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+// Package routing provides distributed content routing capabilities for the dir system.
+// It implements both local and remote routing strategies with automatic cleanup of stale data.
+//
+// The routing system consists of:
+// - Local routing: Fast queries against local datastore
+// - Remote routing: DHT-based discovery across the network
+// - Cleanup service: Automatic removal of stale labels and orphaned records
+//
+// Label metadata is stored in JSON format with timestamps for lifecycle management.
+package routing
+
+import (
+	"context"
+	"fmt"
+
+	routingv1 "github.com/agntcy/dir/api/routing/v1"
+	"github.com/agntcy/dir/server/datastore"
+	"github.com/agntcy/dir/server/events"
+	"github.com/agntcy/dir/server/types"
+	"google.golang.org/grpc/status"
+)
+
+type route struct {
+	local    *routeLocal
+	remote   *routeRemote
+	eventBus *events.SafeEventBus
+}
+
+// hasPeersInRoutingTable checks if we have any peers in the DHT routing table.
+// This determines whether we can perform network operations or should fall back to local-only mode.
+func (r *route) hasPeersInRoutingTable() bool {
+	if r.remote == nil || r.remote.server == nil {
+		return false
+	}
+
+	return r.remote.server.DHT().RoutingTable().Size() > 0
+}
+
+func New(ctx context.Context, store types.StoreAPI, opts types.APIOptions) (types.RoutingAPI, error) {
+	// Create main router
+	mainRouter := &route{
+		eventBus: opts.EventBus(),
+	}
+
+	// Create routing datastore
+	var dsOpts []datastore.Option
+	if dstoreDir := opts.Config().Routing.DatastoreDir; dstoreDir != "" {
+		dsOpts = append(dsOpts, datastore.WithFsProvider(dstoreDir))
+	}
+
+	dstore, err := datastore.New(dsOpts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create routing datastore: %w", err)
+	}
+
+	// Create remote router first to get the peer ID
+	mainRouter.remote, err = newRemote(ctx, store, dstore, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create remote routing: %w", err)
+	}
+
+	// Get local peer ID from the remote server host
+	localPeerID := mainRouter.remote.server.Host().ID().String()
+
+	// Create local router with peer ID
+	mainRouter.local = newLocal(store, dstore, localPeerID)
+
+	return mainRouter, nil
+}
+
+func (r *route) Publish(ctx context.Context, record types.Record) error {
+	// Always publish data locally for archival/querying
+	err := r.local.Publish(ctx, record)
+	if err != nil {
+		st := status.Convert(err)
+
+		return status.Errorf(st.Code(), "failed to publish locally: %s", st.Message())
+	}
+
+	// Only publish to network if peers are available
+	if r.hasPeersInRoutingTable() {
+		err = r.remote.Publish(ctx, record)
+		if err != nil {
+			st := status.Convert(err)
+
+			return status.Errorf(st.Code(), "failed to publish to the network: %s", st.Message())
+		}
+	}
+
+	// Emit RECORD_PUBLISHED event after successful publication
+	labels := types.GetLabelsFromRecord(record)
+	labelStrings := make([]string, len(labels))
+
+	for i, label := range labels {
+		labelStrings[i] = label.String()
+	}
+
+	r.eventBus.RecordPublished(record.GetCid(), labelStrings)
+
+	return nil
+}
+
+func (r *route) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) {
+	// List is always local-only - it returns records that this peer is currently providing
+	// This operation does not interact with the network (per proto comment)
+	return r.local.List(ctx, req)
+}
+
+func (r *route) Search(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) {
+	// Search is always remote-only - it returns records from other peers using cached announcements
+	// This operation queries locally cached remote announcements from DHT
+	return r.remote.Search(ctx, req)
+}
+
+func (r *route) Unpublish(ctx context.Context, record types.Record) error {
+	err := r.local.Unpublish(ctx, record)
+	if err != nil {
+		st := status.Convert(err)
+
+		return status.Errorf(st.Code(), "failed to unpublish locally: %s", st.Message())
+	}
+
+	// Emit RECORD_UNPUBLISHED event after successful unpublication
+	r.eventBus.RecordUnpublished(record.GetCid())
+
+	// no need to explicitly handle unpublishing from the network
+	// TODO clarify if network sync trigger is needed here
+	return nil
+}
+
+// Stop stops the routing services and releases resources.
+// This should be called during server shutdown to clean up gracefully.
+func (r *route) Stop() error {
+	// Stop remote routing (includes GossipSub and p2p server)
+	if r.remote != nil {
+		if err := r.remote.Stop(); err != nil {
+			return fmt.Errorf("failed to stop remote routing: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// IsReady checks if the routing subsystem is ready to serve traffic.
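IsReady, whose body continues just below, combines a local list probe with the remote router's readiness. One plausible consumer, sketched here with entirely hypothetical names, is a health endpoint that bounds the probe with a timeout.

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// readier stands in for the routing API's IsReady method.
type readier interface {
	IsReady(ctx context.Context) bool
}

// readyHandler is a hypothetical /readyz endpoint: it bounds the
// probe so a stuck datastore cannot hang the health check forever.
func readyHandler(r readier) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		ctx, cancel := context.WithTimeout(req.Context(), 2*time.Second)
		defer cancel()

		if !r.IsReady(ctx) {
			http.Error(w, "routing not ready", http.StatusServiceUnavailable)

			return
		}

		fmt.Fprintln(w, "ok")
	}
}

type alwaysReady struct{}

func (alwaysReady) IsReady(context.Context) bool { return true }

func main() {
	mux := http.NewServeMux()
	mux.Handle("/readyz", readyHandler(alwaysReady{}))
	// srv := &http.Server{Addr: ":8080", Handler: mux}
	// _ = srv.ListenAndServe()
}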
+func (r *route) IsReady(ctx context.Context) bool { + // Check if local list request is successful + _, err := r.local.List(ctx, &routingv1.ListRequest{}) + if err != nil { + localLogger.Debug("Routing not ready: local list request failed", "error", err) + + return false + } + + if r.remote == nil { + remoteLogger.Debug("Routing not ready: remote router is nil") + + return false + } + + return r.remote.IsReady(ctx) +} diff --git a/server/routing/routing_events_test.go b/server/routing/routing_events_test.go index 837c53ba2..97ba691be 100644 --- a/server/routing/routing_events_test.go +++ b/server/routing/routing_events_test.go @@ -1,132 +1,132 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "testing" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" -) - -func TestRoutingPublishEmitsEvent(t *testing.T) { - // Create event bus - eventBus := events.NewEventBus() - safeEventBus := events.NewSafeEventBus(eventBus) - - // Subscribe to events - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, - } - - subID, eventCh := eventBus.Subscribe(req) - defer eventBus.Unsubscribe(subID) - - // Create a simple route with event bus - // Note: We can't easily test the full routing service without complex setup, - // so this is a minimal test to verify the event emission code path - r := &route{ - eventBus: safeEventBus, - // local and remote would normally be initialized, but for event testing - // we're just verifying the event emission mechanism - } - - // Create a test record - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("AI"), ClassName: toPtr("Processing")}, - }, - }) - - // Directly emit event (simulating what Publish does) - labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) - labelStrings := make([]string, len(labels)) - - for i, label := range labels { - labelStrings[i] = label.String() - } - - r.eventBus.RecordPublished(record.GetCid(), labelStrings) - - // Wait for async delivery to complete - eventBus.WaitForAsyncPublish() - - // Verify event was emitted - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED { - t.Errorf("Expected RECORD_PUBLISHED event, got %v", event.Type) - } - - if event.ResourceID != record.GetCid() { - t.Errorf("Expected event resource_id %s, got %s", record.GetCid(), event.ResourceID) - } - - if len(event.Labels) == 0 { - t.Error("Expected labels to be included in event") - } - default: - t.Error("Expected to receive RECORD_PUBLISHED event") - } -} - -func TestRoutingUnpublishEmitsEvent(t *testing.T) { - // Create event bus - eventBus := events.NewEventBus() - safeEventBus := events.NewSafeEventBus(eventBus) - - // Subscribe to events - req := &eventsv1.ListenRequest{ - EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED}, - } - - subID, eventCh := eventBus.Subscribe(req) - defer eventBus.Unsubscribe(subID) - - // Create a simple route with event bus - r := &route{ - eventBus: safeEventBus, - } - - // Test CID - testCID := 
"bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" - - // Directly emit event (simulating what Unpublish does) - r.eventBus.RecordUnpublished(testCID) - - // Wait for async delivery to complete - eventBus.WaitForAsyncPublish() - - // Verify event was emitted - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED { - t.Errorf("Expected RECORD_UNPUBLISHED event, got %v", event.Type) - } - - if event.ResourceID != testCID { - t.Errorf("Expected event resource_id %s, got %s", testCID, event.ResourceID) - } - default: - t.Error("Expected to receive RECORD_UNPUBLISHED event") - } -} - -func TestRoutingWithNilEventBus(_ *testing.T) { - // Verify routing works even with nil event bus (shouldn't panic) - r := &route{ - eventBus: events.NewSafeEventBus(nil), - } - - // Should not panic - testCID := "bafytest123" - r.eventBus.RecordPublished(testCID, []string{"/test"}) - r.eventBus.RecordUnpublished(testCID) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "testing" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" +) + +func TestRoutingPublishEmitsEvent(t *testing.T) { + // Create event bus + eventBus := events.NewEventBus() + safeEventBus := events.NewSafeEventBus(eventBus) + + // Subscribe to events + req := &eventsv1.ListenRequest{ + EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED}, + } + + subID, eventCh := eventBus.Subscribe(req) + defer eventBus.Unsubscribe(subID) + + // Create a simple route with event bus + // Note: We can't easily test the full routing service without complex setup, + // so this is a minimal test to verify the event emission code path + r := &route{ + eventBus: safeEventBus, + // local and remote would normally be initialized, but for event testing + // we're just verifying the event emission mechanism + } + + // Create a test record + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("AI"), ClassName: toPtr("Processing")}, + }, + }) + + // Directly emit event (simulating what Publish does) + labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) + labelStrings := make([]string, len(labels)) + + for i, label := range labels { + labelStrings[i] = label.String() + } + + r.eventBus.RecordPublished(record.GetCid(), labelStrings) + + // Wait for async delivery to complete + eventBus.WaitForAsyncPublish() + + // Verify event was emitted + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUBLISHED { + t.Errorf("Expected RECORD_PUBLISHED event, got %v", event.Type) + } + + if event.ResourceID != record.GetCid() { + t.Errorf("Expected event resource_id %s, got %s", record.GetCid(), event.ResourceID) + } + + if len(event.Labels) == 0 { + t.Error("Expected labels to be included in event") + } + default: + t.Error("Expected to receive RECORD_PUBLISHED event") + } +} + +func TestRoutingUnpublishEmitsEvent(t *testing.T) { + // Create event bus + eventBus := events.NewEventBus() + safeEventBus := events.NewSafeEventBus(eventBus) + + // Subscribe to events + req := &eventsv1.ListenRequest{ + 
EventTypes: []eventsv1.EventType{eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED}, + } + + subID, eventCh := eventBus.Subscribe(req) + defer eventBus.Unsubscribe(subID) + + // Create a simple route with event bus + r := &route{ + eventBus: safeEventBus, + } + + // Test CID + testCID := "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi" + + // Directly emit event (simulating what Unpublish does) + r.eventBus.RecordUnpublished(testCID) + + // Wait for async delivery to complete + eventBus.WaitForAsyncPublish() + + // Verify event was emitted + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_UNPUBLISHED { + t.Errorf("Expected RECORD_UNPUBLISHED event, got %v", event.Type) + } + + if event.ResourceID != testCID { + t.Errorf("Expected event resource_id %s, got %s", testCID, event.ResourceID) + } + default: + t.Error("Expected to receive RECORD_UNPUBLISHED event") + } +} + +func TestRoutingWithNilEventBus(_ *testing.T) { + // Verify routing works even with nil event bus (shouldn't panic) + r := &route{ + eventBus: events.NewSafeEventBus(nil), + } + + // Should not panic + testCID := "bafytest123" + r.eventBus.RecordPublished(testCID, []string{"/test"}) + r.eventBus.RecordUnpublished(testCID) +} diff --git a/server/routing/routing_local.go b/server/routing/routing_local.go index eb888e8a4..b5835ae63 100644 --- a/server/routing/routing_local.go +++ b/server/routing/routing_local.go @@ -1,308 +1,308 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "encoding/json" - "strings" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var localLogger = logging.Logger("routing/local") - -// operations performed locally. 
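TestRoutingWithNilEventBus above fixes an invariant: the SafeEventBus wrapper must be a no-op, not a panic, when no inner bus exists. A minimal sketch of such a guard; the type and method names here are illustrative, not the package's real implementation.

package main

import "fmt"

// bus stands in for the concrete event bus.
type bus struct{}

func (b *bus) publish(eventType, cid string, labels []string) {
	fmt.Println("event:", eventType, cid, labels)
}

// safeBus forwards events only when an inner bus is present, so a
// route constructed without events support can never panic.
type safeBus struct{ inner *bus }

func (s *safeBus) RecordPublished(cid string, labels []string) {
	if s == nil || s.inner == nil {
		return // no-op without a bus, mirroring the nil-bus test above
	}
	s.inner.publish("RECORD_PUBLISHED", cid, labels)
}

func (s *safeBus) RecordUnpublished(cid string) {
	if s == nil || s.inner == nil {
		return
	}
	s.inner.publish("RECORD_UNPUBLISHED", cid, nil)
}

func main() {
	var quiet safeBus // zero value: nil inner bus, still safe to call
	quiet.RecordPublished("bafytest123", []string{"/test"})
	quiet.RecordUnpublished("bafytest123")

	loud := safeBus{inner: &bus{}}
	loud.RecordPublished("bafytest123", []string{"/test"})
}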
-type routeLocal struct { - store types.StoreAPI - dstore types.Datastore - localPeerID string // Cached local peer ID for efficient filtering -} - -func newLocal(store types.StoreAPI, dstore types.Datastore, localPeerID string) *routeLocal { - return &routeLocal{ - store: store, - dstore: dstore, - localPeerID: localPeerID, - } -} - -func (r *routeLocal) Publish(ctx context.Context, record types.Record) error { - if record == nil { - return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck // Mock should return exact error without wrapping - } - - cid := record.GetCid() - if cid == "" { - return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck - } - - localLogger.Debug("Called local routing's Publish method", "cid", cid) - - metrics, err := loadMetrics(ctx, r.dstore) - if err != nil { - return status.Errorf(codes.Internal, "failed to load metrics: %v", err) - } - - batch, err := r.dstore.Batch(ctx) - if err != nil { - return status.Errorf(codes.Internal, "failed to create batch: %v", err) - } - - // the key where we will save the record - recordKey := datastore.NewKey("/records/" + cid) - - // check if we have the record already - // this is useful to avoid updating metrics and running the same operation multiple times - recordExists, err := r.dstore.Has(ctx, recordKey) - if err != nil { - return status.Errorf(codes.Internal, "failed to check if record exists: %v", err) - } - - if recordExists { - localLogger.Info("Skipping republish as record was already published", "cid", cid) - - return nil - } - - // store record for later lookup - if err := batch.Put(ctx, recordKey, nil); err != nil { - return status.Errorf(codes.Internal, "failed to put record key: %v", err) - } - - // Update metrics for all record labels and store them locally for queries - // Note: This handles ALL local storage for both local-only and network scenarios - // Network announcements are handled separately by routing_remote when peers are available - labelList := types.GetLabelsFromRecord(record) - for _, label := range labelList { - // Create minimal metadata (PeerID and CID now in key) - metadata := &types.LabelMetadata{ - Timestamp: time.Now(), - LastSeen: time.Now(), - } - - // Serialize metadata to JSON - metadataBytes, err := json.Marshal(metadata) - if err != nil { - return status.Errorf(codes.Internal, "failed to serialize label metadata: %v", err) - } - - // Store with enhanced self-descriptive key: /skills/AI/CID123/Peer1 - enhancedKey := BuildEnhancedLabelKey(label, cid, r.localPeerID) - - labelKey := datastore.NewKey(enhancedKey) - if err := batch.Put(ctx, labelKey, metadataBytes); err != nil { - return status.Errorf(codes.Internal, "failed to put label key: %v", err) - } - - metrics.increment(label) - } - - err = batch.Commit(ctx) - if err != nil { - return status.Errorf(codes.Internal, "failed to commit batch: %v", err) - } - - // sync metrics - err = metrics.update(ctx, r.dstore) - if err != nil { - return status.Errorf(codes.Internal, "failed to update metrics: %v", err) - } - - localLogger.Info("Successfully published record", "cid", cid) - - return nil -} - -//nolint:cyclop -func (r *routeLocal) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) { - localLogger.Debug("Called local routing's List method", "req", req) - - // ✅ DEFENSIVE: Deduplicate queries for consistent behavior (same as remote Search) - originalQueries := req.GetQueries() - deduplicatedQueries := deduplicateQueries(originalQueries) - - 
if len(originalQueries) != len(deduplicatedQueries) { - localLogger.Info("Deduplicated list queries for consistent filtering", - "originalCount", len(originalQueries), "deduplicatedCount", len(deduplicatedQueries)) - } - - // Output channel for results - outCh := make(chan *routingv1.ListResponse) - - // Process in background with deduplicated queries - go func() { - defer close(outCh) - - r.listLocalRecords(ctx, deduplicatedQueries, req.GetLimit(), outCh) - }() - - return outCh, nil -} - -// listLocalRecords lists all local records with optional query filtering. -// Uses the simple and efficient approach: start with /records/ index, then filter by queries. -func (r *routeLocal) listLocalRecords(ctx context.Context, queries []*routingv1.RecordQuery, limit uint32, outCh chan<- *routingv1.ListResponse) { - processedCount := 0 - limitInt := int(limit) - - // Step 1: Get all local record CIDs from /records/ index - recordResults, err := r.dstore.Query(ctx, query.Query{ - Prefix: "/records/", - }) - if err != nil { - localLogger.Error("Failed to query local records", "error", err) - - return - } - defer recordResults.Close() - - // Step 2: For each local record, check if it matches ALL queries - for result := range recordResults.Next() { - if result.Error != nil { - localLogger.Warn("Error reading record entry", "key", result.Key, "error", result.Error) - - continue - } - - // Extract CID from record key: /records/CID123 → CID123 - cid := strings.TrimPrefix(result.Key, "/records/") - if cid == "" { - continue - } - - // Check if this record matches all queries (AND relationship) - if r.matchesAllQueries(ctx, cid, queries) { - // Get labels for this record - internalLabels := r.getRecordLabelsEfficiently(ctx, cid) - - // Convert []Label to []string for gRPC API boundary - apiLabels := make([]string, len(internalLabels)) - for i, label := range internalLabels { - apiLabels[i] = label.String() - } - - // Send the response - outCh <- &routingv1.ListResponse{ - RecordRef: &corev1.RecordRef{Cid: cid}, - Labels: apiLabels, - } - - processedCount++ - if limitInt > 0 && processedCount >= limitInt { - break - } - } - } - - localLogger.Debug("Completed List operation", "processed", processedCount, "queries", len(queries)) -} - -// matchesAllQueries checks if a record matches ALL provided queries (AND relationship). -// Uses shared query matching logic with local label retrieval strategy. -func (r *routeLocal) matchesAllQueries(ctx context.Context, cid string, queries []*routingv1.RecordQuery) bool { - // Inject local label retrieval strategy into shared query matching logic - return MatchesAllQueries(ctx, cid, queries, r.getRecordLabelsEfficiently) -} - -// getRecordLabelsEfficiently gets labels for a record by extracting them from datastore keys. -// This completely avoids expensive Pull operations by using the fact that labels are stored as keys. -// This function is designed to be resilient - it never returns an error, only logs warnings. 
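getRecordLabelsEfficiently, continued below, never touches the store: it recovers labels by parsing self-descriptive keys such as /skills/AI/ML/CID123/Peer1. An illustrative build/parse pair follows, assuming only that the final two path segments are the CID and the peer ID; the package's real helpers are BuildEnhancedLabelKey and ParseEnhancedLabelKey.

package main

import (
	"fmt"
	"strings"
)

// buildKey appends CID and peer ID to a label path, mirroring the
// /skills/AI/CID123/Peer1 layout described in the comments above.
func buildKey(label, cid, peerID string) string {
	return label + "/" + cid + "/" + peerID
}

// parseKey assumes the final two segments are CID and peer ID; every
// leading segment belongs to the label itself.
func parseKey(key string) (label, cid, peerID string, err error) {
	parts := strings.Split(strings.TrimPrefix(key, "/"), "/")
	if len(parts) < 3 {
		return "", "", "", fmt.Errorf("malformed enhanced key: %q", key)
	}

	label = "/" + strings.Join(parts[:len(parts)-2], "/")

	return label, parts[len(parts)-2], parts[len(parts)-1], nil
}

func main() {
	key := buildKey("/skills/AI/ML", "CID123", "Peer1")
	fmt.Println(key) // /skills/AI/ML/CID123/Peer1

	label, cid, peer, _ := parseKey(key)
	fmt.Println(label, cid, peer) // /skills/AI/ML CID123 Peer1
}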
-func (r *routeLocal) getRecordLabelsEfficiently(ctx context.Context, cid string) []types.Label { - var labelList []types.Label - - // Use shared namespace iteration function - entries, err := QueryAllNamespaces(ctx, r.dstore) - if err != nil { - localLogger.Error("Failed to get namespace entries for labels", "cid", cid, "error", err) - - return labelList - } - - // Find keys for this CID and local peer: "/skills/AI/ML/CID123/Peer1" - for _, entry := range entries { - // Parse the enhanced key to get components - label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - localLogger.Warn("Failed to parse enhanced label key", "key", entry.Key, "error", err) - - continue - } - - // Check if this key matches our CID and is from local peer - if keyCID == cid && keyPeerID == r.localPeerID { - labelList = append(labelList, label) - } - } - - return labelList -} - -func (r *routeLocal) Unpublish(ctx context.Context, record types.Record) error { - if record == nil { - return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck // Mock should return exact error without wrapping - } - - cid := record.GetCid() - if cid == "" { - return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck - } - - localLogger.Debug("Called local routing's Unpublish method", "cid", cid) - - // load metrics for the client - metrics, err := loadMetrics(ctx, r.dstore) - if err != nil { - return status.Errorf(codes.Internal, "failed to load metrics: %v", err) - } - - batch, err := r.dstore.Batch(ctx) - if err != nil { - return status.Errorf(codes.Internal, "failed to create batch: %v", err) - } - - // get record key and remove record - recordKey := datastore.NewKey("/records/" + cid) - if err := batch.Delete(ctx, recordKey); err != nil { - return status.Errorf(codes.Internal, "failed to delete record key: %v", err) - } - - // keep track of all record labels - labelList := types.GetLabelsFromRecord(record) - - for _, label := range labelList { - // Delete enhanced key with CID and PeerID - enhancedKey := BuildEnhancedLabelKey(label, cid, r.localPeerID) - - labelKey := datastore.NewKey(enhancedKey) - if err := batch.Delete(ctx, labelKey); err != nil { - return status.Errorf(codes.Internal, "failed to delete label key: %v", err) - } - - metrics.decrement(label) - } - - err = batch.Commit(ctx) - if err != nil { - return status.Errorf(codes.Internal, "failed to commit batch: %v", err) - } - - // sync metrics - err = metrics.update(ctx, r.dstore) - if err != nil { - return status.Errorf(codes.Internal, "failed to update metrics: %v", err) - } - - localLogger.Info("Successfully unpublished record", "cid", cid) - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "encoding/json" + "strings" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var localLogger = logging.Logger("routing/local") + +// operations performed locally. 
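Publish, re-added below, stores only lifecycle timestamps as the JSON value under each enhanced key, since the peer ID and CID already live in the key itself. A round-trip sketch: the struct shape mirrors types.LabelMetadata, but the JSON tags and the 48-hour staleness cut-off here are assumptions for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// labelMetadata mirrors the minimal value stored under each enhanced
// label key (tags assumed; see types.LabelMetadata for the real shape).
type labelMetadata struct {
	Timestamp time.Time `json:"timestamp"`
	LastSeen  time.Time `json:"last_seen"`
}

func main() {
	now := time.Now().UTC()
	meta := labelMetadata{Timestamp: now, LastSeen: now}

	// Serialize for batch.Put under a key like /skills/AI/CID123/Peer1.
	raw, err := json.Marshal(meta)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	// A cleanup pass would unmarshal and compare LastSeen to a TTL.
	var got labelMetadata
	if err := json.Unmarshal(raw, &got); err != nil {
		panic(err)
	}
	fmt.Println("stale?", time.Since(got.LastSeen) > 48*time.Hour)
}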
+type routeLocal struct { + store types.StoreAPI + dstore types.Datastore + localPeerID string // Cached local peer ID for efficient filtering +} + +func newLocal(store types.StoreAPI, dstore types.Datastore, localPeerID string) *routeLocal { + return &routeLocal{ + store: store, + dstore: dstore, + localPeerID: localPeerID, + } +} + +func (r *routeLocal) Publish(ctx context.Context, record types.Record) error { + if record == nil { + return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck // Mock should return exact error without wrapping + } + + cid := record.GetCid() + if cid == "" { + return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck + } + + localLogger.Debug("Called local routing's Publish method", "cid", cid) + + metrics, err := loadMetrics(ctx, r.dstore) + if err != nil { + return status.Errorf(codes.Internal, "failed to load metrics: %v", err) + } + + batch, err := r.dstore.Batch(ctx) + if err != nil { + return status.Errorf(codes.Internal, "failed to create batch: %v", err) + } + + // the key where we will save the record + recordKey := datastore.NewKey("/records/" + cid) + + // check if we have the record already + // this is useful to avoid updating metrics and running the same operation multiple times + recordExists, err := r.dstore.Has(ctx, recordKey) + if err != nil { + return status.Errorf(codes.Internal, "failed to check if record exists: %v", err) + } + + if recordExists { + localLogger.Info("Skipping republish as record was already published", "cid", cid) + + return nil + } + + // store record for later lookup + if err := batch.Put(ctx, recordKey, nil); err != nil { + return status.Errorf(codes.Internal, "failed to put record key: %v", err) + } + + // Update metrics for all record labels and store them locally for queries + // Note: This handles ALL local storage for both local-only and network scenarios + // Network announcements are handled separately by routing_remote when peers are available + labelList := types.GetLabelsFromRecord(record) + for _, label := range labelList { + // Create minimal metadata (PeerID and CID now in key) + metadata := &types.LabelMetadata{ + Timestamp: time.Now(), + LastSeen: time.Now(), + } + + // Serialize metadata to JSON + metadataBytes, err := json.Marshal(metadata) + if err != nil { + return status.Errorf(codes.Internal, "failed to serialize label metadata: %v", err) + } + + // Store with enhanced self-descriptive key: /skills/AI/CID123/Peer1 + enhancedKey := BuildEnhancedLabelKey(label, cid, r.localPeerID) + + labelKey := datastore.NewKey(enhancedKey) + if err := batch.Put(ctx, labelKey, metadataBytes); err != nil { + return status.Errorf(codes.Internal, "failed to put label key: %v", err) + } + + metrics.increment(label) + } + + err = batch.Commit(ctx) + if err != nil { + return status.Errorf(codes.Internal, "failed to commit batch: %v", err) + } + + // sync metrics + err = metrics.update(ctx, r.dstore) + if err != nil { + return status.Errorf(codes.Internal, "failed to update metrics: %v", err) + } + + localLogger.Info("Successfully published record", "cid", cid) + + return nil +} + +//nolint:cyclop +func (r *routeLocal) List(ctx context.Context, req *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) { + localLogger.Debug("Called local routing's List method", "req", req) + + // ✅ DEFENSIVE: Deduplicate queries for consistent behavior (same as remote Search) + originalQueries := req.GetQueries() + deduplicatedQueries := deduplicateQueries(originalQueries) + + 
if len(originalQueries) != len(deduplicatedQueries) { + localLogger.Info("Deduplicated list queries for consistent filtering", + "originalCount", len(originalQueries), "deduplicatedCount", len(deduplicatedQueries)) + } + + // Output channel for results + outCh := make(chan *routingv1.ListResponse) + + // Process in background with deduplicated queries + go func() { + defer close(outCh) + + r.listLocalRecords(ctx, deduplicatedQueries, req.GetLimit(), outCh) + }() + + return outCh, nil +} + +// listLocalRecords lists all local records with optional query filtering. +// Uses the simple and efficient approach: start with /records/ index, then filter by queries. +func (r *routeLocal) listLocalRecords(ctx context.Context, queries []*routingv1.RecordQuery, limit uint32, outCh chan<- *routingv1.ListResponse) { + processedCount := 0 + limitInt := int(limit) + + // Step 1: Get all local record CIDs from /records/ index + recordResults, err := r.dstore.Query(ctx, query.Query{ + Prefix: "/records/", + }) + if err != nil { + localLogger.Error("Failed to query local records", "error", err) + + return + } + defer recordResults.Close() + + // Step 2: For each local record, check if it matches ALL queries + for result := range recordResults.Next() { + if result.Error != nil { + localLogger.Warn("Error reading record entry", "key", result.Key, "error", result.Error) + + continue + } + + // Extract CID from record key: /records/CID123 → CID123 + cid := strings.TrimPrefix(result.Key, "/records/") + if cid == "" { + continue + } + + // Check if this record matches all queries (AND relationship) + if r.matchesAllQueries(ctx, cid, queries) { + // Get labels for this record + internalLabels := r.getRecordLabelsEfficiently(ctx, cid) + + // Convert []Label to []string for gRPC API boundary + apiLabels := make([]string, len(internalLabels)) + for i, label := range internalLabels { + apiLabels[i] = label.String() + } + + // Send the response + outCh <- &routingv1.ListResponse{ + RecordRef: &corev1.RecordRef{Cid: cid}, + Labels: apiLabels, + } + + processedCount++ + if limitInt > 0 && processedCount >= limitInt { + break + } + } + } + + localLogger.Debug("Completed List operation", "processed", processedCount, "queries", len(queries)) +} + +// matchesAllQueries checks if a record matches ALL provided queries (AND relationship). +// Uses shared query matching logic with local label retrieval strategy. +func (r *routeLocal) matchesAllQueries(ctx context.Context, cid string, queries []*routingv1.RecordQuery) bool { + // Inject local label retrieval strategy into shared query matching logic + return MatchesAllQueries(ctx, cid, queries, r.getRecordLabelsEfficiently) +} + +// getRecordLabelsEfficiently gets labels for a record by extracting them from datastore keys. +// This completely avoids expensive Pull operations by using the fact that labels are stored as keys. +// This function is designed to be resilient - it never returns an error, only logs warnings. 
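List above follows a common streaming shape: hand the caller a receive-only channel, produce from a goroutine that honors both context and limit, and close the channel so the consumer's range terminates. A self-contained sketch of the pattern, with listResponse standing in for routingv1.ListResponse:

package main

import (
	"context"
	"fmt"
)

// listResponse is a stand-in for routingv1.ListResponse.
type listResponse struct{ cid string }

// streamRecords mirrors the List pattern above: spawn a producer that
// honors the limit and the context, and close the channel when done.
func streamRecords(ctx context.Context, cids []string, limit int) <-chan listResponse {
	out := make(chan listResponse)

	go func() {
		defer close(out) // guarantees the consumer's range terminates

		sent := 0
		for _, cid := range cids {
			select {
			case <-ctx.Done():
				return
			case out <- listResponse{cid: cid}:
			}

			sent++
			if limit > 0 && sent >= limit {
				return
			}
		}
	}()

	return out
}

func main() {
	ch := streamRecords(context.Background(), []string{"cid-a", "cid-b", "cid-c"}, 2)
	for resp := range ch {
		fmt.Println(resp.cid) // prints cid-a and cid-b, then the range ends
	}
}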
+func (r *routeLocal) getRecordLabelsEfficiently(ctx context.Context, cid string) []types.Label { + var labelList []types.Label + + // Use shared namespace iteration function + entries, err := QueryAllNamespaces(ctx, r.dstore) + if err != nil { + localLogger.Error("Failed to get namespace entries for labels", "cid", cid, "error", err) + + return labelList + } + + // Find keys for this CID and local peer: "/skills/AI/ML/CID123/Peer1" + for _, entry := range entries { + // Parse the enhanced key to get components + label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + localLogger.Warn("Failed to parse enhanced label key", "key", entry.Key, "error", err) + + continue + } + + // Check if this key matches our CID and is from local peer + if keyCID == cid && keyPeerID == r.localPeerID { + labelList = append(labelList, label) + } + } + + return labelList +} + +func (r *routeLocal) Unpublish(ctx context.Context, record types.Record) error { + if record == nil { + return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck // Mock should return exact error without wrapping + } + + cid := record.GetCid() + if cid == "" { + return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck + } + + localLogger.Debug("Called local routing's Unpublish method", "cid", cid) + + // load metrics for the client + metrics, err := loadMetrics(ctx, r.dstore) + if err != nil { + return status.Errorf(codes.Internal, "failed to load metrics: %v", err) + } + + batch, err := r.dstore.Batch(ctx) + if err != nil { + return status.Errorf(codes.Internal, "failed to create batch: %v", err) + } + + // get record key and remove record + recordKey := datastore.NewKey("/records/" + cid) + if err := batch.Delete(ctx, recordKey); err != nil { + return status.Errorf(codes.Internal, "failed to delete record key: %v", err) + } + + // keep track of all record labels + labelList := types.GetLabelsFromRecord(record) + + for _, label := range labelList { + // Delete enhanced key with CID and PeerID + enhancedKey := BuildEnhancedLabelKey(label, cid, r.localPeerID) + + labelKey := datastore.NewKey(enhancedKey) + if err := batch.Delete(ctx, labelKey); err != nil { + return status.Errorf(codes.Internal, "failed to delete label key: %v", err) + } + + metrics.decrement(label) + } + + err = batch.Commit(ctx) + if err != nil { + return status.Errorf(codes.Internal, "failed to commit batch: %v", err) + } + + // sync metrics + err = metrics.update(ctx, r.dstore) + if err != nil { + return status.Errorf(codes.Internal, "failed to update metrics: %v", err) + } + + localLogger.Info("Successfully unpublished record", "cid", cid) + + return nil +} diff --git a/server/routing/routing_local_test.go b/server/routing/routing_local_test.go index fa488dd7b..9e9e7b0c7 100644 --- a/server/routing/routing_local_test.go +++ b/server/routing/routing_local_test.go @@ -1,401 +1,401 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// nolint:testifylint,wsl -package routing - -import ( - "context" - "errors" - "log/slog" - "os" - "strings" - "testing" - "time" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - ipfsdatastore 
"github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" -) - -const testPeerID = "test-peer-id" - -func TestPublish_InvalidObject(t *testing.T) { - r := &routeLocal{localPeerID: testPeerID} - - t.Run("nil record", func(t *testing.T) { - err := r.Publish(t.Context(), nil) - - assert.Error(t, err) - assert.ErrorContains(t, err, "record is required") - }) - - t.Run("record with no CID", func(t *testing.T) { - record := &corev1.Record{} - adapter := adapters.NewRecordAdapter(record) - err := r.Publish(t.Context(), adapter) - - assert.Error(t, err) - assert.ErrorContains(t, err, "record has no CID") - }) -} - -type mockStore struct { - data map[string]*corev1.Record -} - -func newMockStore() *mockStore { - return &mockStore{ - data: make(map[string]*corev1.Record), - } -} - -func (m *mockStore) Push(_ context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - cid := record.GetCid() - if cid == "" { - return nil, errors.New("record CID is required") - } - - m.data[cid] = record - - return &corev1.RecordRef{Cid: cid}, nil -} - -func (m *mockStore) Lookup(_ context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - if _, exists := m.data[ref.GetCid()]; exists { - return &corev1.RecordMeta{ - Cid: ref.GetCid(), - }, nil - } - - return nil, errors.New("test object not found") -} - -func (m *mockStore) Pull(_ context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - if record, exists := m.data[ref.GetCid()]; exists { - return record, nil - } - - return nil, errors.New("test object not found") -} - -func (m *mockStore) Delete(_ context.Context, ref *corev1.RecordRef) error { - delete(m.data, ref.GetCid()) - - return nil -} - -func (m *mockStore) IsReady(_ context.Context) bool { - return true -} - -func TestPublishList_ValidSingleSkillQuery(t *testing.T) { - var ( - testRecord = corev1.New(&typesv1alpha0.Record{ - Name: "test-agent-1", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, - }, - }) - testRecord2 = corev1.New(&typesv1alpha0.Record{ - Name: "test-agent-2", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, - {CategoryName: toPtr("category2"), ClassName: toPtr("class2")}, - }, - }) - - testRef = &corev1.RecordRef{Cid: testRecord.GetCid()} - testRef2 = &corev1.RecordRef{Cid: testRecord2.GetCid()} - - validQueriesWithExpectedObjectRef = map[string][]*corev1.RecordRef{ - // tests exact lookup for skills - "/skills/category1/class1": { - {Cid: testRef.GetCid()}, - {Cid: testRef2.GetCid()}, - }, - // tests prefix based-lookup for skills - "/skills/category2": { - {Cid: testRef2.GetCid()}, - }, - } - ) - - // create demo network - mainNode := newTestServer(t, t.Context(), nil) - r := newTestServer(t, t.Context(), mainNode.remote.server.P2pAddrs()) - - // wait for connection - <-mainNode.remote.server.DHT().RefreshRoutingTable() - time.Sleep(1 * time.Second) - - // Mock store - mockstore := newMockStore() - r.local.store = mockstore - - _, err := r.local.store.Push(t.Context(), testRecord) - assert.NoError(t, err) - - _, err = r.local.store.Push(t.Context(), testRecord2) - assert.NoError(t, err) - - // Publish first record - adapter := adapters.NewRecordAdapter(testRecord) - err = r.Publish(t.Context(), adapter) - assert.NoError(t, err) - - // Publish second record - adapter2 := adapters.NewRecordAdapter(testRecord2) - err = r.Publish(t.Context(), adapter2) - assert.NoError(t, err) - - for k, v := 
range validQueriesWithExpectedObjectRef { - t.Run("Valid query: "+k, func(t *testing.T) { - // Convert label to RecordQuery - var queries []*routingv1.RecordQuery - - if strings.HasPrefix(k, "/skills/") { - skillName := strings.TrimPrefix(k, "/skills/") - queries = append(queries, &routingv1.RecordQuery{ - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: skillName, - }) - } - - // list - refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ - Queries: queries, - }) - assert.NoError(t, err) - - // Collect items from the channel - var refs []*routingv1.ListResponse - for ref := range refsChan { - refs = append(refs, ref) - } - - // check if expected refs are present - assert.Len(t, refs, len(v)) - - // check if all expected refs are present - for _, expectedRef := range v { - found := false - - for _, ref := range refs { - if ref.GetRecordRef().GetCid() == expectedRef.GetCid() { - found = true - - break - } - } - - assert.True(t, found, "Expected ref not found: %s", expectedRef.GetCid()) - } - }) - } - - // Unpublish second record - adapterUnpub := adapters.NewRecordAdapter(testRecord2) - err = r.Unpublish(t.Context(), adapterUnpub) - assert.NoError(t, err) - - // Try to list second record using RecordQuery - refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "category2", - }, - }, - }) - assert.NoError(t, err) - - // Collect items from the channel - var refs []*routingv1.ListResponse //nolint:prealloc - for ref := range refsChan { - refs = append(refs, ref) - } - - // check no refs are present - assert.Len(t, refs, 0) -} - -func TestPublishList_ValidMultiSkillQuery(t *testing.T) { - // Test data - var ( - testRecord = corev1.New(&typesv1alpha0.Record{ - Name: "test-agent-multi", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, - {CategoryName: toPtr("category2"), ClassName: toPtr("class2")}, - }, - }) - testRef = &corev1.RecordRef{Cid: testRecord.GetCid()} - ) - - // create demo network - mainNode := newTestServer(t, t.Context(), nil) - r := newTestServer(t, t.Context(), mainNode.remote.server.P2pAddrs()) - - // wait for connection - <-mainNode.remote.server.DHT().RefreshRoutingTable() - time.Sleep(1 * time.Second) - - // Mock store - mockstore := newMockStore() - r.local.store = mockstore - - _, err := r.local.store.Push(t.Context(), testRecord) - assert.NoError(t, err) - - // Publish first record - adapter := adapters.NewRecordAdapter(testRecord) - err = r.Publish(t.Context(), adapter) - assert.NoError(t, err) - - t.Run("Valid multi skill query", func(t *testing.T) { - // list with multiple RecordQueries (AND logic) - refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "category1/class1", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "category2/class2", - }, - }, - }) - assert.NoError(t, err) - - // Collect items from the channel - var refs []*routingv1.ListResponse - for ref := range refsChan { - refs = append(refs, ref) - } - - // check if expected refs are present - assert.Len(t, refs, 1) - - // check if expected ref is present - assert.Equal(t, testRef.GetCid(), refs[0].GetRecordRef().GetCid()) - }) -} - -func newBadgerDatastore(b *testing.B) types.Datastore { - b.Helper() - - dsOpts := []datastore.Option{ - 
datastore.WithFsProvider("/tmp/test-datastore"), // Use a temporary directory - } - - dstore, err := datastore.New(dsOpts...) - if err != nil { - b.Fatalf("failed to create badger datastore: %v", err) - } - - b.Cleanup(func() { - _ = dstore.Close() - _ = os.RemoveAll("/tmp/test-datastore") - }) - - return dstore -} - -func newInMemoryDatastore(b *testing.B) types.Datastore { - b.Helper() - - dstore, err := datastore.New() - if err != nil { - b.Fatalf("failed to create in-memory datastore: %v", err) - } - - return dstore -} - -func Benchmark_RouteLocal(b *testing.B) { - store := newMockStore() - badgerDatastore := newBadgerDatastore(b) - inMemoryDatastore := newInMemoryDatastore(b) - localLogger = slog.New(slog.DiscardHandler) - - badgerRouter := newLocal(store, badgerDatastore, testPeerID) - inMemoryRouter := newLocal(store, inMemoryDatastore, testPeerID) - - record := corev1.New(&typesv1alpha0.Record{ - Name: "bench-agent", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, - }, - }) - - _, err := store.Push(b.Context(), record) - assert.NoError(b, err) - - b.Run("Badger DB Publish and Unpublish", func(b *testing.B) { - adapter := adapters.NewRecordAdapter(record) - for b.Loop() { - _ = badgerRouter.Publish(b.Context(), adapter) - err := badgerRouter.Unpublish(b.Context(), adapter) - assert.NoError(b, err) - } - }) - - b.Run("Badger DB List", func(b *testing.B) { - adapter := adapters.NewRecordAdapter(record) - - _ = badgerRouter.Publish(b.Context(), adapter) - for b.Loop() { - _, err := badgerRouter.List(b.Context(), &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "category1/class1", - }, - }, - }) - assert.NoError(b, err) - } - }) - - b.Run("In memory DB Publish and Unpublish", func(b *testing.B) { - adapter := adapters.NewRecordAdapter(record) - for b.Loop() { - _ = inMemoryRouter.Publish(b.Context(), adapter) - err := inMemoryRouter.Unpublish(b.Context(), adapter) - assert.NoError(b, err) - } - }) - - b.Run("In memory DB List", func(b *testing.B) { - adapter := adapters.NewRecordAdapter(record) - - _ = inMemoryRouter.Publish(b.Context(), adapter) - for b.Loop() { - _, err := inMemoryRouter.List(b.Context(), &routingv1.ListRequest{ - Queries: []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "category1/class1", - }, - }, - }) - assert.NoError(b, err) - } - }) - - _ = badgerDatastore.Delete(b.Context(), ipfsdatastore.NewKey("/")) // Delete all keys - _ = inMemoryDatastore.Delete(b.Context(), ipfsdatastore.NewKey("/")) // Delete all keys - localLogger = logging.Logger("routing/local") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// nolint:testifylint,wsl +package routing + +import ( + "context" + "errors" + "log/slog" + "os" + "strings" + "testing" + "time" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/datastore" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + ipfsdatastore "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" +) + +const testPeerID = "test-peer-id" + +func TestPublish_InvalidObject(t *testing.T) { + r := &routeLocal{localPeerID: 
testPeerID} + + t.Run("nil record", func(t *testing.T) { + err := r.Publish(t.Context(), nil) + + assert.Error(t, err) + assert.ErrorContains(t, err, "record is required") + }) + + t.Run("record with no CID", func(t *testing.T) { + record := &corev1.Record{} + adapter := adapters.NewRecordAdapter(record) + err := r.Publish(t.Context(), adapter) + + assert.Error(t, err) + assert.ErrorContains(t, err, "record has no CID") + }) +} + +type mockStore struct { + data map[string]*corev1.Record +} + +func newMockStore() *mockStore { + return &mockStore{ + data: make(map[string]*corev1.Record), + } +} + +func (m *mockStore) Push(_ context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + cid := record.GetCid() + if cid == "" { + return nil, errors.New("record CID is required") + } + + m.data[cid] = record + + return &corev1.RecordRef{Cid: cid}, nil +} + +func (m *mockStore) Lookup(_ context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + if _, exists := m.data[ref.GetCid()]; exists { + return &corev1.RecordMeta{ + Cid: ref.GetCid(), + }, nil + } + + return nil, errors.New("test object not found") +} + +func (m *mockStore) Pull(_ context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + if record, exists := m.data[ref.GetCid()]; exists { + return record, nil + } + + return nil, errors.New("test object not found") +} + +func (m *mockStore) Delete(_ context.Context, ref *corev1.RecordRef) error { + delete(m.data, ref.GetCid()) + + return nil +} + +func (m *mockStore) IsReady(_ context.Context) bool { + return true +} + +func TestPublishList_ValidSingleSkillQuery(t *testing.T) { + var ( + testRecord = corev1.New(&typesv1alpha0.Record{ + Name: "test-agent-1", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, + }, + }) + testRecord2 = corev1.New(&typesv1alpha0.Record{ + Name: "test-agent-2", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, + {CategoryName: toPtr("category2"), ClassName: toPtr("class2")}, + }, + }) + + testRef = &corev1.RecordRef{Cid: testRecord.GetCid()} + testRef2 = &corev1.RecordRef{Cid: testRecord2.GetCid()} + + validQueriesWithExpectedObjectRef = map[string][]*corev1.RecordRef{ + // tests exact lookup for skills + "/skills/category1/class1": { + {Cid: testRef.GetCid()}, + {Cid: testRef2.GetCid()}, + }, + // tests prefix based-lookup for skills + "/skills/category2": { + {Cid: testRef2.GetCid()}, + }, + } + ) + + // create demo network + mainNode := newTestServer(t, t.Context(), nil) + r := newTestServer(t, t.Context(), mainNode.remote.server.P2pAddrs()) + + // wait for connection + <-mainNode.remote.server.DHT().RefreshRoutingTable() + time.Sleep(1 * time.Second) + + // Mock store + mockstore := newMockStore() + r.local.store = mockstore + + _, err := r.local.store.Push(t.Context(), testRecord) + assert.NoError(t, err) + + _, err = r.local.store.Push(t.Context(), testRecord2) + assert.NoError(t, err) + + // Publish first record + adapter := adapters.NewRecordAdapter(testRecord) + err = r.Publish(t.Context(), adapter) + assert.NoError(t, err) + + // Publish second record + adapter2 := adapters.NewRecordAdapter(testRecord2) + err = r.Publish(t.Context(), adapter2) + assert.NoError(t, err) + + for k, v := range validQueriesWithExpectedObjectRef { + t.Run("Valid query: "+k, func(t *testing.T) { + // Convert label to RecordQuery + var queries []*routingv1.RecordQuery + + if strings.HasPrefix(k, 
"/skills/") { + skillName := strings.TrimPrefix(k, "/skills/") + queries = append(queries, &routingv1.RecordQuery{ + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: skillName, + }) + } + + // list + refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ + Queries: queries, + }) + assert.NoError(t, err) + + // Collect items from the channel + var refs []*routingv1.ListResponse + for ref := range refsChan { + refs = append(refs, ref) + } + + // check if expected refs are present + assert.Len(t, refs, len(v)) + + // check if all expected refs are present + for _, expectedRef := range v { + found := false + + for _, ref := range refs { + if ref.GetRecordRef().GetCid() == expectedRef.GetCid() { + found = true + + break + } + } + + assert.True(t, found, "Expected ref not found: %s", expectedRef.GetCid()) + } + }) + } + + // Unpublish second record + adapterUnpub := adapters.NewRecordAdapter(testRecord2) + err = r.Unpublish(t.Context(), adapterUnpub) + assert.NoError(t, err) + + // Try to list second record using RecordQuery + refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "category2", + }, + }, + }) + assert.NoError(t, err) + + // Collect items from the channel + var refs []*routingv1.ListResponse //nolint:prealloc + for ref := range refsChan { + refs = append(refs, ref) + } + + // check no refs are present + assert.Len(t, refs, 0) +} + +func TestPublishList_ValidMultiSkillQuery(t *testing.T) { + // Test data + var ( + testRecord = corev1.New(&typesv1alpha0.Record{ + Name: "test-agent-multi", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, + {CategoryName: toPtr("category2"), ClassName: toPtr("class2")}, + }, + }) + testRef = &corev1.RecordRef{Cid: testRecord.GetCid()} + ) + + // create demo network + mainNode := newTestServer(t, t.Context(), nil) + r := newTestServer(t, t.Context(), mainNode.remote.server.P2pAddrs()) + + // wait for connection + <-mainNode.remote.server.DHT().RefreshRoutingTable() + time.Sleep(1 * time.Second) + + // Mock store + mockstore := newMockStore() + r.local.store = mockstore + + _, err := r.local.store.Push(t.Context(), testRecord) + assert.NoError(t, err) + + // Publish first record + adapter := adapters.NewRecordAdapter(testRecord) + err = r.Publish(t.Context(), adapter) + assert.NoError(t, err) + + t.Run("Valid multi skill query", func(t *testing.T) { + // list with multiple RecordQueries (AND logic) + refsChan, err := r.List(t.Context(), &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "category1/class1", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "category2/class2", + }, + }, + }) + assert.NoError(t, err) + + // Collect items from the channel + var refs []*routingv1.ListResponse + for ref := range refsChan { + refs = append(refs, ref) + } + + // check if expected refs are present + assert.Len(t, refs, 1) + + // check if expected ref is present + assert.Equal(t, testRef.GetCid(), refs[0].GetRecordRef().GetCid()) + }) +} + +func newBadgerDatastore(b *testing.B) types.Datastore { + b.Helper() + + dsOpts := []datastore.Option{ + datastore.WithFsProvider("/tmp/test-datastore"), // Use a temporary directory + } + + dstore, err := datastore.New(dsOpts...) 
+ if err != nil { + b.Fatalf("failed to create badger datastore: %v", err) + } + + b.Cleanup(func() { + _ = dstore.Close() + _ = os.RemoveAll("/tmp/test-datastore") + }) + + return dstore +} + +func newInMemoryDatastore(b *testing.B) types.Datastore { + b.Helper() + + dstore, err := datastore.New() + if err != nil { + b.Fatalf("failed to create in-memory datastore: %v", err) + } + + return dstore +} + +func Benchmark_RouteLocal(b *testing.B) { + store := newMockStore() + badgerDatastore := newBadgerDatastore(b) + inMemoryDatastore := newInMemoryDatastore(b) + localLogger = slog.New(slog.DiscardHandler) + + badgerRouter := newLocal(store, badgerDatastore, testPeerID) + inMemoryRouter := newLocal(store, inMemoryDatastore, testPeerID) + + record := corev1.New(&typesv1alpha0.Record{ + Name: "bench-agent", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: toPtr("category1"), ClassName: toPtr("class1")}, + }, + }) + + _, err := store.Push(b.Context(), record) + assert.NoError(b, err) + + b.Run("Badger DB Publish and Unpublish", func(b *testing.B) { + adapter := adapters.NewRecordAdapter(record) + for b.Loop() { + _ = badgerRouter.Publish(b.Context(), adapter) + err := badgerRouter.Unpublish(b.Context(), adapter) + assert.NoError(b, err) + } + }) + + b.Run("Badger DB List", func(b *testing.B) { + adapter := adapters.NewRecordAdapter(record) + + _ = badgerRouter.Publish(b.Context(), adapter) + for b.Loop() { + _, err := badgerRouter.List(b.Context(), &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "category1/class1", + }, + }, + }) + assert.NoError(b, err) + } + }) + + b.Run("In memory DB Publish and Unpublish", func(b *testing.B) { + adapter := adapters.NewRecordAdapter(record) + for b.Loop() { + _ = inMemoryRouter.Publish(b.Context(), adapter) + err := inMemoryRouter.Unpublish(b.Context(), adapter) + assert.NoError(b, err) + } + }) + + b.Run("In memory DB List", func(b *testing.B) { + adapter := adapters.NewRecordAdapter(record) + + _ = inMemoryRouter.Publish(b.Context(), adapter) + for b.Loop() { + _, err := inMemoryRouter.List(b.Context(), &routingv1.ListRequest{ + Queries: []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "category1/class1", + }, + }, + }) + assert.NoError(b, err) + } + }) + + _ = badgerDatastore.Delete(b.Context(), ipfsdatastore.NewKey("/")) // Delete all keys + _ = inMemoryDatastore.Delete(b.Context(), ipfsdatastore.NewKey("/")) // Delete all keys + localLogger = logging.Logger("routing/local") +} diff --git a/server/routing/routing_remote.go b/server/routing/routing_remote.go index 909874be6..adec29b63 100644 --- a/server/routing/routing_remote.go +++ b/server/routing/routing_remote.go @@ -1,1056 +1,1056 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/routing/internal/p2p" - "github.com/agntcy/dir/server/routing/pubsub" - "github.com/agntcy/dir/server/routing/rpc" - validators "github.com/agntcy/dir/server/routing/validators" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - 
"github.com/ipfs/go-datastore/query" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-kad-dht/providers" - record "github.com/libp2p/go-libp2p-record" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - ma "github.com/multiformats/go-multiaddr" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var remoteLogger = logging.Logger("routing/remote") - -// NamespaceEntry contains processed namespace query data. -// This is used by namespace iteration functions for routing operations. -type NamespaceEntry struct { - Namespace string - Key string - Value []byte -} - -// QueryAllNamespaces queries all supported label namespaces and returns processed entries. -// This centralizes namespace iteration and datastore querying, eliminating code duplication -// between local and remote routing operations. All resource management is handled internally. -func QueryAllNamespaces(ctx context.Context, dstore types.Datastore) ([]NamespaceEntry, error) { - var entries []NamespaceEntry - - // Query all label namespaces - namespaces := []string{ - types.LabelTypeSkill.Prefix(), - types.LabelTypeDomain.Prefix(), - types.LabelTypeModule.Prefix(), - types.LabelTypeLocator.Prefix(), - } - - for _, namespace := range namespaces { - // Check for context cancellation - select { - case <-ctx.Done(): - return nil, fmt.Errorf("namespace query canceled: %w", ctx.Err()) - default: - } - - results, err := dstore.Query(ctx, query.Query{Prefix: namespace}) - if err != nil { - remoteLogger.Warn("Failed to query namespace", "namespace", namespace, "error", err) - - continue - } - - // Process results and handle cleanup - func() { - defer results.Close() - - for result := range results.Next() { - if result.Error != nil { - continue - } - - entries = append(entries, NamespaceEntry{ - Namespace: namespace, - Key: result.Key, - Value: result.Value, - }) - } - }() - } - - return entries, nil -} - -// routeRemote handles routing across the network with hybrid label discovery. -// It uses both GossipSub (efficient, wide propagation) and DHT+Pull (fallback). 
-type routeRemote struct { - storeAPI types.StoreAPI - server *p2p.Server - service *rpc.Service - notifyCh chan *handlerSync - dstore types.Datastore - cleanupManager *CleanupManager - pubsubManager *pubsub.Manager // GossipSub manager for label announcements (nil if disabled) - isBootstrapNode bool // True if this node is a bootstrap node (no bootstrap peers configured) - - // Lifecycle management - //nolint:containedctx // Context needed for managing lifecycle of multiple long-running goroutines (handleNotify, cleanup tasks) - ctx context.Context // Routing subsystem context - cancel context.CancelFunc // Cancel function for graceful shutdown - wg sync.WaitGroup // Tracks all background goroutines -} - -func newRemote(parentCtx context.Context, - storeAPI types.StoreAPI, - dstore types.Datastore, - opts types.APIOptions, -) (*routeRemote, error) { - // Create routing subsystem context for lifecycle management of background tasks - routingCtx, cancel := context.WithCancel(parentCtx) - - // Determine if this is a bootstrap node (no bootstrap peers configured) - isBootstrapNode := len(opts.Config().Routing.BootstrapPeers) == 0 - - // Create routing - routeAPI := &routeRemote{ - storeAPI: storeAPI, - notifyCh: make(chan *handlerSync, NotificationChannelSize), - dstore: dstore, - ctx: routingCtx, - cancel: cancel, - isBootstrapNode: isBootstrapNode, - } - - refreshInterval := RefreshInterval - if opts.Config().Routing.RefreshInterval > 0 { - refreshInterval = opts.Config().Routing.RefreshInterval - } - - // Use parent context for p2p server (should live as long as the server) - server, err := p2p.New(parentCtx, - p2p.WithListenAddress(opts.Config().Routing.ListenAddress), - p2p.WithDirectoryAPIAddress(opts.Config().Routing.DirectoryAPIAddress), - p2p.WithBootstrapAddrs(opts.Config().Routing.BootstrapPeers), - p2p.WithRefreshInterval(refreshInterval), - p2p.WithRandevous(ProtocolRendezvous), // enable libp2p auto-discovery - p2p.WithIdentityKeyPath(opts.Config().Routing.KeyPath), - p2p.WithCustomDHTOpts( - func(h host.Host) ([]dht.Option, error) { - providerMgr, err := providers.NewProviderManager(h.ID(), h.Peerstore(), dstore) - if err != nil { - return nil, fmt.Errorf("failed to create provider manager: %w", err) - } - - labelValidators := validators.CreateLabelValidators() - validator := record.NamespacedValidator{ - types.LabelTypeSkill.String(): labelValidators[types.LabelTypeSkill.String()], - types.LabelTypeDomain.String(): labelValidators[types.LabelTypeDomain.String()], - types.LabelTypeModule.String(): labelValidators[types.LabelTypeModule.String()], - } - - return []dht.Option{ - dht.Datastore(dstore), // custom DHT datastore - dht.ProtocolPrefix(protocol.ID(ProtocolPrefix)), // custom DHT protocol prefix - dht.Validator(validator), // custom validators for label namespaces - dht.MaxRecordAge(RecordTTL), // set consistent TTL for all DHT records - dht.Mode(dht.ModeServer), - dht.ProviderStore(&handler{ - ProviderManager: providerMgr, - hostID: h.ID().String(), - notifyCh: routeAPI.notifyCh, - }), - }, nil - }, - ), - ) - if err != nil { - return nil, fmt.Errorf("failed to create p2p: %w", err) - } - - routeAPI.server = server - - rpcService, err := rpc.New(server.Host(), storeAPI) - if err != nil { - defer server.Close() - - return nil, fmt.Errorf("failed to create RPC service: %w", err) - } - - routeAPI.service = rpcService - - // Initialize GossipSub manager if enabled - // Protocol parameters (topic, message size) are defined in pubsub.constants - // and are NOT configurable to 
ensure network-wide compatibility - if opts.Config().Routing.GossipSub.Enabled { - // Use parent context for GossipSub (should live as long as the server) - pubsubManager, err := pubsub.New(parentCtx, server.Host()) - if err != nil { - defer server.Close() - - return nil, fmt.Errorf("failed to create pubsub manager: %w", err) - } - - routeAPI.pubsubManager = pubsubManager - - // Set callback for received label announcements - pubsubManager.SetOnRecordPublishEvent(routeAPI.handleRecordPublishEvent) - - // Start periodic mesh peer tagging to protect them from Connection Manager pruning - routeAPI.startMeshPeerTagging() - - remoteLogger.Info("GossipSub label announcements enabled") - } else { - remoteLogger.Info("GossipSub disabled, using DHT+Pull fallback only") - } - - // Pass Publish as callback to avoid circular dependency - // The method value captures routeAPI's state (server, pubsubManager) - routeAPI.cleanupManager = NewCleanupManager(dstore, storeAPI, server, routeAPI.Publish) - - // Start all background goroutines with routing context - routeAPI.wg.Add(1) - - go routeAPI.handleNotify() - - routeAPI.wg.Add(1) - //nolint:contextcheck // Intentionally passing routing context to child goroutine for lifecycle management - go routeAPI.cleanupManager.StartLabelRepublishTask(routeAPI.ctx, &routeAPI.wg) - - routeAPI.wg.Add(1) - //nolint:contextcheck // Intentionally passing routing context to child goroutine for lifecycle management - go routeAPI.cleanupManager.StartRemoteLabelCleanupTask(routeAPI.ctx, &routeAPI.wg) - - return routeAPI, nil -} - -// Publish announces a record to the network via DHT and GossipSub. -// This method is part of the RoutingAPI interface and is also used -// by CleanupManager for republishing via method value injection. -// -// Flow: -// 1. Validate and extract CID from record -// 2. Announce CID to DHT (critical - returns error if fails) -// 3. Publish record via GossipSub (best-effort - logs warning if fails) -// -// Parameters: -// - ctx: Operation context -// - record: Record interface (caller must wrap corev1.Record with adapter) -// -// Returns: -// - error: If critical operations fail (validation, CID parsing, DHT announcement) -func (r *routeRemote) Publish(ctx context.Context, record types.Record) error { - // Validation - if record == nil { - return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck - } - - // Extract and validate CID - cidStr := record.GetCid() - if cidStr == "" { - return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck - } - - remoteLogger.Debug("Publishing record to network", "cid", cidStr) - - // Parse CID - decodedCID, err := cid.Decode(cidStr) - if err != nil { - return status.Errorf(codes.InvalidArgument, "invalid CID %q: %v", cidStr, err) - } - - // 1. Announce CID to DHT network (content discovery) - err = r.server.DHT().Provide(ctx, decodedCID, true) - if err != nil { - return status.Errorf(codes.Internal, "failed to announce CID to DHT: %v", err) - } - - // 2. 
Publish record via GossipSub (if enabled) - // This provides efficient label propagation to ALL subscribed peers - if r.pubsubManager != nil { - if err := r.pubsubManager.PublishRecord(ctx, record); err != nil { - // Log warning but don't fail - DHT announcement already succeeded - // Remote peers can still discover via DHT+Pull fallback - remoteLogger.Warn("Failed to publish record via GossipSub", - "cid", cidStr, - "error", err, - "fallback", "DHT+Pull will handle discovery") - } else { - remoteLogger.Debug("Successfully published record via GossipSub", - "cid", cidStr, - "topicPeers", len(r.pubsubManager.GetTopicPeers())) - } - } - - remoteLogger.Debug("Successfully announced record to network", - "cid", cidStr, - "dhtPeers", r.server.DHT().RoutingTable().Size(), - "gossipSubEnabled", r.pubsubManager != nil) - - return nil -} - -// Search queries remote records using cached labels with OR logic and minimum threshold. -// Records are returned if they match at least minMatchScore queries (OR relationship). -func (r *routeRemote) Search(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) { - remoteLogger.Debug("Called remote routing's Search method", "req", req) - - // Deduplicate queries to ensure consistent scoring regardless of client behavior - originalQueries := req.GetQueries() - deduplicatedQueries := deduplicateQueries(originalQueries) - - if len(originalQueries) != len(deduplicatedQueries) { - remoteLogger.Info("Deduplicated search queries for consistent scoring", - "originalCount", len(originalQueries), "deduplicatedCount", len(deduplicatedQueries)) - } - - // Enforce minimum match score for proto compliance - // Proto: "If not set, it will return records that match at least one query" - minMatchScore := req.GetMinMatchScore() - if minMatchScore < DefaultMinMatchScore { - minMatchScore = DefaultMinMatchScore - remoteLogger.Debug("Applied minimum match score for production safety", "original", req.GetMinMatchScore(), "applied", minMatchScore) - } - - outCh := make(chan *routingv1.SearchResponse) - - go func() { - defer close(outCh) - - r.searchRemoteRecords(ctx, deduplicatedQueries, req.GetLimit(), minMatchScore, outCh) - }() - - return outCh, nil -} - -// searchRemoteRecords searches for remote records using cached labels with OR logic. -// Records are returned if they match at least minMatchScore queries. 
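The Search method above deduplicates queries before scoring so that a client repeating the same query cannot inflate match scores. A stand-alone sketch of that idea (deduplicateQueries itself is defined elsewhere in this package; the Query struct below is a simplified stand-in for routingv1.RecordQuery):

package main

import "fmt"

// Query is a stand-in for routingv1.RecordQuery (type + value only).
type Query struct {
	Type  string
	Value string
}

// dedupe removes repeated (Type, Value) pairs while preserving order,
// so duplicate client queries count once toward the match score.
func dedupe(queries []Query) []Query {
	seen := make(map[Query]bool, len(queries))
	out := make([]Query, 0, len(queries))
	for _, q := range queries {
		if !seen[q] {
			seen[q] = true
			out = append(out, q)
		}
	}
	return out
}

func main() {
	qs := []Query{
		{"skill", "category1/class1"},
		{"skill", "category1/class1"}, // duplicate, dropped
		{"domain", "research"},
	}
	fmt.Println(dedupe(qs)) // [{skill category1/class1} {domain research}]
}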
-// -//nolint:gocognit // Core search algorithm requires complex logic for namespace iteration, filtering, and scoring -func (r *routeRemote) searchRemoteRecords(ctx context.Context, queries []*routingv1.RecordQuery, limit uint32, minMatchScore uint32, outCh chan<- *routingv1.SearchResponse) { - localPeerID := r.server.Host().ID().String() - processedCIDs := make(map[string]bool) // Avoid duplicates - processedCount := 0 - limitInt := int(limit) - - remoteLogger.Debug("Starting remote search with OR logic and minimum threshold", "queries", len(queries), "minMatchScore", minMatchScore, "localPeerID", localPeerID) - - // Query all namespaces to find remote records - entries, err := QueryAllNamespaces(ctx, r.dstore) - if err != nil { - remoteLogger.Error("Failed to get namespace entries for search", "error", err) - - return - } - - for _, entry := range entries { - if limitInt > 0 && processedCount >= limitInt { - break - } - - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - remoteLogger.Warn("Failed to parse enhanced label key", "key", entry.Key, "error", err) - - continue - } - - // Filter for remote records only (exclude local records) - if keyPeerID == localPeerID { - continue // Skip local records - } - - // Avoid duplicate CIDs (same record might have multiple matching labels) - if processedCIDs[keyCID] { - continue - } - - // Calculate match score using OR logic (how many queries match this record) - matchQueries, score := r.calculateMatchScore(ctx, keyCID, queries, keyPeerID) - - remoteLogger.Debug("Calculated match score for remote record", "cid", keyCID, "score", score, "minMatchScore", minMatchScore, "matchingQueries", len(matchQueries)) - - // Apply minimum match score filter (record included if score ≥ threshold) - if score >= minMatchScore { - peer := r.createPeerInfo(ctx, keyPeerID) - - outCh <- &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{Cid: keyCID}, - Peer: peer, - MatchQueries: matchQueries, - MatchScore: score, - } - - processedCIDs[keyCID] = true - processedCount++ - - remoteLogger.Debug("Record meets minimum threshold, including in results", "cid", keyCID, "score", score) - - if limitInt > 0 && processedCount >= limitInt { - break - } - } else { - remoteLogger.Debug("Record does not meet minimum threshold, excluding from results", "cid", keyCID, "score", score, "minMatchScore", minMatchScore) - } - } - - remoteLogger.Debug("Completed Search operation", "processed", processedCount, "queries", len(queries)) -} - -// calculateMatchScore calculates how many queries match a remote record (OR logic). -// Returns the matching queries and the match score for minimum threshold filtering. 
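To make the OR-logic contract concrete before the implementation below: each query that matches at least one of the record's labels adds one point, and the record is returned when the total reaches minMatchScore. A minimal sketch with plain strings standing in for types.Label and RecordQuery; the prefix rule here is an illustrative assumption, with the authoritative matching living in QueryMatchesLabels:

package main

import (
	"fmt"
	"strings"
)

// matchScore counts how many queries match at least one label (OR logic).
// For this sketch a query matches a label when the label falls under the
// query's prefix, e.g. "/skills/category1" matches "/skills/category1/class1".
func matchScore(queries, labels []string) (matching []string, score uint32) {
	for _, q := range queries {
		for _, l := range labels {
			if strings.HasPrefix(l, q) {
				matching = append(matching, q)

				break // one matching label is enough for this query
			}
		}
	}

	return matching, uint32(len(matching))
}

func main() {
	labels := []string{"/skills/category1/class1", "/domains/research"}
	queries := []string{"/skills/category1", "/modules/runner"}

	matched, score := matchScore(queries, labels)
	fmt.Println(matched, score) // [/skills/category1] 1
	// With minMatchScore = 1 the record is included; with 2 it is filtered out.
}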
-func (r *routeRemote) calculateMatchScore(ctx context.Context, cid string, queries []*routingv1.RecordQuery, peerID string) ([]*routingv1.RecordQuery, uint32) { - if len(queries) == 0 { - return nil, 0 - } - - labels := r.getRemoteRecordLabels(ctx, cid, peerID) - if len(labels) == 0 { - return nil, 0 - } - - var matchingQueries []*routingv1.RecordQuery - - // Check each query against all labels - any match counts toward the score (OR logic) - for _, query := range queries { - if QueryMatchesLabels(query, labels) { - matchingQueries = append(matchingQueries, query) - } - } - - score := safeIntToUint32(len(matchingQueries)) - - remoteLogger.Debug("OR logic match score calculated", "cid", cid, "total_queries", len(queries), "matching_queries", len(matchingQueries), "score", score) - - return matchingQueries, score -} - -// getRemoteRecordLabels gets labels for a remote record by finding all enhanced keys for this CID/PeerID. -func (r *routeRemote) getRemoteRecordLabels(ctx context.Context, cid, peerID string) []types.Label { - var labelList []types.Label - - entries, err := QueryAllNamespaces(ctx, r.dstore) - if err != nil { - remoteLogger.Error("Failed to get namespace entries for labels", "error", err) - - return nil - } - - for _, entry := range entries { - label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - continue - } - - if keyCID == cid && keyPeerID == peerID { - labelList = append(labelList, label) - } - } - - return labelList -} - -// createPeerInfo creates a Peer message from a PeerID string. -func (r *routeRemote) createPeerInfo(ctx context.Context, peerID string) *routingv1.Peer { - dirAPIAddr := r.getDirectoryAPIAddress(ctx, peerID) - - return &routingv1.Peer{ - Id: peerID, - Addrs: []string{dirAPIAddr}, - } -} - -func (r *routeRemote) getDirectoryAPIAddress(ctx context.Context, peerID string) string { - // Try datastore cache first (fast path) - if dirAddr := r.getDirectoryAPIAddressFromDatastore(ctx, peerID); dirAddr != "" { - return dirAddr - } - - // Fallback: Try live peerstore (handles mDNS and DHT without addresses) - pid, err := peer.Decode(peerID) - if err != nil { - remoteLogger.Error("Failed to decode peer ID", "peerID", peerID, "error", err) - - return "" - } - - peerstoreAddrs := r.server.Host().Peerstore().Addrs(pid) - if len(peerstoreAddrs) == 0 { - remoteLogger.Warn("No Directory API address found for peer", - "peerID", peerID, - "note", "Peer might be discovered via mDNS or DHT without /dir/ configuration") - - return "" - } - - remoteLogger.Debug("Trying peerstore addresses for /dir/ protocol", - "peerID", peerID, - "addrs", len(peerstoreAddrs)) - - if dirAddr := extractDirProtocol(peerstoreAddrs, peerID); dirAddr != "" { - return dirAddr - } - - remoteLogger.Warn("No /dir/ protocol found in peerstore addresses", - "peerID", peerID) - - return "" -} - -// getDirectoryAPIAddressFromDatastore checks datastore cache for peer addresses. 
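A minimal sketch of the cache layout checked below, with a map standing in for types.Datastore and plain strings for the JSON-encoded multiaddrs (the real code stores []ma.Multiaddr; the key prefix "peer_addrs/" is taken from the code, the address string is a made-up example):

package main

import (
	"encoding/json"
	"fmt"
)

// store is a stand-in for the datastore: "peer_addrs/<peerID>" maps to a
// JSON-encoded address list.
var store = map[string][]byte{}

func putPeerAddrs(peerID string, addrs []string) error {
	b, err := json.Marshal(addrs)
	if err != nil {
		return err
	}

	store["peer_addrs/"+peerID] = b

	return nil
}

func getPeerAddrs(peerID string) ([]string, bool) {
	b, ok := store["peer_addrs/"+peerID]
	if !ok {
		return nil, false // cache miss: caller falls back to the live peerstore
	}

	var addrs []string
	if err := json.Unmarshal(b, &addrs); err != nil {
		return nil, false
	}

	return addrs, true
}

func main() {
	_ = putPeerAddrs("12D3KooWExample", []string{"/ip4/10.0.0.7/tcp/8999"})
	addrs, ok := getPeerAddrs("12D3KooWExample")
	fmt.Println(ok, addrs)
}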
-func (r *routeRemote) getDirectoryAPIAddressFromDatastore(ctx context.Context, peerID string) string { - key := datastore.NewKey("peer_addrs/" + peerID) - - addresses, err := r.dstore.Get(ctx, key) - if err != nil { - remoteLogger.Debug("No cached peer addresses in datastore", "peerID", peerID) - - return "" - } - - var multiaddrs []ma.Multiaddr - if err := json.Unmarshal(addresses, &multiaddrs); err != nil { - remoteLogger.Error("Failed to unmarshal peer addresses", "error", err) - - return "" - } - - return extractDirProtocol(multiaddrs, peerID) -} - -// storePeerAddresses stores peer addresses in datastore for later retrieval. -// Tries DHT notification addresses first, falls back to peerstore if empty. -func (r *routeRemote) storePeerAddresses(ctx context.Context, peerIDStr string, peerID peer.ID, notifAddrs []ma.Multiaddr, cid string) { - // Try DHT notification addresses first - peerAddrs := notifAddrs - if len(peerAddrs) == 0 { - // Fallback: get addresses from libp2p peerstore - peerAddrs = r.server.Host().Peerstore().Addrs(peerID) - remoteLogger.Debug("DHT notification had no addresses, using peerstore", - "peerID", peerIDStr, - "peerstoreAddrs", len(peerAddrs)) - } - - if len(peerAddrs) == 0 { - remoteLogger.Warn("No peer addresses available from DHT or peerstore", - "peerID", peerIDStr, - "cid", cid) - - return - } - - // Check if already stored - key := datastore.NewKey("peer_addrs/" + peerIDStr) - if _, err := r.dstore.Get(ctx, key); err == nil { - return // Already have addresses - } - - // Marshal and store - addresses, err := json.Marshal(peerAddrs) - if err != nil { - remoteLogger.Error("Failed to marshal peer addresses", "error", err) - - return - } - - if err := r.dstore.Put(ctx, key, addresses); err != nil { - remoteLogger.Error("Failed to store peer addresses", "error", err) - - return - } - - remoteLogger.Debug("Stored peer addresses", "peerID", peerIDStr, "count", len(peerAddrs)) -} - -// extractDirProtocol extracts the /dir/ protocol value from a list of multiaddrs. -// Returns empty string if no /dir/ protocol is found. -func extractDirProtocol(multiaddrs []ma.Multiaddr, peerID string) string { - for _, addr := range multiaddrs { - protocols := addr.Protocols() - for _, protocol := range protocols { - if protocol.Code == p2p.DirProtocolCode { - value, err := addr.ValueForProtocol(p2p.DirProtocolCode) - if err != nil { - remoteLogger.Debug("Failed to extract /dir/ protocol value", - "peerID", peerID, - "addr", addr.String(), - "error", err) - } else { - remoteLogger.Debug("Found Directory API address", - "peerID", peerID, - "dirAddress", value) - - return value - } - } - } - } - - return "" -} - -func (r *routeRemote) handleNotify() { - defer r.wg.Done() - - cleanupLogger.Debug("Started DHT provider notification handler") - - // Process DHT provider notifications and handle pull-based label discovery - for { - select { - case <-r.ctx.Done(): - cleanupLogger.Debug("DHT provider notification handler stopped") - - return - case notif := <-r.notifyCh: - // All announcements are now CID provider announcements - // Labels are discovered via pull-based mechanism - r.handleCIDProviderNotification(r.ctx, notif) - } - } -} - -// startMeshPeerTagging starts a background goroutine that periodically tags -// GossipSub mesh peers to protect them from Connection Manager pruning. -// -// GossipSub mesh changes over time as peers join/leave and mesh prunes/grafts. 
-// This periodic tagging ensures current mesh peers are always protected with -// high priority (50 points), preventing the Connection Manager from disconnecting -// them when connection limits are reached. -// -// The goroutine: -// - Tags mesh peers immediately (initial protection) -// - Re-tags every 30 seconds (maintain protection as mesh changes) -// - Stops when routing context is cancelled (clean shutdown) -// -// This method should only be called when GossipSub is enabled. -func (r *routeRemote) startMeshPeerTagging() { - if r.pubsubManager == nil { - return // Safety check: only run if GossipSub is enabled - } - - // Tag mesh peers initially - r.pubsubManager.TagMeshPeers() - - // Start periodic tagging goroutine - r.wg.Add(1) - - go func() { - defer r.wg.Done() - - ticker := time.NewTicker(p2p.MeshPeerTaggingInterval) - defer ticker.Stop() - - remoteLogger.Info("Started periodic GossipSub mesh peer tagging", - "interval", p2p.MeshPeerTaggingInterval) - - for { - select { - case <-r.ctx.Done(): - remoteLogger.Debug("Stopping mesh peer tagging") - - return - case <-ticker.C: - r.pubsubManager.TagMeshPeers() - } - } - }() -} - -// handleCIDProviderNotification implements fallback label discovery via DHT+Pull. -// This is the secondary mechanism when GossipSub labels haven't arrived yet. -// -// Flow: -// 1. Check if labels already cached (from GossipSub) → Update timestamps, skip pull -// 2. If not cached → FALLBACK: Pull record, extract labels, cache -// -// Timing scenarios: -// - 90% case: GossipSub arrives first (~15ms) → This function skips pull (efficient!) -// - 10% case: DHT arrives first (~80ms) → This function pulls (fallback) -// -// This ensures labels are always cached regardless of network race conditions. -func (r *routeRemote) handleCIDProviderNotification(ctx context.Context, notif *handlerSync) { - peerIDStr := notif.Peer.ID.String() - - if peerIDStr == r.server.Host().ID().String() { - remoteLogger.Debug("Ignoring self announcement", "cid", notif.Ref.GetCid()) - - return - } - - // Store peer addresses for later use - r.storePeerAddresses(ctx, peerIDStr, notif.Peer.ID, notif.Peer.Addrs, notif.Ref.GetCid()) - - // Check if we already have labels cached (from GossipSub announcement) - if r.hasRemoteRecordCached(ctx, notif.Ref.GetCid(), peerIDStr) { - // Labels already cached via GossipSub or previous pull - // Just update lastSeen timestamps for freshness - remoteLogger.Debug("Labels already cached (likely from GossipSub), updating lastSeen", - "cid", notif.Ref.GetCid(), - "peer", peerIDStr, - "source", "gossipsub_or_previous_pull") - - r.updateRemoteRecordLastSeen(ctx, notif.Ref.GetCid(), peerIDStr) - - return - } - - // FALLBACK: Labels not cached yet, need to pull record - // This happens when: - // - GossipSub message hasn't arrived yet (race condition) - // - GossipSub is disabled - // - GossipSub message was lost - // - Peer doesn't support GossipSub - remoteLogger.Debug("No cached labels, falling back to pull-based discovery", - "cid", notif.Ref.GetCid(), - "peer", peerIDStr, - "reason", "gossipsub_not_received") - - record, err := r.service.Pull(ctx, notif.Peer.ID, notif.Ref) - if err != nil { - remoteLogger.Error("Failed to pull remote content for label caching", - "cid", notif.Ref.GetCid(), - "peer", peerIDStr, - "error", err) - - return - } - - adapter := adapters.NewRecordAdapter(record) - - labelList := types.GetLabelsFromRecord(adapter) - if len(labelList) == 0 { - remoteLogger.Warn("No labels found in remote record", - "cid", notif.Ref.GetCid(), - 
"peer", peerIDStr) - - return - } - - now := time.Now() - cachedCount := 0 - - for _, label := range labelList { - enhancedKey := BuildEnhancedLabelKey(label, notif.Ref.GetCid(), peerIDStr) - - metadata := &types.LabelMetadata{ - Timestamp: now, - LastSeen: now, - } - - metadataBytes, err := json.Marshal(metadata) - if err != nil { - remoteLogger.Warn("Failed to marshal label metadata", - "enhanced_key", enhancedKey, - "error", err) - - continue - } - - err = r.dstore.Put(ctx, datastore.NewKey(enhancedKey), metadataBytes) - if err != nil { - remoteLogger.Warn("Failed to cache remote label", - "enhanced_key", enhancedKey, - "error", err) - } else { - cachedCount++ - } - } - - remoteLogger.Info("Successfully cached labels via DHT+Pull fallback", - "cid", notif.Ref.GetCid(), - "peer", peerIDStr, - "totalLabels", len(labelList), - "cached", cachedCount, - "source", "pull_fallback") -} - -// hasRemoteRecordCached checks if we already have cached labels for this remote record. -// This helps avoid duplicate work and identifies reannouncement events. -func (r *routeRemote) hasRemoteRecordCached(ctx context.Context, cid, peerID string) bool { - entries, err := QueryAllNamespaces(ctx, r.dstore) - if err != nil { - remoteLogger.Error("Failed to get namespace entries for cache check", "error", err) - - return false - } - - for _, entry := range entries { - // Parse enhanced key to check if it matches our CID/PeerID - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - continue - } - - if keyCID == cid && keyPeerID == peerID { - return true - } - } - - return false -} - -// handleRecordPublishEvent processes incoming record publication events from GossipSub. -// This is the primary label discovery mechanism when GossipSub is enabled. -// It converts the wire format to storage format using existing infrastructure. -// -// Parameters: -// - ctx: Operation context -// - authenticatedPeerID: Cryptographically verified peer ID from msg.ReceivedFrom -// - event: The announcement payload (CID, labels, timestamp) -// -// Flow: -// 1. Skip own announcements (already cached locally) -// 2. Convert []string labels to types.Label -// 3. Build enhanced keys: /skills/AI/CID/PeerID -// 4. Store types.LabelMetadata in datastore -// -// Security: -// - Uses authenticatedPeerID from libp2p transport (cannot be spoofed) -// - Prevents malicious peers from poisoning the label cache -// -// This completely avoids pulling the entire record from remote peers, -// providing ~95% bandwidth savings and ~5-20ms propagation time. 
-func (r *routeRemote) handleRecordPublishEvent(ctx context.Context, authenticatedPeerID string, event *pubsub.RecordPublishEvent) { - // Skip our own announcements (already cached during local Publish) - if authenticatedPeerID == r.server.Host().ID().String() { - return - } - - remoteLogger.Info("Caching labels from GossipSub announcement", - "cid", event.CID, - "peer", authenticatedPeerID, - "labels", len(event.Labels)) - - now := time.Now() - cachedCount := 0 - - // Convert wire format ([]string) to storage format using existing infrastructure - for _, labelStr := range event.Labels { - label := types.Label(labelStr) - - // Use authenticated peer ID (cryptographically verified by libp2p) - enhancedKey := BuildEnhancedLabelKey(label, event.CID, authenticatedPeerID) - - // Use existing types.LabelMetadata structure - metadata := &types.LabelMetadata{ - Timestamp: event.Timestamp, // When label was announced - LastSeen: now, // When we received it - } - - metadataBytes, err := json.Marshal(metadata) - if err != nil { - remoteLogger.Warn("Failed to marshal label metadata", - "key", enhancedKey, - "error", err) - - continue - } - - err = r.dstore.Put(ctx, datastore.NewKey(enhancedKey), metadataBytes) - if err != nil { - remoteLogger.Warn("Failed to cache label from GossipSub", - "key", enhancedKey, - "error", err) - } else { - cachedCount++ - } - } - - remoteLogger.Info("Successfully cached labels from GossipSub", - "cid", event.CID, - "peer", authenticatedPeerID, - "total", len(event.Labels), - "cached", cachedCount) -} - -// updateLabelMetadataTimestamp updates the lastSeen timestamp for a single cached label entry. -func (r *routeRemote) updateLabelMetadataTimestamp(ctx context.Context, key string, value []byte, timestamp time.Time) error { - var metadata types.LabelMetadata - if err := json.Unmarshal(value, &metadata); err != nil { - return fmt.Errorf("failed to unmarshal label metadata: %w", err) - } - - metadata.LastSeen = timestamp - - metadataBytes, err := json.Marshal(metadata) - if err != nil { - return fmt.Errorf("failed to marshal label metadata: %w", err) - } - - err = r.dstore.Put(ctx, datastore.NewKey(key), metadataBytes) - if err != nil { - return fmt.Errorf("failed to save label metadata: %w", err) - } - - return nil -} - -// updateRemoteRecordLastSeen updates the lastSeen timestamp for all cached labels -// from a specific remote peer/CID combination (for reannouncement handling). -func (r *routeRemote) updateRemoteRecordLastSeen(ctx context.Context, cid, peerID string) { - now := time.Now() - updatedCount := 0 - - entries, err := QueryAllNamespaces(ctx, r.dstore) - if err != nil { - remoteLogger.Error("Failed to get namespace entries for lastSeen update", "error", err) - - return - } - - for _, entry := range entries { - // Parse enhanced key to check if it matches our CID/PeerID - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - continue - } - - if keyCID == cid && keyPeerID == peerID { - if err := r.updateLabelMetadataTimestamp(ctx, entry.Key, entry.Value, now); err != nil { - remoteLogger.Warn("Failed to update lastSeen for cached label", "key", entry.Key, "error", err) - } else { - updatedCount++ - - remoteLogger.Debug("Updated lastSeen for cached label", "key", entry.Key) - } - } - } - - remoteLogger.Debug("Updated lastSeen timestamps for reannounced record", - "cid", cid, "peer", peerID, "updatedLabels", updatedCount) -} - -// Stop stops the remote routing services and releases resources. 
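The shutdown path described here follows a standard Go pattern: cancel one shared context, then block on a WaitGroup that tracks every background goroutine. A minimal, runnable sketch of that pattern (worker bodies are placeholders for handleNotify and the cleanup tasks):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)

		go func(id int) {
			defer wg.Done()

			ticker := time.NewTicker(10 * time.Millisecond)
			defer ticker.Stop()

			for {
				select {
				case <-ctx.Done(): // cancellation is the only exit path
					fmt.Println("worker", id, "stopped")

					return
				case <-ticker.C:
					// periodic work (republish, cleanup, notifications, ...)
				}
			}
		}(i)
	}

	time.Sleep(50 * time.Millisecond)
	cancel()  // Stop(): signal all goroutines
	wg.Wait() // Stop(): block until every goroutine has exited
	fmt.Println("all background tasks stopped")
}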
-// This should be called during server shutdown to clean up gracefully. -func (r *routeRemote) Stop() error { - remoteLogger.Info("Stopping routing subsystem") - - // Cancel routing context to stop all background goroutines: - // - handleNotify (DHT provider notifications) - // - StartLabelRepublishTask (periodic republishing) - // - StartRemoteLabelCleanupTask (stale label cleanup) - r.cancel() - - // Wait for all goroutines to finish gracefully - r.wg.Wait() - remoteLogger.Debug("All routing background tasks stopped") - - // Close GossipSub manager if enabled - if r.pubsubManager != nil { - if err := r.pubsubManager.Close(); err != nil { - remoteLogger.Error("Failed to close GossipSub manager", "error", err) - - return fmt.Errorf("failed to close pubsub manager: %w", err) - } - - remoteLogger.Debug("GossipSub manager closed") - } - - // Close p2p server (host and DHT) - r.server.Close() - remoteLogger.Debug("P2P server closed") - - remoteLogger.Info("Routing subsystem stopped successfully") - - return nil -} - -// IsReady checks if the remote routing subsystem is ready to serve traffic. -// For bootstrap nodes (first peer in network): -// - Ready when DHT, host and datastore are initialized (0 peers is expected) -// -// For regular nodes (connecting to existing network): -// - DHT must have peers in routing table -// - Must have connected peers -// - GossipSub mesh must be formed (if enabled) -func (r *routeRemote) IsReady(ctx context.Context) bool { - if r.server == nil { - remoteLogger.Debug("Routing not ready: server is nil") - - return false - } - - // Check if host is initialized - host := r.server.Host() - if host == nil { - remoteLogger.Debug("Routing not ready: host is nil") - - return false - } - - // Check if DHT is initialized - dht := r.server.DHT() - if dht == nil { - remoteLogger.Debug("Routing not ready: DHT is nil") - - return false - } - - // Check if datastore is initialized - if r.dstore == nil { - remoteLogger.Debug("Routing not ready: datastore is nil") - - return false - } - - // Verify host is listening on addresses - // This ensures the libp2p transport layer is properly initialized - addrs := host.Addrs() - if len(addrs) == 0 { - remoteLogger.Debug("Routing not ready: host has no listen addresses") - - return false - } - - // Bootstrap nodes are ready when DHT is initialized, even with 0 peers - // They serve as entry points for the network and will accept incoming connections - if r.isBootstrapNode { - remoteLogger.Debug("Routing ready (bootstrap node)", "listenAddrs", len(addrs)) - - return true - } - - // For regular nodes, require peers in routing table (successful bootstrap) - routingTableSize := dht.RoutingTable().Size() - if routingTableSize == 0 { - remoteLogger.Debug("Routing not ready: DHT routing table is empty") - - return false - } - - // Require at least one connected peer for regular nodes - connectedPeers := len(host.Network().Peers()) - if connectedPeers == 0 { - remoteLogger.Debug("Routing not ready: no connected peers") - - return false - } - - // If GossipSub is enabled, check if mesh is formed - // Bootstrap nodes may have 0 mesh peers initially, which is acceptable - if r.pubsubManager != nil { - meshPeers := r.pubsubManager.GetMeshPeerCount() - if meshPeers == 0 { - remoteLogger.Debug("Routing not ready: GossipSub mesh has no peers") - - return false - } - - remoteLogger.Debug("Routing ready", "routingTableSize", routingTableSize, "connectedPeers", connectedPeers, "meshPeers", meshPeers) - } else { - remoteLogger.Debug("Routing ready", 
"routingTableSize", routingTableSize, "connectedPeers", connectedPeers) - } - - return true -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/routing/internal/p2p" + "github.com/agntcy/dir/server/routing/pubsub" + "github.com/agntcy/dir/server/routing/rpc" + validators "github.com/agntcy/dir/server/routing/validators" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-kad-dht/providers" + record "github.com/libp2p/go-libp2p-record" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + ma "github.com/multiformats/go-multiaddr" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var remoteLogger = logging.Logger("routing/remote") + +// NamespaceEntry contains processed namespace query data. +// This is used by namespace iteration functions for routing operations. +type NamespaceEntry struct { + Namespace string + Key string + Value []byte +} + +// QueryAllNamespaces queries all supported label namespaces and returns processed entries. +// This centralizes namespace iteration and datastore querying, eliminating code duplication +// between local and remote routing operations. All resource management is handled internally. +func QueryAllNamespaces(ctx context.Context, dstore types.Datastore) ([]NamespaceEntry, error) { + var entries []NamespaceEntry + + // Query all label namespaces + namespaces := []string{ + types.LabelTypeSkill.Prefix(), + types.LabelTypeDomain.Prefix(), + types.LabelTypeModule.Prefix(), + types.LabelTypeLocator.Prefix(), + } + + for _, namespace := range namespaces { + // Check for context cancellation + select { + case <-ctx.Done(): + return nil, fmt.Errorf("namespace query canceled: %w", ctx.Err()) + default: + } + + results, err := dstore.Query(ctx, query.Query{Prefix: namespace}) + if err != nil { + remoteLogger.Warn("Failed to query namespace", "namespace", namespace, "error", err) + + continue + } + + // Process results and handle cleanup + func() { + defer results.Close() + + for result := range results.Next() { + if result.Error != nil { + continue + } + + entries = append(entries, NamespaceEntry{ + Namespace: namespace, + Key: result.Key, + Value: result.Value, + }) + } + }() + } + + return entries, nil +} + +// routeRemote handles routing across the network with hybrid label discovery. +// It uses both GossipSub (efficient, wide propagation) and DHT+Pull (fallback). 
+type routeRemote struct { + storeAPI types.StoreAPI + server *p2p.Server + service *rpc.Service + notifyCh chan *handlerSync + dstore types.Datastore + cleanupManager *CleanupManager + pubsubManager *pubsub.Manager // GossipSub manager for label announcements (nil if disabled) + isBootstrapNode bool // True if this node is a bootstrap node (no bootstrap peers configured) + + // Lifecycle management + //nolint:containedctx // Context needed for managing lifecycle of multiple long-running goroutines (handleNotify, cleanup tasks) + ctx context.Context // Routing subsystem context + cancel context.CancelFunc // Cancel function for graceful shutdown + wg sync.WaitGroup // Tracks all background goroutines +} + +func newRemote(parentCtx context.Context, + storeAPI types.StoreAPI, + dstore types.Datastore, + opts types.APIOptions, +) (*routeRemote, error) { + // Create routing subsystem context for lifecycle management of background tasks + routingCtx, cancel := context.WithCancel(parentCtx) + + // Determine if this is a bootstrap node (no bootstrap peers configured) + isBootstrapNode := len(opts.Config().Routing.BootstrapPeers) == 0 + + // Create routing + routeAPI := &routeRemote{ + storeAPI: storeAPI, + notifyCh: make(chan *handlerSync, NotificationChannelSize), + dstore: dstore, + ctx: routingCtx, + cancel: cancel, + isBootstrapNode: isBootstrapNode, + } + + refreshInterval := RefreshInterval + if opts.Config().Routing.RefreshInterval > 0 { + refreshInterval = opts.Config().Routing.RefreshInterval + } + + // Use parent context for p2p server (should live as long as the server) + server, err := p2p.New(parentCtx, + p2p.WithListenAddress(opts.Config().Routing.ListenAddress), + p2p.WithDirectoryAPIAddress(opts.Config().Routing.DirectoryAPIAddress), + p2p.WithBootstrapAddrs(opts.Config().Routing.BootstrapPeers), + p2p.WithRefreshInterval(refreshInterval), + p2p.WithRandevous(ProtocolRendezvous), // enable libp2p auto-discovery + p2p.WithIdentityKeyPath(opts.Config().Routing.KeyPath), + p2p.WithCustomDHTOpts( + func(h host.Host) ([]dht.Option, error) { + providerMgr, err := providers.NewProviderManager(h.ID(), h.Peerstore(), dstore) + if err != nil { + return nil, fmt.Errorf("failed to create provider manager: %w", err) + } + + labelValidators := validators.CreateLabelValidators() + validator := record.NamespacedValidator{ + types.LabelTypeSkill.String(): labelValidators[types.LabelTypeSkill.String()], + types.LabelTypeDomain.String(): labelValidators[types.LabelTypeDomain.String()], + types.LabelTypeModule.String(): labelValidators[types.LabelTypeModule.String()], + } + + return []dht.Option{ + dht.Datastore(dstore), // custom DHT datastore + dht.ProtocolPrefix(protocol.ID(ProtocolPrefix)), // custom DHT protocol prefix + dht.Validator(validator), // custom validators for label namespaces + dht.MaxRecordAge(RecordTTL), // set consistent TTL for all DHT records + dht.Mode(dht.ModeServer), + dht.ProviderStore(&handler{ + ProviderManager: providerMgr, + hostID: h.ID().String(), + notifyCh: routeAPI.notifyCh, + }), + }, nil + }, + ), + ) + if err != nil { + return nil, fmt.Errorf("failed to create p2p: %w", err) + } + + routeAPI.server = server + + rpcService, err := rpc.New(server.Host(), storeAPI) + if err != nil { + defer server.Close() + + return nil, fmt.Errorf("failed to create RPC service: %w", err) + } + + routeAPI.service = rpcService + + // Initialize GossipSub manager if enabled + // Protocol parameters (topic, message size) are defined in pubsub.constants + // and are NOT configurable to 
ensure network-wide compatibility + if opts.Config().Routing.GossipSub.Enabled { + // Use parent context for GossipSub (should live as long as the server) + pubsubManager, err := pubsub.New(parentCtx, server.Host()) + if err != nil { + defer server.Close() + + return nil, fmt.Errorf("failed to create pubsub manager: %w", err) + } + + routeAPI.pubsubManager = pubsubManager + + // Set callback for received label announcements + pubsubManager.SetOnRecordPublishEvent(routeAPI.handleRecordPublishEvent) + + // Start periodic mesh peer tagging to protect them from Connection Manager pruning + routeAPI.startMeshPeerTagging() + + remoteLogger.Info("GossipSub label announcements enabled") + } else { + remoteLogger.Info("GossipSub disabled, using DHT+Pull fallback only") + } + + // Pass Publish as callback to avoid circular dependency + // The method value captures routeAPI's state (server, pubsubManager) + routeAPI.cleanupManager = NewCleanupManager(dstore, storeAPI, server, routeAPI.Publish) + + // Start all background goroutines with routing context + routeAPI.wg.Add(1) + + go routeAPI.handleNotify() + + routeAPI.wg.Add(1) + //nolint:contextcheck // Intentionally passing routing context to child goroutine for lifecycle management + go routeAPI.cleanupManager.StartLabelRepublishTask(routeAPI.ctx, &routeAPI.wg) + + routeAPI.wg.Add(1) + //nolint:contextcheck // Intentionally passing routing context to child goroutine for lifecycle management + go routeAPI.cleanupManager.StartRemoteLabelCleanupTask(routeAPI.ctx, &routeAPI.wg) + + return routeAPI, nil +} + +// Publish announces a record to the network via DHT and GossipSub. +// This method is part of the RoutingAPI interface and is also used +// by CleanupManager for republishing via method value injection. +// +// Flow: +// 1. Validate and extract CID from record +// 2. Announce CID to DHT (critical - returns error if fails) +// 3. Publish record via GossipSub (best-effort - logs warning if fails) +// +// Parameters: +// - ctx: Operation context +// - record: Record interface (caller must wrap corev1.Record with adapter) +// +// Returns: +// - error: If critical operations fail (validation, CID parsing, DHT announcement) +func (r *routeRemote) Publish(ctx context.Context, record types.Record) error { + // Validation + if record == nil { + return status.Error(codes.InvalidArgument, "record is required") //nolint:wrapcheck + } + + // Extract and validate CID + cidStr := record.GetCid() + if cidStr == "" { + return status.Error(codes.InvalidArgument, "record has no CID") //nolint:wrapcheck + } + + remoteLogger.Debug("Publishing record to network", "cid", cidStr) + + // Parse CID + decodedCID, err := cid.Decode(cidStr) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid CID %q: %v", cidStr, err) + } + + // 1. Announce CID to DHT network (content discovery) + err = r.server.DHT().Provide(ctx, decodedCID, true) + if err != nil { + return status.Errorf(codes.Internal, "failed to announce CID to DHT: %v", err) + } + + // 2. 
Publish record via GossipSub (if enabled) + // This provides efficient label propagation to ALL subscribed peers + if r.pubsubManager != nil { + if err := r.pubsubManager.PublishRecord(ctx, record); err != nil { + // Log warning but don't fail - DHT announcement already succeeded + // Remote peers can still discover via DHT+Pull fallback + remoteLogger.Warn("Failed to publish record via GossipSub", + "cid", cidStr, + "error", err, + "fallback", "DHT+Pull will handle discovery") + } else { + remoteLogger.Debug("Successfully published record via GossipSub", + "cid", cidStr, + "topicPeers", len(r.pubsubManager.GetTopicPeers())) + } + } + + remoteLogger.Debug("Successfully announced record to network", + "cid", cidStr, + "dhtPeers", r.server.DHT().RoutingTable().Size(), + "gossipSubEnabled", r.pubsubManager != nil) + + return nil +} + +// Search queries remote records using cached labels with OR logic and minimum threshold. +// Records are returned if they match at least minMatchScore queries (OR relationship). +func (r *routeRemote) Search(ctx context.Context, req *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) { + remoteLogger.Debug("Called remote routing's Search method", "req", req) + + // Deduplicate queries to ensure consistent scoring regardless of client behavior + originalQueries := req.GetQueries() + deduplicatedQueries := deduplicateQueries(originalQueries) + + if len(originalQueries) != len(deduplicatedQueries) { + remoteLogger.Info("Deduplicated search queries for consistent scoring", + "originalCount", len(originalQueries), "deduplicatedCount", len(deduplicatedQueries)) + } + + // Enforce minimum match score for proto compliance + // Proto: "If not set, it will return records that match at least one query" + minMatchScore := req.GetMinMatchScore() + if minMatchScore < DefaultMinMatchScore { + minMatchScore = DefaultMinMatchScore + remoteLogger.Debug("Applied minimum match score for production safety", "original", req.GetMinMatchScore(), "applied", minMatchScore) + } + + outCh := make(chan *routingv1.SearchResponse) + + go func() { + defer close(outCh) + + r.searchRemoteRecords(ctx, deduplicatedQueries, req.GetLimit(), minMatchScore, outCh) + }() + + return outCh, nil +} + +// searchRemoteRecords searches for remote records using cached labels with OR logic. +// Records are returned if they match at least minMatchScore queries. 
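Search above streams its results: a producer goroutine writes to a channel and closes it when done, so callers simply range until exhaustion. A minimal sketch of that pattern (the names are illustrative, not this package's API):

package main

import "fmt"

// search returns a receive-only channel fed by a goroutine that closes the
// channel when the result set is exhausted, mirroring Search's structure.
func search(queries []string) <-chan string {
	out := make(chan string)

	go func() {
		defer close(out) // always close, even on early return

		for _, q := range queries {
			out <- "result-for-" + q
		}
	}()

	return out
}

func main() {
	for r := range search([]string{"skills/AI", "domains/research"}) {
		fmt.Println(r)
	}
}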
+// +//nolint:gocognit // Core search algorithm requires complex logic for namespace iteration, filtering, and scoring +func (r *routeRemote) searchRemoteRecords(ctx context.Context, queries []*routingv1.RecordQuery, limit uint32, minMatchScore uint32, outCh chan<- *routingv1.SearchResponse) { + localPeerID := r.server.Host().ID().String() + processedCIDs := make(map[string]bool) // Avoid duplicates + processedCount := 0 + limitInt := int(limit) + + remoteLogger.Debug("Starting remote search with OR logic and minimum threshold", "queries", len(queries), "minMatchScore", minMatchScore, "localPeerID", localPeerID) + + // Query all namespaces to find remote records + entries, err := QueryAllNamespaces(ctx, r.dstore) + if err != nil { + remoteLogger.Error("Failed to get namespace entries for search", "error", err) + + return + } + + for _, entry := range entries { + if limitInt > 0 && processedCount >= limitInt { + break + } + + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + remoteLogger.Warn("Failed to parse enhanced label key", "key", entry.Key, "error", err) + + continue + } + + // Filter for remote records only (exclude local records) + if keyPeerID == localPeerID { + continue // Skip local records + } + + // Avoid duplicate CIDs (same record might have multiple matching labels) + if processedCIDs[keyCID] { + continue + } + + // Calculate match score using OR logic (how many queries match this record) + matchQueries, score := r.calculateMatchScore(ctx, keyCID, queries, keyPeerID) + + remoteLogger.Debug("Calculated match score for remote record", "cid", keyCID, "score", score, "minMatchScore", minMatchScore, "matchingQueries", len(matchQueries)) + + // Apply minimum match score filter (record included if score ≥ threshold) + if score >= minMatchScore { + peer := r.createPeerInfo(ctx, keyPeerID) + + outCh <- &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{Cid: keyCID}, + Peer: peer, + MatchQueries: matchQueries, + MatchScore: score, + } + + processedCIDs[keyCID] = true + processedCount++ + + remoteLogger.Debug("Record meets minimum threshold, including in results", "cid", keyCID, "score", score) + + if limitInt > 0 && processedCount >= limitInt { + break + } + } else { + remoteLogger.Debug("Record does not meet minimum threshold, excluding from results", "cid", keyCID, "score", score, "minMatchScore", minMatchScore) + } + } + + remoteLogger.Debug("Completed Search operation", "processed", processedCount, "queries", len(queries)) +} + +// calculateMatchScore calculates how many queries match a remote record (OR logic). +// Returns the matching queries and the match score for minimum threshold filtering. 
+func (r *routeRemote) calculateMatchScore(ctx context.Context, cid string, queries []*routingv1.RecordQuery, peerID string) ([]*routingv1.RecordQuery, uint32) { + if len(queries) == 0 { + return nil, 0 + } + + labels := r.getRemoteRecordLabels(ctx, cid, peerID) + if len(labels) == 0 { + return nil, 0 + } + + var matchingQueries []*routingv1.RecordQuery + + // Check each query against all labels - any match counts toward the score (OR logic) + for _, query := range queries { + if QueryMatchesLabels(query, labels) { + matchingQueries = append(matchingQueries, query) + } + } + + score := safeIntToUint32(len(matchingQueries)) + + remoteLogger.Debug("OR logic match score calculated", "cid", cid, "total_queries", len(queries), "matching_queries", len(matchingQueries), "score", score) + + return matchingQueries, score +} + +// getRemoteRecordLabels gets labels for a remote record by finding all enhanced keys for this CID/PeerID. +func (r *routeRemote) getRemoteRecordLabels(ctx context.Context, cid, peerID string) []types.Label { + var labelList []types.Label + + entries, err := QueryAllNamespaces(ctx, r.dstore) + if err != nil { + remoteLogger.Error("Failed to get namespace entries for labels", "error", err) + + return nil + } + + for _, entry := range entries { + label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + continue + } + + if keyCID == cid && keyPeerID == peerID { + labelList = append(labelList, label) + } + } + + return labelList +} + +// createPeerInfo creates a Peer message from a PeerID string. +func (r *routeRemote) createPeerInfo(ctx context.Context, peerID string) *routingv1.Peer { + dirAPIAddr := r.getDirectoryAPIAddress(ctx, peerID) + + return &routingv1.Peer{ + Id: peerID, + Addrs: []string{dirAPIAddr}, + } +} + +func (r *routeRemote) getDirectoryAPIAddress(ctx context.Context, peerID string) string { + // Try datastore cache first (fast path) + if dirAddr := r.getDirectoryAPIAddressFromDatastore(ctx, peerID); dirAddr != "" { + return dirAddr + } + + // Fallback: Try live peerstore (handles mDNS and DHT without addresses) + pid, err := peer.Decode(peerID) + if err != nil { + remoteLogger.Error("Failed to decode peer ID", "peerID", peerID, "error", err) + + return "" + } + + peerstoreAddrs := r.server.Host().Peerstore().Addrs(pid) + if len(peerstoreAddrs) == 0 { + remoteLogger.Warn("No Directory API address found for peer", + "peerID", peerID, + "note", "Peer might be discovered via mDNS or DHT without /dir/ configuration") + + return "" + } + + remoteLogger.Debug("Trying peerstore addresses for /dir/ protocol", + "peerID", peerID, + "addrs", len(peerstoreAddrs)) + + if dirAddr := extractDirProtocol(peerstoreAddrs, peerID); dirAddr != "" { + return dirAddr + } + + remoteLogger.Warn("No /dir/ protocol found in peerstore addresses", + "peerID", peerID) + + return "" +} + +// getDirectoryAPIAddressFromDatastore checks datastore cache for peer addresses. 
+func (r *routeRemote) getDirectoryAPIAddressFromDatastore(ctx context.Context, peerID string) string { + key := datastore.NewKey("peer_addrs/" + peerID) + + addresses, err := r.dstore.Get(ctx, key) + if err != nil { + remoteLogger.Debug("No cached peer addresses in datastore", "peerID", peerID) + + return "" + } + + var multiaddrs []ma.Multiaddr + if err := json.Unmarshal(addresses, &multiaddrs); err != nil { + remoteLogger.Error("Failed to unmarshal peer addresses", "error", err) + + return "" + } + + return extractDirProtocol(multiaddrs, peerID) +} + +// storePeerAddresses stores peer addresses in datastore for later retrieval. +// Tries DHT notification addresses first, falls back to peerstore if empty. +func (r *routeRemote) storePeerAddresses(ctx context.Context, peerIDStr string, peerID peer.ID, notifAddrs []ma.Multiaddr, cid string) { + // Try DHT notification addresses first + peerAddrs := notifAddrs + if len(peerAddrs) == 0 { + // Fallback: get addresses from libp2p peerstore + peerAddrs = r.server.Host().Peerstore().Addrs(peerID) + remoteLogger.Debug("DHT notification had no addresses, using peerstore", + "peerID", peerIDStr, + "peerstoreAddrs", len(peerAddrs)) + } + + if len(peerAddrs) == 0 { + remoteLogger.Warn("No peer addresses available from DHT or peerstore", + "peerID", peerIDStr, + "cid", cid) + + return + } + + // Check if already stored + key := datastore.NewKey("peer_addrs/" + peerIDStr) + if _, err := r.dstore.Get(ctx, key); err == nil { + return // Already have addresses + } + + // Marshal and store + addresses, err := json.Marshal(peerAddrs) + if err != nil { + remoteLogger.Error("Failed to marshal peer addresses", "error", err) + + return + } + + if err := r.dstore.Put(ctx, key, addresses); err != nil { + remoteLogger.Error("Failed to store peer addresses", "error", err) + + return + } + + remoteLogger.Debug("Stored peer addresses", "peerID", peerIDStr, "count", len(peerAddrs)) +} + +// extractDirProtocol extracts the /dir/ protocol value from a list of multiaddrs. +// Returns empty string if no /dir/ protocol is found. +func extractDirProtocol(multiaddrs []ma.Multiaddr, peerID string) string { + for _, addr := range multiaddrs { + protocols := addr.Protocols() + for _, protocol := range protocols { + if protocol.Code == p2p.DirProtocolCode { + value, err := addr.ValueForProtocol(p2p.DirProtocolCode) + if err != nil { + remoteLogger.Debug("Failed to extract /dir/ protocol value", + "peerID", peerID, + "addr", addr.String(), + "error", err) + } else { + remoteLogger.Debug("Found Directory API address", + "peerID", peerID, + "dirAddress", value) + + return value + } + } + } + } + + return "" +} + +func (r *routeRemote) handleNotify() { + defer r.wg.Done() + + cleanupLogger.Debug("Started DHT provider notification handler") + + // Process DHT provider notifications and handle pull-based label discovery + for { + select { + case <-r.ctx.Done(): + cleanupLogger.Debug("DHT provider notification handler stopped") + + return + case notif := <-r.notifyCh: + // All announcements are now CID provider announcements + // Labels are discovered via pull-based mechanism + r.handleCIDProviderNotification(r.ctx, notif) + } + } +} + +// startMeshPeerTagging starts a background goroutine that periodically tags +// GossipSub mesh peers to protect them from Connection Manager pruning. +// +// GossipSub mesh changes over time as peers join/leave and mesh prunes/grafts. 
+// This periodic tagging ensures current mesh peers are always protected with +// high priority (50 points), preventing the Connection Manager from disconnecting +// them when connection limits are reached. +// +// The goroutine: +// - Tags mesh peers immediately (initial protection) +// - Re-tags every 30 seconds (maintain protection as mesh changes) +// - Stops when routing context is cancelled (clean shutdown) +// +// This method should only be called when GossipSub is enabled. +func (r *routeRemote) startMeshPeerTagging() { + if r.pubsubManager == nil { + return // Safety check: only run if GossipSub is enabled + } + + // Tag mesh peers initially + r.pubsubManager.TagMeshPeers() + + // Start periodic tagging goroutine + r.wg.Add(1) + + go func() { + defer r.wg.Done() + + ticker := time.NewTicker(p2p.MeshPeerTaggingInterval) + defer ticker.Stop() + + remoteLogger.Info("Started periodic GossipSub mesh peer tagging", + "interval", p2p.MeshPeerTaggingInterval) + + for { + select { + case <-r.ctx.Done(): + remoteLogger.Debug("Stopping mesh peer tagging") + + return + case <-ticker.C: + r.pubsubManager.TagMeshPeers() + } + } + }() +} + +// handleCIDProviderNotification implements fallback label discovery via DHT+Pull. +// This is the secondary mechanism when GossipSub labels haven't arrived yet. +// +// Flow: +// 1. Check if labels already cached (from GossipSub) → Update timestamps, skip pull +// 2. If not cached → FALLBACK: Pull record, extract labels, cache +// +// Timing scenarios: +// - 90% case: GossipSub arrives first (~15ms) → This function skips pull (efficient!) +// - 10% case: DHT arrives first (~80ms) → This function pulls (fallback) +// +// This ensures labels are always cached regardless of network race conditions. +func (r *routeRemote) handleCIDProviderNotification(ctx context.Context, notif *handlerSync) { + peerIDStr := notif.Peer.ID.String() + + if peerIDStr == r.server.Host().ID().String() { + remoteLogger.Debug("Ignoring self announcement", "cid", notif.Ref.GetCid()) + + return + } + + // Store peer addresses for later use + r.storePeerAddresses(ctx, peerIDStr, notif.Peer.ID, notif.Peer.Addrs, notif.Ref.GetCid()) + + // Check if we already have labels cached (from GossipSub announcement) + if r.hasRemoteRecordCached(ctx, notif.Ref.GetCid(), peerIDStr) { + // Labels already cached via GossipSub or previous pull + // Just update lastSeen timestamps for freshness + remoteLogger.Debug("Labels already cached (likely from GossipSub), updating lastSeen", + "cid", notif.Ref.GetCid(), + "peer", peerIDStr, + "source", "gossipsub_or_previous_pull") + + r.updateRemoteRecordLastSeen(ctx, notif.Ref.GetCid(), peerIDStr) + + return + } + + // FALLBACK: Labels not cached yet, need to pull record + // This happens when: + // - GossipSub message hasn't arrived yet (race condition) + // - GossipSub is disabled + // - GossipSub message was lost + // - Peer doesn't support GossipSub + remoteLogger.Debug("No cached labels, falling back to pull-based discovery", + "cid", notif.Ref.GetCid(), + "peer", peerIDStr, + "reason", "gossipsub_not_received") + + record, err := r.service.Pull(ctx, notif.Peer.ID, notif.Ref) + if err != nil { + remoteLogger.Error("Failed to pull remote content for label caching", + "cid", notif.Ref.GetCid(), + "peer", peerIDStr, + "error", err) + + return + } + + adapter := adapters.NewRecordAdapter(record) + + labelList := types.GetLabelsFromRecord(adapter) + if len(labelList) == 0 { + remoteLogger.Warn("No labels found in remote record", + "cid", notif.Ref.GetCid(), + 
"peer", peerIDStr) + + return + } + + now := time.Now() + cachedCount := 0 + + for _, label := range labelList { + enhancedKey := BuildEnhancedLabelKey(label, notif.Ref.GetCid(), peerIDStr) + + metadata := &types.LabelMetadata{ + Timestamp: now, + LastSeen: now, + } + + metadataBytes, err := json.Marshal(metadata) + if err != nil { + remoteLogger.Warn("Failed to marshal label metadata", + "enhanced_key", enhancedKey, + "error", err) + + continue + } + + err = r.dstore.Put(ctx, datastore.NewKey(enhancedKey), metadataBytes) + if err != nil { + remoteLogger.Warn("Failed to cache remote label", + "enhanced_key", enhancedKey, + "error", err) + } else { + cachedCount++ + } + } + + remoteLogger.Info("Successfully cached labels via DHT+Pull fallback", + "cid", notif.Ref.GetCid(), + "peer", peerIDStr, + "totalLabels", len(labelList), + "cached", cachedCount, + "source", "pull_fallback") +} + +// hasRemoteRecordCached checks if we already have cached labels for this remote record. +// This helps avoid duplicate work and identifies reannouncement events. +func (r *routeRemote) hasRemoteRecordCached(ctx context.Context, cid, peerID string) bool { + entries, err := QueryAllNamespaces(ctx, r.dstore) + if err != nil { + remoteLogger.Error("Failed to get namespace entries for cache check", "error", err) + + return false + } + + for _, entry := range entries { + // Parse enhanced key to check if it matches our CID/PeerID + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + continue + } + + if keyCID == cid && keyPeerID == peerID { + return true + } + } + + return false +} + +// handleRecordPublishEvent processes incoming record publication events from GossipSub. +// This is the primary label discovery mechanism when GossipSub is enabled. +// It converts the wire format to storage format using existing infrastructure. +// +// Parameters: +// - ctx: Operation context +// - authenticatedPeerID: Cryptographically verified peer ID from msg.ReceivedFrom +// - event: The announcement payload (CID, labels, timestamp) +// +// Flow: +// 1. Skip own announcements (already cached locally) +// 2. Convert []string labels to types.Label +// 3. Build enhanced keys: /skills/AI/CID/PeerID +// 4. Store types.LabelMetadata in datastore +// +// Security: +// - Uses authenticatedPeerID from libp2p transport (cannot be spoofed) +// - Prevents malicious peers from poisoning the label cache +// +// This completely avoids pulling the entire record from remote peers, +// providing ~95% bandwidth savings and ~5-20ms propagation time. 
+func (r *routeRemote) handleRecordPublishEvent(ctx context.Context, authenticatedPeerID string, event *pubsub.RecordPublishEvent) { + // Skip our own announcements (already cached during local Publish) + if authenticatedPeerID == r.server.Host().ID().String() { + return + } + + remoteLogger.Info("Caching labels from GossipSub announcement", + "cid", event.CID, + "peer", authenticatedPeerID, + "labels", len(event.Labels)) + + now := time.Now() + cachedCount := 0 + + // Convert wire format ([]string) to storage format using existing infrastructure + for _, labelStr := range event.Labels { + label := types.Label(labelStr) + + // Use authenticated peer ID (cryptographically verified by libp2p) + enhancedKey := BuildEnhancedLabelKey(label, event.CID, authenticatedPeerID) + + // Use existing types.LabelMetadata structure + metadata := &types.LabelMetadata{ + Timestamp: event.Timestamp, // When label was announced + LastSeen: now, // When we received it + } + + metadataBytes, err := json.Marshal(metadata) + if err != nil { + remoteLogger.Warn("Failed to marshal label metadata", + "key", enhancedKey, + "error", err) + + continue + } + + err = r.dstore.Put(ctx, datastore.NewKey(enhancedKey), metadataBytes) + if err != nil { + remoteLogger.Warn("Failed to cache label from GossipSub", + "key", enhancedKey, + "error", err) + } else { + cachedCount++ + } + } + + remoteLogger.Info("Successfully cached labels from GossipSub", + "cid", event.CID, + "peer", authenticatedPeerID, + "total", len(event.Labels), + "cached", cachedCount) +} + +// updateLabelMetadataTimestamp updates the lastSeen timestamp for a single cached label entry. +func (r *routeRemote) updateLabelMetadataTimestamp(ctx context.Context, key string, value []byte, timestamp time.Time) error { + var metadata types.LabelMetadata + if err := json.Unmarshal(value, &metadata); err != nil { + return fmt.Errorf("failed to unmarshal label metadata: %w", err) + } + + metadata.LastSeen = timestamp + + metadataBytes, err := json.Marshal(metadata) + if err != nil { + return fmt.Errorf("failed to marshal label metadata: %w", err) + } + + err = r.dstore.Put(ctx, datastore.NewKey(key), metadataBytes) + if err != nil { + return fmt.Errorf("failed to save label metadata: %w", err) + } + + return nil +} + +// updateRemoteRecordLastSeen updates the lastSeen timestamp for all cached labels +// from a specific remote peer/CID combination (for reannouncement handling). +func (r *routeRemote) updateRemoteRecordLastSeen(ctx context.Context, cid, peerID string) { + now := time.Now() + updatedCount := 0 + + entries, err := QueryAllNamespaces(ctx, r.dstore) + if err != nil { + remoteLogger.Error("Failed to get namespace entries for lastSeen update", "error", err) + + return + } + + for _, entry := range entries { + // Parse enhanced key to check if it matches our CID/PeerID + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + continue + } + + if keyCID == cid && keyPeerID == peerID { + if err := r.updateLabelMetadataTimestamp(ctx, entry.Key, entry.Value, now); err != nil { + remoteLogger.Warn("Failed to update lastSeen for cached label", "key", entry.Key, "error", err) + } else { + updatedCount++ + + remoteLogger.Debug("Updated lastSeen for cached label", "key", entry.Key) + } + } + } + + remoteLogger.Debug("Updated lastSeen timestamps for reannounced record", + "cid", cid, "peer", peerID, "updatedLabels", updatedCount) +} + +// Stop stops the remote routing services and releases resources. 
+// This should be called during server shutdown to clean up gracefully.
+func (r *routeRemote) Stop() error {
+	remoteLogger.Info("Stopping routing subsystem")
+
+	// Cancel routing context to stop all background goroutines:
+	// - handleNotify (DHT provider notifications)
+	// - StartLabelRepublishTask (periodic republishing)
+	// - StartRemoteLabelCleanupTask (stale label cleanup)
+	// - startMeshPeerTagging (GossipSub mesh peer protection, if enabled)
+	r.cancel()
+
+	// Wait for all goroutines to finish gracefully
+	r.wg.Wait()
+	remoteLogger.Debug("All routing background tasks stopped")
+
+	// Close GossipSub manager if enabled
+	if r.pubsubManager != nil {
+		if err := r.pubsubManager.Close(); err != nil {
+			remoteLogger.Error("Failed to close GossipSub manager", "error", err)
+
+			return fmt.Errorf("failed to close pubsub manager: %w", err)
+		}
+
+		remoteLogger.Debug("GossipSub manager closed")
+	}
+
+	// Close p2p server (host and DHT)
+	r.server.Close()
+	remoteLogger.Debug("P2P server closed")
+
+	remoteLogger.Info("Routing subsystem stopped successfully")
+
+	return nil
+}
+
+// IsReady checks if the remote routing subsystem is ready to serve traffic.
+// For bootstrap nodes (first peer in the network):
+// - Ready when DHT, host, and datastore are initialized (0 peers is expected)
+//
+// For regular nodes (connecting to an existing network):
+// - DHT must have peers in routing table
+// - Must have connected peers
+// - GossipSub mesh must be formed (if enabled)
+func (r *routeRemote) IsReady(ctx context.Context) bool {
+	if r.server == nil {
+		remoteLogger.Debug("Routing not ready: server is nil")
+
+		return false
+	}
+
+	// Check if host is initialized
+	host := r.server.Host()
+	if host == nil {
+		remoteLogger.Debug("Routing not ready: host is nil")
+
+		return false
+	}
+
+	// Check if DHT is initialized
+	dht := r.server.DHT()
+	if dht == nil {
+		remoteLogger.Debug("Routing not ready: DHT is nil")
+
+		return false
+	}
+
+	// Check if datastore is initialized
+	if r.dstore == nil {
+		remoteLogger.Debug("Routing not ready: datastore is nil")
+
+		return false
+	}
+
+	// Verify host is listening on addresses
+	// This ensures the libp2p transport layer is properly initialized
+	addrs := host.Addrs()
+	if len(addrs) == 0 {
+		remoteLogger.Debug("Routing not ready: host has no listen addresses")
+
+		return false
+	}
+
+	// Bootstrap nodes are ready when DHT is initialized, even with 0 peers
+	// They serve as entry points for the network and will accept incoming connections
+	if r.isBootstrapNode {
+		remoteLogger.Debug("Routing ready (bootstrap node)", "listenAddrs", len(addrs))
+
+		return true
+	}
+
+	// For regular nodes, require peers in routing table (successful bootstrap)
+	routingTableSize := dht.RoutingTable().Size()
+	if routingTableSize == 0 {
+		remoteLogger.Debug("Routing not ready: DHT routing table is empty")
+
+		return false
+	}
+
+	// Require at least one connected peer for regular nodes
+	connectedPeers := len(host.Network().Peers())
+	if connectedPeers == 0 {
+		remoteLogger.Debug("Routing not ready: no connected peers")
+
+		return false
+	}
+
+	// If GossipSub is enabled, regular nodes must also have a formed mesh.
+	// Bootstrap nodes are exempt from this check: they returned early above,
+	// since they may legitimately start with 0 mesh peers.
+	if r.pubsubManager != nil {
+		meshPeers := r.pubsubManager.GetMeshPeerCount()
+		if meshPeers == 0 {
+			remoteLogger.Debug("Routing not ready: GossipSub mesh has no peers")
+
+			return false
+		}
+
+		remoteLogger.Debug("Routing ready", "routingTableSize", routingTableSize, "connectedPeers", connectedPeers, "meshPeers", meshPeers)
+	} else {
+		remoteLogger.Debug("Routing ready",
"routingTableSize", routingTableSize, "connectedPeers", connectedPeers) + } + + return true +} diff --git a/server/routing/routing_remote_or_logic_test.go b/server/routing/routing_remote_or_logic_test.go index 4c92637f3..c9d53eaed 100644 --- a/server/routing/routing_remote_or_logic_test.go +++ b/server/routing/routing_remote_or_logic_test.go @@ -1,162 +1,162 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "encoding/json" - "testing" - "time" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/types" - ipfsdatastore "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// This test bypasses DHT infrastructure issues and directly tests the calculateMatchScore method. -func TestRemoteSearch_ORLogicWithMinMatchScore(t *testing.T) { - ctx := t.Context() - - // Create test datastore - dstore, cleanup := setupTestDatastore(t) - defer cleanup() - - // Create routeRemote instance for testing - r := &routeRemote{ - dstore: dstore, - } - - // Setup test scenario: simulate cached remote announcements - testPeerID := "remote-peer-test" - testCID := "test-record-cid" - - // Simulate Peer 1 announced these skills for the test record - skillLabels := []string{ - "/skills/Natural Language Processing/Text Completion", - "/skills/Natural Language Processing/Problem Solving", - } - - // Store enhanced label announcements in datastore (simulating DHT cache) - for _, label := range skillLabels { - enhancedKey := BuildEnhancedLabelKey(types.Label(label), testCID, testPeerID) - metadata := &types.LabelMetadata{ - Timestamp: time.Now(), - LastSeen: time.Now(), - } - metadataBytes, err := json.Marshal(metadata) - require.NoError(t, err) - - err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), metadataBytes) - require.NoError(t, err) - } - - t.Run("OR Logic Success - 2/3 queries match", func(t *testing.T) { - // Test queries: 2 real skills + 1 fake skill - queries := []*routingv1.RecordQuery{ - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Problem Solving"}, - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "NonexistentSkill"}, - } - - // Test calculateMatchScore directly (avoids server dependency) - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should have 2 matching queries out of 3 - assert.Len(t, matchQueries, 2, "Should have 2 matching queries") - assert.Equal(t, uint32(2), score, "Score should be 2 (2 out of 3 queries matched)") - - // Test that minScore=2 would include this record - assert.GreaterOrEqual(t, score, uint32(2), "Score meets minScore=2 threshold") - - // Test that minScore=3 would exclude this record - assert.Less(t, score, uint32(3), "Score does not meet minScore=3 threshold") - }) - - t.Run("Single Query Match", func(t *testing.T) { - // Single query that should match - queries := []*routingv1.RecordQuery{ - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, - } - - // Test calculateMatchScore - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should have 1 matching query - assert.Len(t, matchQueries, 1, "Should have 1 matching query") - 
assert.Equal(t, uint32(1), score, "Score should be 1") - }) - - t.Run("Perfect Match - 2/2 queries match", func(t *testing.T) { - // Two queries that should both match - queries := []*routingv1.RecordQuery{ - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Problem Solving"}, - } - - // Test calculateMatchScore - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should have 2 matching queries out of 2 - assert.Len(t, matchQueries, 2, "Should have 2 matching queries") - assert.Equal(t, uint32(2), score, "Score should be 2 (both queries matched)") - }) - - t.Run("No Queries Match", func(t *testing.T) { - // Query that doesn't match anything - queries := []*routingv1.RecordQuery{ - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "NonexistentSkill"}, - } - - // Test calculateMatchScore - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should have 0 matching queries - assert.Empty(t, matchQueries, "Should have 0 matching queries") - assert.Equal(t, uint32(0), score, "Score should be 0") - }) - - t.Run("Empty Queries", func(t *testing.T) { - // No queries - var queries []*routingv1.RecordQuery - - // Test calculateMatchScore - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should have 0 matching queries and 0 score - assert.Empty(t, matchQueries, "Should have 0 matching queries with empty query list") - assert.Equal(t, uint32(0), score, "Score should be 0 with empty queries") - }) - - t.Run("Hierarchical Skill Matching", func(t *testing.T) { - // Test hierarchical skill matching (prefix matching) - queries := []*routingv1.RecordQuery{ - {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing"}, // Should match both skills via prefix - } - - // Test calculateMatchScore - matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) - - // Should match at least 1 query (hierarchical matching) - assert.GreaterOrEqual(t, len(matchQueries), 1, "Should have at least 1 matching query with hierarchical matching") - assert.GreaterOrEqual(t, score, uint32(1), "Score should be at least 1 with hierarchical matching") - }) -} - -// setupTestDatastore creates a test datastore for routing tests. -func setupTestDatastore(t *testing.T) (types.Datastore, func()) { - t.Helper() - - dstore, err := datastore.New() - require.NoError(t, err) - - cleanup := func() { - if closer, ok := dstore.(interface{ Close() error }); ok { - closer.Close() - } - } - - return dstore, cleanup -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "encoding/json" + "testing" + "time" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/datastore" + "github.com/agntcy/dir/server/types" + ipfsdatastore "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// This test bypasses DHT infrastructure issues and directly tests the calculateMatchScore method. 
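+//
+// Scoring sketch (values taken from the test below): with cached enhanced keys
+//
+//	/skills/Natural Language Processing/Text Completion/<cid>/<peer>
+//	/skills/Natural Language Processing/Problem Solving/<cid>/<peer>
+//
+// the query set {Text Completion, Problem Solving, NonexistentSkill} scores 2,
+// so the record passes minScore=2 but is excluded at minScore=3 (OR semantics:
+// each matching query adds one point).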
+func TestRemoteSearch_ORLogicWithMinMatchScore(t *testing.T) { + ctx := t.Context() + + // Create test datastore + dstore, cleanup := setupTestDatastore(t) + defer cleanup() + + // Create routeRemote instance for testing + r := &routeRemote{ + dstore: dstore, + } + + // Setup test scenario: simulate cached remote announcements + testPeerID := "remote-peer-test" + testCID := "test-record-cid" + + // Simulate Peer 1 announced these skills for the test record + skillLabels := []string{ + "/skills/Natural Language Processing/Text Completion", + "/skills/Natural Language Processing/Problem Solving", + } + + // Store enhanced label announcements in datastore (simulating DHT cache) + for _, label := range skillLabels { + enhancedKey := BuildEnhancedLabelKey(types.Label(label), testCID, testPeerID) + metadata := &types.LabelMetadata{ + Timestamp: time.Now(), + LastSeen: time.Now(), + } + metadataBytes, err := json.Marshal(metadata) + require.NoError(t, err) + + err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), metadataBytes) + require.NoError(t, err) + } + + t.Run("OR Logic Success - 2/3 queries match", func(t *testing.T) { + // Test queries: 2 real skills + 1 fake skill + queries := []*routingv1.RecordQuery{ + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Problem Solving"}, + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "NonexistentSkill"}, + } + + // Test calculateMatchScore directly (avoids server dependency) + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should have 2 matching queries out of 3 + assert.Len(t, matchQueries, 2, "Should have 2 matching queries") + assert.Equal(t, uint32(2), score, "Score should be 2 (2 out of 3 queries matched)") + + // Test that minScore=2 would include this record + assert.GreaterOrEqual(t, score, uint32(2), "Score meets minScore=2 threshold") + + // Test that minScore=3 would exclude this record + assert.Less(t, score, uint32(3), "Score does not meet minScore=3 threshold") + }) + + t.Run("Single Query Match", func(t *testing.T) { + // Single query that should match + queries := []*routingv1.RecordQuery{ + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, + } + + // Test calculateMatchScore + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should have 1 matching query + assert.Len(t, matchQueries, 1, "Should have 1 matching query") + assert.Equal(t, uint32(1), score, "Score should be 1") + }) + + t.Run("Perfect Match - 2/2 queries match", func(t *testing.T) { + // Two queries that should both match + queries := []*routingv1.RecordQuery{ + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Text Completion"}, + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing/Problem Solving"}, + } + + // Test calculateMatchScore + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should have 2 matching queries out of 2 + assert.Len(t, matchQueries, 2, "Should have 2 matching queries") + assert.Equal(t, uint32(2), score, "Score should be 2 (both queries matched)") + }) + + t.Run("No Queries Match", func(t *testing.T) { + // Query that doesn't match anything + queries := []*routingv1.RecordQuery{ + {Type: 
routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "NonexistentSkill"}, + } + + // Test calculateMatchScore + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should have 0 matching queries + assert.Empty(t, matchQueries, "Should have 0 matching queries") + assert.Equal(t, uint32(0), score, "Score should be 0") + }) + + t.Run("Empty Queries", func(t *testing.T) { + // No queries + var queries []*routingv1.RecordQuery + + // Test calculateMatchScore + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should have 0 matching queries and 0 score + assert.Empty(t, matchQueries, "Should have 0 matching queries with empty query list") + assert.Equal(t, uint32(0), score, "Score should be 0 with empty queries") + }) + + t.Run("Hierarchical Skill Matching", func(t *testing.T) { + // Test hierarchical skill matching (prefix matching) + queries := []*routingv1.RecordQuery{ + {Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, Value: "Natural Language Processing"}, // Should match both skills via prefix + } + + // Test calculateMatchScore + matchQueries, score := r.calculateMatchScore(ctx, testCID, queries, testPeerID) + + // Should match at least 1 query (hierarchical matching) + assert.GreaterOrEqual(t, len(matchQueries), 1, "Should have at least 1 matching query with hierarchical matching") + assert.GreaterOrEqual(t, score, uint32(1), "Score should be at least 1 with hierarchical matching") + }) +} + +// setupTestDatastore creates a test datastore for routing tests. +func setupTestDatastore(t *testing.T) (types.Datastore, func()) { + t.Helper() + + dstore, err := datastore.New() + require.NoError(t, err) + + cleanup := func() { + if closer, ok := dstore.(interface{ Close() error }); ok { + closer.Close() + } + } + + return dstore, cleanup +} diff --git a/server/routing/rpc/rpc.go b/server/routing/rpc/rpc.go index 6d0a8a390..2a121b7c5 100644 --- a/server/routing/rpc/rpc.go +++ b/server/routing/rpc/rpc.go @@ -1,179 +1,179 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:revive -package rpc - -import ( - "context" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - rpc "github.com/libp2p/go-libp2p-gorpc" - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var logger = logging.Logger("rpc") - -// TODO: proper cleanup and implementation needed! 
- -const ( - Protocol = protocol.ID("/dir/rpc/1.0.0") - DirService = "RPCAPI" - DirServiceFuncLookup = "Lookup" - DirServiceFuncPull = "Pull" - MaxPullSize = 4 * 1024 * 1024 // 4 MB -) - -type RPCAPI struct { - service *Service -} - -type PullResponse struct { - Cid string - Annotations map[string]string - Data []byte -} - -type LookupResponse struct { - Cid string - Annotations map[string]string -} - -// NOTE: List-related types removed since List is a local-only operation -// and should not be part of peer-to-peer RPC communication - -func (r *RPCAPI) Lookup(ctx context.Context, in *corev1.RecordRef, out *LookupResponse) error { - logger.Debug("P2p RPC: Executing Lookup request on remote peer", "peer", r.service.host.ID()) - - // validate request - if in == nil || out == nil { - return status.Error(codes.InvalidArgument, "invalid request: nil request/response") //nolint:wrapcheck - } - - // handle lookup - meta, err := r.service.store.Lookup(ctx, in) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to lookup: %s", st.Message()) - } - - // write result - *out = LookupResponse{ - Cid: meta.GetCid(), - Annotations: meta.GetAnnotations(), - } - - return nil -} - -func (r *RPCAPI) Pull(ctx context.Context, in *corev1.RecordRef, out *PullResponse) error { - logger.Debug("P2p RPC: Executing Pull request on remote peer", "peer", r.service.host.ID()) - - // validate request - if in == nil || out == nil { - return status.Error(codes.InvalidArgument, "invalid request: nil request/response") //nolint:wrapcheck - } - - // lookup - meta, err := r.service.store.Lookup(ctx, in) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to lookup: %s", st.Message()) - } - - // pull data - record, err := r.service.store.Pull(ctx, in) - if err != nil { - st := status.Convert(err) - - return status.Errorf(st.Code(), "failed to pull: %s", st.Message()) - } - - canonicalBytes, err := record.Marshal() - if err != nil { - return status.Errorf(codes.Internal, "failed to marshal record: %v", err) - } - - // set output - *out = PullResponse{ - Cid: meta.GetCid(), - Data: canonicalBytes, - Annotations: meta.GetAnnotations(), - } - - return nil -} - -// NOTE: List RPC method removed since List is a local-only operation - -type Service struct { - rpcServer *rpc.Server - rpcClient *rpc.Client - host host.Host - store types.StoreAPI -} - -func New(host host.Host, store types.StoreAPI) (*Service, error) { - service := &Service{ - rpcServer: rpc.NewServer(host, Protocol), - host: host, - store: store, - } - - // register api - rpcAPI := RPCAPI{service: service} - - err := service.rpcServer.Register(&rpcAPI) - if err != nil { - return nil, err //nolint:wrapcheck - } - - // update client - service.rpcClient = rpc.NewClientWithServer(host, Protocol, service.rpcServer) - - return service, nil -} - -func (s *Service) Lookup(ctx context.Context, peer peer.ID, req *corev1.RecordRef) (*corev1.RecordRef, error) { - logger.Debug("P2p RPC: Executing Lookup request on remote peer", "peer", peer, "req", req) - - var resp LookupResponse - - err := s.rpcClient.CallContext(ctx, peer, DirService, DirServiceFuncLookup, req, &resp) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to call remote peer: %v", err) - } - - return &corev1.RecordRef{ - Cid: resp.Cid, - }, nil -} - -func (s *Service) Pull(ctx context.Context, peer peer.ID, req *corev1.RecordRef) (*corev1.Record, error) { - logger.Debug("P2p RPC: Executing Pull request on remote peer", 
"peer", peer, "req", req) - - var resp PullResponse - - err := s.rpcClient.CallContext(ctx, peer, DirService, DirServiceFuncPull, req, &resp) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to call remote peer: %v", err) - } - - record, err := corev1.UnmarshalRecord(resp.Data) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal record: %v", err) - } - - return record, nil -} - -// NOTE: List RPC client method removed since List is a local-only operation -// Use Search for network-wide record discovery instead +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:revive +package rpc + +import ( + "context" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + rpc "github.com/libp2p/go-libp2p-gorpc" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var logger = logging.Logger("rpc") + +// TODO: proper cleanup and implementation needed! + +const ( + Protocol = protocol.ID("/dir/rpc/1.0.0") + DirService = "RPCAPI" + DirServiceFuncLookup = "Lookup" + DirServiceFuncPull = "Pull" + MaxPullSize = 4 * 1024 * 1024 // 4 MB +) + +type RPCAPI struct { + service *Service +} + +type PullResponse struct { + Cid string + Annotations map[string]string + Data []byte +} + +type LookupResponse struct { + Cid string + Annotations map[string]string +} + +// NOTE: List-related types removed since List is a local-only operation +// and should not be part of peer-to-peer RPC communication + +func (r *RPCAPI) Lookup(ctx context.Context, in *corev1.RecordRef, out *LookupResponse) error { + logger.Debug("P2p RPC: Executing Lookup request on remote peer", "peer", r.service.host.ID()) + + // validate request + if in == nil || out == nil { + return status.Error(codes.InvalidArgument, "invalid request: nil request/response") //nolint:wrapcheck + } + + // handle lookup + meta, err := r.service.store.Lookup(ctx, in) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to lookup: %s", st.Message()) + } + + // write result + *out = LookupResponse{ + Cid: meta.GetCid(), + Annotations: meta.GetAnnotations(), + } + + return nil +} + +func (r *RPCAPI) Pull(ctx context.Context, in *corev1.RecordRef, out *PullResponse) error { + logger.Debug("P2p RPC: Executing Pull request on remote peer", "peer", r.service.host.ID()) + + // validate request + if in == nil || out == nil { + return status.Error(codes.InvalidArgument, "invalid request: nil request/response") //nolint:wrapcheck + } + + // lookup + meta, err := r.service.store.Lookup(ctx, in) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to lookup: %s", st.Message()) + } + + // pull data + record, err := r.service.store.Pull(ctx, in) + if err != nil { + st := status.Convert(err) + + return status.Errorf(st.Code(), "failed to pull: %s", st.Message()) + } + + canonicalBytes, err := record.Marshal() + if err != nil { + return status.Errorf(codes.Internal, "failed to marshal record: %v", err) + } + + // set output + *out = PullResponse{ + Cid: meta.GetCid(), + Data: canonicalBytes, + Annotations: meta.GetAnnotations(), + } + + return nil +} + +// NOTE: List RPC method removed since List is a local-only operation + +type Service struct { + rpcServer *rpc.Server + rpcClient 
*rpc.Client + host host.Host + store types.StoreAPI +} + +func New(host host.Host, store types.StoreAPI) (*Service, error) { + service := &Service{ + rpcServer: rpc.NewServer(host, Protocol), + host: host, + store: store, + } + + // register api + rpcAPI := RPCAPI{service: service} + + err := service.rpcServer.Register(&rpcAPI) + if err != nil { + return nil, err //nolint:wrapcheck + } + + // update client + service.rpcClient = rpc.NewClientWithServer(host, Protocol, service.rpcServer) + + return service, nil +} + +func (s *Service) Lookup(ctx context.Context, peer peer.ID, req *corev1.RecordRef) (*corev1.RecordRef, error) { + logger.Debug("P2p RPC: Executing Lookup request on remote peer", "peer", peer, "req", req) + + var resp LookupResponse + + err := s.rpcClient.CallContext(ctx, peer, DirService, DirServiceFuncLookup, req, &resp) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to call remote peer: %v", err) + } + + return &corev1.RecordRef{ + Cid: resp.Cid, + }, nil +} + +func (s *Service) Pull(ctx context.Context, peer peer.ID, req *corev1.RecordRef) (*corev1.Record, error) { + logger.Debug("P2p RPC: Executing Pull request on remote peer", "peer", peer, "req", req) + + var resp PullResponse + + err := s.rpcClient.CallContext(ctx, peer, DirService, DirServiceFuncPull, req, &resp) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to call remote peer: %v", err) + } + + record, err := corev1.UnmarshalRecord(resp.Data) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmarshal record: %v", err) + } + + return record, nil +} + +// NOTE: List RPC client method removed since List is a local-only operation +// Use Search for network-wide record discovery instead diff --git a/server/routing/search_simple_test.go b/server/routing/search_simple_test.go index 25a332b61..e0c460680 100644 --- a/server/routing/search_simple_test.go +++ b/server/routing/search_simple_test.go @@ -1,373 +1,373 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "context" - "encoding/json" - "os" - "testing" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/types" - ipfsdatastore "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Test the core Search functionality using a simplified approach. 
-func TestSearch_CoreLogic(t *testing.T) { - ctx := t.Context() - - // Create test datastore - dstore, cleanup := setupSearchTestDatastore(t) - defer cleanup() - - // Setup test data - simulate remote announcements from different peers - testData := []struct { - cid string - peerID string - labels []string - }{ - { - cid: "ai-record-1", - peerID: "remote-peer-1", - labels: []string{"/skills/AI", "/skills/AI/ML"}, - }, - { - cid: "ai-record-2", - peerID: "remote-peer-2", - labels: []string{"/skills/AI/NLP"}, - }, - { - cid: "web-record", - peerID: "remote-peer-3", - labels: []string{"/skills/web-development", "/skills/javascript"}, - }, - { - cid: "local-record", - peerID: testLocalPeerID, // This should be filtered out - labels: []string{"/skills/AI"}, - }, - } - - // Store test label metadata - for _, td := range testData { - for _, label := range td.labels { - enhancedKey := BuildEnhancedLabelKey(types.Label(label), td.cid, td.peerID) - metadata := &types.LabelMetadata{ - Timestamp: time.Now(), - LastSeen: time.Now(), - } - metadataBytes, err := json.Marshal(metadata) - require.NoError(t, err) - - err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), metadataBytes) - require.NoError(t, err) - } - } - - t.Run("search_filters_remote_records_only", func(t *testing.T) { - // Test that we can find remote records and filter out local ones - localPeerID := testLocalPeerID - - // Simulate searching for AI skills - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - } - - // Use our simplified search logic - results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 1) - - // Should return 2 remote records (ai-record-1, ai-record-2) but not local-record - assert.Len(t, results, 2) - - expectedCIDs := []string{"ai-record-1", "ai-record-2"} - - foundCIDs := make(map[string]bool) - for _, result := range results { - foundCIDs[result.GetRecordRef().GetCid()] = true - - // Verify it's not from local peer - assert.NotEqual(t, localPeerID, result.GetPeer().GetId()) - - // Verify structure - assert.NotNil(t, result.GetRecordRef()) - assert.NotNil(t, result.GetPeer()) - assert.Positive(t, result.GetMatchScore()) - } - - for _, expectedCID := range expectedCIDs { - assert.True(t, foundCIDs[expectedCID], "Expected CID %s not found", expectedCID) - } - }) - - t.Run("search_with_and_logic", func(t *testing.T) { - // Test AND logic with multiple queries - localPeerID := testLocalPeerID - - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI/ML", - }, - } - - results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 2) - - // Only ai-record-1 should match both AI and AI/ML - assert.Len(t, results, 1) - assert.Equal(t, "ai-record-1", results[0].GetRecordRef().GetCid()) - assert.Equal(t, "remote-peer-1", results[0].GetPeer().GetId()) - assert.Equal(t, uint32(2), results[0].GetMatchScore()) - }) - - t.Run("search_with_limit", func(t *testing.T) { - // Test result limiting - localPeerID := testLocalPeerID - - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - } - - results := simulateSearch(ctx, dstore, localPeerID, queries, 1, 1) // Limit to 1 - - assert.Len(t, results, 1) - assert.NotEqual(t, localPeerID, results[0].GetPeer().GetId()) - }) - - t.Run("search_with_high_min_score", func(t *testing.T) { - // Test minimum 
match score filtering - localPeerID := testLocalPeerID - - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - } - - results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 5) // Very high score - - assert.Empty(t, results) // No results should meet the high score requirement - }) - - t.Run("search_no_queries_returns_all_remote", func(t *testing.T) { - // Test that no queries returns all remote records - localPeerID := testLocalPeerID - - results := simulateSearch(ctx, dstore, localPeerID, []*routingv1.RecordQuery{}, 10, 0) - - // Should return 3 remote records (excluding local-peer) - assert.Len(t, results, 3) - - for _, result := range results { - assert.NotEqual(t, localPeerID, result.GetPeer().GetId()) - } - }) - - t.Run("search_with_different_local_peer_id", func(t *testing.T) { - // Test with a different localPeerID to validate the filtering logic - differentLocalPeer := "remote-peer-1" // This peer has records in our test data - - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - } - - results := simulateSearch(ctx, dstore, differentLocalPeer, queries, 10, 1) - - // Should return different results since "remote-peer-1" is now considered "local" - // and should be filtered out. Should find records from other peers: remote-peer-2, remote-peer-3, local-peer - assert.Len(t, results, 2) // ai-record-2 (remote-peer-2) + local-record (local-peer) - - foundPeers := make(map[string]bool) - - for _, result := range results { - assert.NotEqual(t, differentLocalPeer, result.GetPeer().GetId()) - - foundPeers[result.GetPeer().GetId()] = true - } - - // Should contain records from peers other than "remote-peer-1" - expectedPeers := []string{"remote-peer-2", testLocalPeerID} - for _, expectedPeer := range expectedPeers { - assert.True(t, foundPeers[expectedPeer], "Should find record from peer %s", expectedPeer) - } - }) - - t.Run("search_validates_peer_filtering_logic", func(t *testing.T) { - // Test that changing localPeerID actually changes which records are filtered - queries := []*routingv1.RecordQuery{ - { - Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, - Value: "AI", - }, - } - - // Search with testLocalPeerID as local - resultsA := simulateSearch(ctx, dstore, testLocalPeerID, queries, 10, 1) - - // Search with "remote-peer-1" as local - resultsB := simulateSearch(ctx, dstore, "remote-peer-1", queries, 10, 1) - - // Results may have same count but should contain different peers - // resultsA filters out testLocalPeerID, resultsB filters out "remote-peer-1" - // Both should return 2 results, but from different peer combinations - - // Collect all peer IDs from each result set - peersA := make(map[string]bool) - for _, result := range resultsA { - peersA[result.GetPeer().GetId()] = true - } - - peersB := make(map[string]bool) - for _, result := range resultsB { - peersB[result.GetPeer().GetId()] = true - } - - // The peer sets should be different (different peers filtered out) - assert.NotEqual(t, peersA, peersB, "Different localPeerID should result in different peer sets") - }) -} - -// Simplified search simulation for testing. 
-// -//nolint:gocognit // Test helper function that replicates search logic - complexity is necessary -func simulateSearch(ctx context.Context, dstore types.Datastore, localPeerID string, queries []*routingv1.RecordQuery, limit uint32, minMatchScore uint32) []*routingv1.SearchResponse { - var results []*routingv1.SearchResponse - - processedCIDs := make(map[string]bool) - processedCount := 0 - limitInt := int(limit) - - // Query all namespaces using shared function - entries, err := QueryAllNamespaces(ctx, dstore) - if err != nil { - return results - } - - for _, entry := range entries { - if limitInt > 0 && processedCount >= limitInt { - break - } - - // Parse enhanced key - _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - continue - } - - // Filter for REMOTE records only - if keyPeerID == localPeerID { - continue - } - - // Avoid duplicates - if processedCIDs[keyCID] { - continue - } - - // Check if matches all queries - if testMatchesAllQueriesSimple(ctx, dstore, keyCID, queries, keyPeerID) { - // Calculate score safely - score := safeIntToUint32(len(queries)) - if len(queries) == 0 { - score = 1 - } - - if score >= minMatchScore { - results = append(results, &routingv1.SearchResponse{ - RecordRef: &corev1.RecordRef{Cid: keyCID}, - Peer: &routingv1.Peer{Id: keyPeerID}, - MatchQueries: queries, - MatchScore: score, - }) - - processedCIDs[keyCID] = true - processedCount++ - - if limitInt > 0 && processedCount >= limitInt { - break - } - } - } - } - - return results -} - -// Simplified query matching for testing. -func testMatchesAllQueriesSimple(ctx context.Context, dstore types.Datastore, cid string, queries []*routingv1.RecordQuery, peerID string) bool { - if len(queries) == 0 { - return true - } - - // Get labels for this CID/PeerID using shared namespace iteration - entries, err := QueryAllNamespaces(ctx, dstore) - if err != nil { - return false - } - - var labelStrings []string - - for _, entry := range entries { - label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) - if err != nil { - continue - } - - if keyCID == cid && keyPeerID == peerID { - labelStrings = append(labelStrings, label.String()) - } - } - - // Use shared query matching logic - convert strings to labels - labelRetriever := func(_ context.Context, _ string) []types.Label { - labelList := make([]types.Label, len(labelStrings)) - for i, labelStr := range labelStrings { - labelList[i] = types.Label(labelStr) - } - - return labelList - } - - return MatchesAllQueries(ctx, cid, queries, labelRetriever) -} - -// Helper functions for testing - -// setupSearchTestDatastore creates a temporary datastore for search testing. -func setupSearchTestDatastore(t *testing.T) (types.Datastore, func()) { - t.Helper() - - dsOpts := []datastore.Option{ - datastore.WithFsProvider("/tmp/test-search-" + t.Name()), - } - - dstore, err := datastore.New(dsOpts...) 
- require.NoError(t, err) - - cleanup := func() { - _ = dstore.Close() - _ = os.RemoveAll("/tmp/test-search-" + t.Name()) - } - - return dstore, cleanup -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "context" + "encoding/json" + "os" + "testing" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/agntcy/dir/server/datastore" + "github.com/agntcy/dir/server/types" + ipfsdatastore "github.com/ipfs/go-datastore" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test the core Search functionality using a simplified approach. +func TestSearch_CoreLogic(t *testing.T) { + ctx := t.Context() + + // Create test datastore + dstore, cleanup := setupSearchTestDatastore(t) + defer cleanup() + + // Setup test data - simulate remote announcements from different peers + testData := []struct { + cid string + peerID string + labels []string + }{ + { + cid: "ai-record-1", + peerID: "remote-peer-1", + labels: []string{"/skills/AI", "/skills/AI/ML"}, + }, + { + cid: "ai-record-2", + peerID: "remote-peer-2", + labels: []string{"/skills/AI/NLP"}, + }, + { + cid: "web-record", + peerID: "remote-peer-3", + labels: []string{"/skills/web-development", "/skills/javascript"}, + }, + { + cid: "local-record", + peerID: testLocalPeerID, // This should be filtered out + labels: []string{"/skills/AI"}, + }, + } + + // Store test label metadata + for _, td := range testData { + for _, label := range td.labels { + enhancedKey := BuildEnhancedLabelKey(types.Label(label), td.cid, td.peerID) + metadata := &types.LabelMetadata{ + Timestamp: time.Now(), + LastSeen: time.Now(), + } + metadataBytes, err := json.Marshal(metadata) + require.NoError(t, err) + + err = dstore.Put(ctx, ipfsdatastore.NewKey(enhancedKey), metadataBytes) + require.NoError(t, err) + } + } + + t.Run("search_filters_remote_records_only", func(t *testing.T) { + // Test that we can find remote records and filter out local ones + localPeerID := testLocalPeerID + + // Simulate searching for AI skills + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + } + + // Use our simplified search logic + results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 1) + + // Should return 2 remote records (ai-record-1, ai-record-2) but not local-record + assert.Len(t, results, 2) + + expectedCIDs := []string{"ai-record-1", "ai-record-2"} + + foundCIDs := make(map[string]bool) + for _, result := range results { + foundCIDs[result.GetRecordRef().GetCid()] = true + + // Verify it's not from local peer + assert.NotEqual(t, localPeerID, result.GetPeer().GetId()) + + // Verify structure + assert.NotNil(t, result.GetRecordRef()) + assert.NotNil(t, result.GetPeer()) + assert.Positive(t, result.GetMatchScore()) + } + + for _, expectedCID := range expectedCIDs { + assert.True(t, foundCIDs[expectedCID], "Expected CID %s not found", expectedCID) + } + }) + + t.Run("search_with_and_logic", func(t *testing.T) { + // Test AND logic with multiple queries + localPeerID := testLocalPeerID + + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI/ML", + }, + } + + results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 2) + + // Only ai-record-1 should match both AI and AI/ML 
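+		// (ai-record-2 matches "AI" via the /skills/AI/NLP prefix but carries no
+		// /skills/AI/ML label, so it fails the AND requirement)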
+ assert.Len(t, results, 1) + assert.Equal(t, "ai-record-1", results[0].GetRecordRef().GetCid()) + assert.Equal(t, "remote-peer-1", results[0].GetPeer().GetId()) + assert.Equal(t, uint32(2), results[0].GetMatchScore()) + }) + + t.Run("search_with_limit", func(t *testing.T) { + // Test result limiting + localPeerID := testLocalPeerID + + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + } + + results := simulateSearch(ctx, dstore, localPeerID, queries, 1, 1) // Limit to 1 + + assert.Len(t, results, 1) + assert.NotEqual(t, localPeerID, results[0].GetPeer().GetId()) + }) + + t.Run("search_with_high_min_score", func(t *testing.T) { + // Test minimum match score filtering + localPeerID := testLocalPeerID + + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + } + + results := simulateSearch(ctx, dstore, localPeerID, queries, 10, 5) // Very high score + + assert.Empty(t, results) // No results should meet the high score requirement + }) + + t.Run("search_no_queries_returns_all_remote", func(t *testing.T) { + // Test that no queries returns all remote records + localPeerID := testLocalPeerID + + results := simulateSearch(ctx, dstore, localPeerID, []*routingv1.RecordQuery{}, 10, 0) + + // Should return 3 remote records (excluding local-peer) + assert.Len(t, results, 3) + + for _, result := range results { + assert.NotEqual(t, localPeerID, result.GetPeer().GetId()) + } + }) + + t.Run("search_with_different_local_peer_id", func(t *testing.T) { + // Test with a different localPeerID to validate the filtering logic + differentLocalPeer := "remote-peer-1" // This peer has records in our test data + + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + } + + results := simulateSearch(ctx, dstore, differentLocalPeer, queries, 10, 1) + + // Should return different results since "remote-peer-1" is now considered "local" + // and should be filtered out. 
Should find records from other peers: remote-peer-2, remote-peer-3, local-peer + assert.Len(t, results, 2) // ai-record-2 (remote-peer-2) + local-record (local-peer) + + foundPeers := make(map[string]bool) + + for _, result := range results { + assert.NotEqual(t, differentLocalPeer, result.GetPeer().GetId()) + + foundPeers[result.GetPeer().GetId()] = true + } + + // Should contain records from peers other than "remote-peer-1" + expectedPeers := []string{"remote-peer-2", testLocalPeerID} + for _, expectedPeer := range expectedPeers { + assert.True(t, foundPeers[expectedPeer], "Should find record from peer %s", expectedPeer) + } + }) + + t.Run("search_validates_peer_filtering_logic", func(t *testing.T) { + // Test that changing localPeerID actually changes which records are filtered + queries := []*routingv1.RecordQuery{ + { + Type: routingv1.RecordQueryType_RECORD_QUERY_TYPE_SKILL, + Value: "AI", + }, + } + + // Search with testLocalPeerID as local + resultsA := simulateSearch(ctx, dstore, testLocalPeerID, queries, 10, 1) + + // Search with "remote-peer-1" as local + resultsB := simulateSearch(ctx, dstore, "remote-peer-1", queries, 10, 1) + + // Results may have same count but should contain different peers + // resultsA filters out testLocalPeerID, resultsB filters out "remote-peer-1" + // Both should return 2 results, but from different peer combinations + + // Collect all peer IDs from each result set + peersA := make(map[string]bool) + for _, result := range resultsA { + peersA[result.GetPeer().GetId()] = true + } + + peersB := make(map[string]bool) + for _, result := range resultsB { + peersB[result.GetPeer().GetId()] = true + } + + // The peer sets should be different (different peers filtered out) + assert.NotEqual(t, peersA, peersB, "Different localPeerID should result in different peer sets") + }) +} + +// Simplified search simulation for testing. +// +//nolint:gocognit // Test helper function that replicates search logic - complexity is necessary +func simulateSearch(ctx context.Context, dstore types.Datastore, localPeerID string, queries []*routingv1.RecordQuery, limit uint32, minMatchScore uint32) []*routingv1.SearchResponse { + var results []*routingv1.SearchResponse + + processedCIDs := make(map[string]bool) + processedCount := 0 + limitInt := int(limit) + + // Query all namespaces using shared function + entries, err := QueryAllNamespaces(ctx, dstore) + if err != nil { + return results + } + + for _, entry := range entries { + if limitInt > 0 && processedCount >= limitInt { + break + } + + // Parse enhanced key + _, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + continue + } + + // Filter for REMOTE records only + if keyPeerID == localPeerID { + continue + } + + // Avoid duplicates + if processedCIDs[keyCID] { + continue + } + + // Check if matches all queries + if testMatchesAllQueriesSimple(ctx, dstore, keyCID, queries, keyPeerID) { + // Calculate score safely + score := safeIntToUint32(len(queries)) + if len(queries) == 0 { + score = 1 + } + + if score >= minMatchScore { + results = append(results, &routingv1.SearchResponse{ + RecordRef: &corev1.RecordRef{Cid: keyCID}, + Peer: &routingv1.Peer{Id: keyPeerID}, + MatchQueries: queries, + MatchScore: score, + }) + + processedCIDs[keyCID] = true + processedCount++ + + if limitInt > 0 && processedCount >= limitInt { + break + } + } + } + } + + return results +} + +// Simplified query matching for testing. 
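+//
+// A minimal sketch of the labelRetriever contract assumed by MatchesAllQueries
+// (illustrative; the real retriever below filters cached entries by CID/PeerID):
+//
+//	retriever := func(_ context.Context, _ string) []types.Label {
+//		return []types.Label{"/skills/AI", "/skills/AI/ML"}
+//	}
+//
+// MatchesAllQueries applies AND semantics across queries, with hierarchical
+// prefix matching for skills (query "AI" matches "/skills/AI/ML").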
+func testMatchesAllQueriesSimple(ctx context.Context, dstore types.Datastore, cid string, queries []*routingv1.RecordQuery, peerID string) bool { + if len(queries) == 0 { + return true + } + + // Get labels for this CID/PeerID using shared namespace iteration + entries, err := QueryAllNamespaces(ctx, dstore) + if err != nil { + return false + } + + var labelStrings []string + + for _, entry := range entries { + label, keyCID, keyPeerID, err := ParseEnhancedLabelKey(entry.Key) + if err != nil { + continue + } + + if keyCID == cid && keyPeerID == peerID { + labelStrings = append(labelStrings, label.String()) + } + } + + // Use shared query matching logic - convert strings to labels + labelRetriever := func(_ context.Context, _ string) []types.Label { + labelList := make([]types.Label, len(labelStrings)) + for i, labelStr := range labelStrings { + labelList[i] = types.Label(labelStr) + } + + return labelList + } + + return MatchesAllQueries(ctx, cid, queries, labelRetriever) +} + +// Helper functions for testing + +// setupSearchTestDatastore creates a temporary datastore for search testing. +func setupSearchTestDatastore(t *testing.T) (types.Datastore, func()) { + t.Helper() + + dsOpts := []datastore.Option{ + datastore.WithFsProvider("/tmp/test-search-" + t.Name()), + } + + dstore, err := datastore.New(dsOpts...) + require.NoError(t, err) + + cleanup := func() { + _ = dstore.Close() + _ = os.RemoveAll("/tmp/test-search-" + t.Name()) + } + + return dstore, cleanup +} diff --git a/server/routing/test_utils.go b/server/routing/test_utils.go index 09f2cace8..25e6e8dca 100644 --- a/server/routing/test_utils.go +++ b/server/routing/test_utils.go @@ -1,61 +1,61 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:testifylint -package routing - -import ( - "context" - "testing" - "time" - - "github.com/agntcy/dir/server/config" - routingconfig "github.com/agntcy/dir/server/routing/config" - "github.com/agntcy/dir/server/store" - storeconfig "github.com/agntcy/dir/server/store/config" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" -) - -const testLocalPeerID = "local-peer" - -//nolint:revive -func newTestServer(t *testing.T, ctx context.Context, bootPeers []string) *route { - t.Helper() - - refreshInterval := 1 * time.Second - - // define opts with faster refresh interval for testing - // Use a unique temporary directory for each test to avoid datastore sharing - opts := types.NewOptions( - &config.Config{ - Store: storeconfig.Config{ - Provider: string(store.OCI), - OCI: ociconfig.Config{ - LocalDir: t.TempDir(), - }, - }, - Routing: routingconfig.Config{ - ListenAddress: "/ip4/0.0.0.0/tcp/0", - BootstrapPeers: bootPeers, - RefreshInterval: refreshInterval, // Fast refresh for testing - DatastoreDir: t.TempDir(), // Use isolated BadgerDB for each test - }, - }, - ) - - // create new store - s, err := store.New(opts) - assert.NoError(t, err) - - // create example server - r, err := New(ctx, s, opts) - assert.NoError(t, err) - - // check the type assertion - routeInstance, ok := r.(*route) - assert.True(t, ok, "expected r to be of type *route") - - return routeInstance -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:testifylint +package routing + +import ( + "context" + "testing" + "time" + + "github.com/agntcy/dir/server/config" + routingconfig 
"github.com/agntcy/dir/server/routing/config" + "github.com/agntcy/dir/server/store" + storeconfig "github.com/agntcy/dir/server/store/config" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/types" + "github.com/stretchr/testify/assert" +) + +const testLocalPeerID = "local-peer" + +//nolint:revive +func newTestServer(t *testing.T, ctx context.Context, bootPeers []string) *route { + t.Helper() + + refreshInterval := 1 * time.Second + + // define opts with faster refresh interval for testing + // Use a unique temporary directory for each test to avoid datastore sharing + opts := types.NewOptions( + &config.Config{ + Store: storeconfig.Config{ + Provider: string(store.OCI), + OCI: ociconfig.Config{ + LocalDir: t.TempDir(), + }, + }, + Routing: routingconfig.Config{ + ListenAddress: "/ip4/0.0.0.0/tcp/0", + BootstrapPeers: bootPeers, + RefreshInterval: refreshInterval, // Fast refresh for testing + DatastoreDir: t.TempDir(), // Use isolated BadgerDB for each test + }, + }, + ) + + // create new store + s, err := store.New(opts) + assert.NoError(t, err) + + // create example server + r, err := New(ctx, s, opts) + assert.NoError(t, err) + + // check the type assertion + routeInstance, ok := r.(*route) + assert.True(t, ok, "expected r to be of type *route") + + return routeInstance +} diff --git a/server/routing/utils.go b/server/routing/utils.go index fed5777c2..f6aaa4729 100644 --- a/server/routing/utils.go +++ b/server/routing/utils.go @@ -1,61 +1,61 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package routing - -import ( - "fmt" - "math" - - routingv1 "github.com/agntcy/dir/api/routing/v1" -) - -// toPtr converts a value to a pointer to that value. -// This is a generic helper function useful for creating pointers to literals. -func toPtr[T any](v T) *T { - return &v -} - -// safeIntToUint32 safely converts int to uint32, preventing integer overflow. -// This function provides secure conversion with bounds checking for production use. -func safeIntToUint32(val int) uint32 { - if val < 0 { - return 0 - } - - if val > math.MaxUint32 { - return math.MaxUint32 - } - - return uint32(val) -} - -// deduplicateQueries removes duplicate queries to ensure consistent scoring. -// Two queries are considered duplicates if they have the same Type and Value. -// This provides defensive programming against client bugs and ensures predictable API behavior. -func deduplicateQueries(queries []*routingv1.RecordQuery) []*routingv1.RecordQuery { - if len(queries) <= 1 { - return queries - } - - seen := make(map[string]bool) - - var deduplicated []*routingv1.RecordQuery - - for _, query := range queries { - if query == nil { - continue // Skip nil queries defensively - } - - // Create unique key from type and value - key := fmt.Sprintf("%s:%s", query.GetType().String(), query.GetValue()) - - if !seen[key] { - seen[key] = true - - deduplicated = append(deduplicated, query) - } - } - - return deduplicated -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package routing + +import ( + "fmt" + "math" + + routingv1 "github.com/agntcy/dir/api/routing/v1" +) + +// toPtr converts a value to a pointer to that value. +// This is a generic helper function useful for creating pointers to literals. +func toPtr[T any](v T) *T { + return &v +} + +// safeIntToUint32 safely converts int to uint32, preventing integer overflow. 
+// This function provides secure conversion with bounds checking for production use. +func safeIntToUint32(val int) uint32 { + if val < 0 { + return 0 + } + + if val > math.MaxUint32 { + return math.MaxUint32 + } + + return uint32(val) +} + +// deduplicateQueries removes duplicate queries to ensure consistent scoring. +// Two queries are considered duplicates if they have the same Type and Value. +// This provides defensive programming against client bugs and ensures predictable API behavior. +func deduplicateQueries(queries []*routingv1.RecordQuery) []*routingv1.RecordQuery { + if len(queries) <= 1 { + return queries + } + + seen := make(map[string]bool) + + var deduplicated []*routingv1.RecordQuery + + for _, query := range queries { + if query == nil { + continue // Skip nil queries defensively + } + + // Create unique key from type and value + key := fmt.Sprintf("%s:%s", query.GetType().String(), query.GetValue()) + + if !seen[key] { + seen[key] = true + + deduplicated = append(deduplicated, query) + } + } + + return deduplicated +} diff --git a/server/routing/validators/validators.go b/server/routing/validators/validators.go index 775893359..189fa0282 100644 --- a/server/routing/validators/validators.go +++ b/server/routing/validators/validators.go @@ -1,440 +1,440 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package validators - -import ( - "errors" - "strconv" - "strings" - - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-cid" - record "github.com/libp2p/go-libp2p-record" -) - -// Import routing utilities for label validation -// Note: Since validators is a sub-package of routing, it can import from the parent - -// IsValidLabelKey checks if a key starts with any valid label type prefix. -func IsValidLabelKey(key string) bool { - for _, labelType := range types.AllLabelTypes() { - if strings.HasPrefix(key, labelType.Prefix()) { - return true - } - } - - return false -} - -var validatorLogger = logging.Logger("routing/validators") - -// BaseValidator provides common validation logic for all label validators. -type BaseValidator struct{} - -// validateKeyFormat validates the enhanced DHT key format with PeerID. -func (v *BaseValidator) validateKeyFormat(key string, expectedNamespace string) ([]string, error) { - // Parse enhanced key format: //// - // Minimum parts: ["", "namespace", "path", "cid", "peer_id"] - parts := strings.Split(key, "/") - if len(parts) < types.MinLabelKeyParts { - return nil, errors.New("invalid key format: expected ////") - } - - // Validate namespace - if parts[1] != expectedNamespace { - return nil, errors.New("invalid namespace: expected " + expectedNamespace + ", got " + parts[1]) - } - - // Extract and validate PeerID (last part) first - peerID := parts[len(parts)-1] - if peerID == "" { - return nil, errors.New("missing PeerID in key") - } - - // Check if the last part looks like a CID (common mistake) - if _, err := cid.Decode(peerID); err == nil { - return nil, errors.New("invalid key format: expected ////") - } - - // Extract and validate CID (second to last part) - cidStr := parts[len(parts)-2] - if cidStr == "" { - return nil, errors.New("missing CID in key") - } - - // Validate CID format - _, err := cid.Decode(cidStr) - if err != nil { - return nil, errors.New("invalid CID format: " + err.Error()) - } - - return parts, nil -} - -// validateValue validates the DHT value (if present). 
-func (v *BaseValidator) validateValue(value []byte) error { - if len(value) > 0 { - // Value should be a valid CID if present - _, err := cid.Decode(string(value)) - if err != nil { - return errors.New("invalid CID in value: " + err.Error()) - } - } - - return nil -} - -// selectFirstValid provides default selection logic for all validators. -func (v *BaseValidator) selectFirstValid(key string, values [][]byte, validateFunc func(string, []byte) error) (int, error) { - validatorLogger.Debug("Selecting from multiple DHT record values", "key", key, "count", len(values)) - - if len(values) == 0 { - return -1, errors.New("no values to select from") - } - - for i, value := range values { - err := validateFunc(key, value) - if err == nil { - validatorLogger.Debug("Selected DHT record value", "key", key, "index", i) - - return i, nil - } - } - - validatorLogger.Warn("No valid values found for DHT record", "key", key) - - return -1, errors.New("no valid values found") -} - -// SkillValidator validates DHT records for skill-based content discovery. -type SkillValidator struct { - BaseValidator -} - -// Validate validates a skills DHT record. -// Key format: /skills/// -// Future: Can validate against skill taxonomy, required levels, etc. -func (v *SkillValidator) Validate(key string, value []byte) error { - validatorLogger.Debug("Validating skills DHT record", "key", key) - - // Basic format validation - parts, err := v.validateKeyFormat(key, types.LabelTypeSkill.String()) - if err != nil { - return err - } - - // Skills-specific validation - if err := v.validateSkillsSpecific(parts); err != nil { - return err - } - - // Value validation - if err := v.validateValue(value); err != nil { - return err - } - - validatorLogger.Debug("Skills DHT record validation successful", "key", key) - - return nil -} - -// validateSkillsSpecific performs skills-specific validation logic. -func (v *SkillValidator) validateSkillsSpecific(parts []string) error { - // parts[0] = "", parts[1] = "skills", parts[2:len-2] = skill path components, parts[len-2] = cid, parts[len-1] = peer_id - // Enhanced format: /skills/// - if len(parts) < types.MinLabelKeyParts { - return errors.New("skills key must have format: /skills///") - } - - // Extract skill path (everything between "skills" and CID) - skillParts := parts[2 : len(parts)-2] // Exclude CID and PeerID - if len(skillParts) == 0 { - return errors.New("skill path cannot be empty") - } - - // Validate that none of the skill path components are empty - for i, part := range skillParts { - if part == "" { - return errors.New("skill path component cannot be empty at position " + strconv.Itoa(i+1)) - } - } - - // Future: validate against skill taxonomy - // skillPath := strings.Join(skillParts, "/") - // if !v.isValidSkillPath(skillPath) { - // return errors.New("invalid skill path: " + skillPath) - // } - - return nil -} - -// Select chooses between multiple values for skills records. -func (v *SkillValidator) Select(key string, values [][]byte) (int, error) { - return v.selectFirstValid(key, values, v.Validate) -} - -// DomainValidator validates DHT records for domain-based content discovery. -type DomainValidator struct { - BaseValidator -} - -// Validate validates a domains DHT record. -// Key format: /domains// -// Future: Can validate against domain ontology, registry, etc. 
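// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): end-to-end use of
// the SkillValidator defined above, on a well-formed key and on one missing
// its skill path. The demo function name is hypothetical; the CID is the
// valid one used in the package tests.
package validators

import "fmt"

func demoSkillValidator() {
    v := &SkillValidator{}
    const validCID = "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"

    // Well-formed: non-empty skill path, then CID, then PeerID.
    fmt.Println(v.Validate("/skills/programming/golang/"+validCID+"/Peer1", nil)) // <nil>

    // Too short: no skill path between the namespace and the CID.
    err := v.Validate("/skills/"+validCID+"/Peer1", nil)
    fmt.Println(err != nil) // true
}
// ---------------------------------------------------------------------------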
-func (v *DomainValidator) Validate(key string, value []byte) error { - validatorLogger.Debug("Validating domains DHT record", "key", key) - - // Basic format validation - parts, err := v.validateKeyFormat(key, types.LabelTypeDomain.String()) - if err != nil { - return err - } - - // Domains-specific validation - if err := v.validateDomainsSpecific(parts); err != nil { - return err - } - - // Value validation - if err := v.validateValue(value); err != nil { - return err - } - - validatorLogger.Debug("Domains DHT record validation successful", "key", key) - - return nil -} - -// validateDomainsSpecific performs domains-specific validation logic. -func (v *DomainValidator) validateDomainsSpecific(parts []string) error { - // parts[0] = "", parts[1] = "domains", parts[2:len-2] = domain path components, parts[len-2] = cid, parts[len-1] = peer_id - // Enhanced format: /domains/// - if len(parts) < types.MinLabelKeyParts { - return errors.New("domains key must have format: /domains///") - } - - // Extract domain path (everything between "domains" and CID) - domainParts := parts[2 : len(parts)-2] // Exclude CID and PeerID - if len(domainParts) == 0 { - return errors.New("domain path cannot be empty") - } - - // Future: validate against domain registry/ontology - // domain := strings.Join(domainParts, "/") - // if !v.isValidDomain(domain) { - // return errors.New("invalid domain: " + domain) - // } - - return nil -} - -// Select chooses between multiple values for domains records. -func (v *DomainValidator) Select(key string, values [][]byte) (int, error) { - return v.selectFirstValid(key, values, v.Validate) -} - -// ModuleValidator validates DHT records for module-based content discovery. -type ModuleValidator struct { - BaseValidator -} - -// Validate validates a modules DHT record. -// Key format: /modules// -// Future: Can validate against module specifications, versions, etc. -func (v *ModuleValidator) Validate(key string, value []byte) error { - validatorLogger.Debug("Validating modules DHT record", "key", key) - - // Basic format validation - parts, err := v.validateKeyFormat(key, types.LabelTypeModule.String()) - if err != nil { - return err - } - - // Modules-specific validation - if err := v.validateModulesSpecific(parts); err != nil { - return err - } - - // Value validation - if err := v.validateValue(value); err != nil { - return err - } - - validatorLogger.Debug("Modules DHT record validation successful", "key", key) - - return nil -} - -// validateModulesSpecific performs modules-specific validation logic. -func (v *ModuleValidator) validateModulesSpecific(parts []string) error { - // parts[0] = "", parts[1] = "modules", parts[2:len-2] = module path components, parts[len-2] = cid, parts[len-1] = peer_id - // Enhanced format: /modules/// - if len(parts) < types.MinLabelKeyParts { - return errors.New("modules key must have format: /modules///") - } - - // Extract module path (everything between "modules" and CID) - moduleParts := parts[2 : len(parts)-2] // Exclude CID and PeerID - if len(moduleParts) == 0 { - return errors.New("module path cannot be empty") - } - - // Future: validate against module specifications - // module := strings.Join(moduleParts, "/") - // if !v.isValidModule(module) { - // return errors.New("invalid module: " + module) - // } - - return nil -} - -// Select chooses between multiple values for modules records. 
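// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): Select delegates to
// selectFirstValid, so it returns the index of the first value that passes
// Validate — here index 1, because index 0 is not a decodable CID. This
// mirrors the "select first valid from multiple" case in the tests further
// down; the demo function name is hypothetical.
package validators

import "fmt"

func demoSelectFirstValid() {
    v := &DomainValidator{}
    const validCID = "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"
    key := "/domains/ai/" + validCID + "/Peer1"

    idx, err := v.Select(key, [][]byte{
        []byte("not-a-cid"), // fails value validation
        []byte(validCID),    // first valid candidate
    })
    fmt.Println(idx, err) // 1 <nil>
}
// ---------------------------------------------------------------------------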
-func (v *ModuleValidator) Select(key string, values [][]byte) (int, error) { - return v.selectFirstValid(key, values, v.Validate) -} - -// LocatorValidator validates DHT records for locator-based content discovery. -type LocatorValidator struct { - BaseValidator -} - -// Validate validates a locators DHT record. -// Key format: /locators/// -// Future: Can validate against supported locator types, registry, etc. -func (v *LocatorValidator) Validate(key string, value []byte) error { - validatorLogger.Debug("Validating locators DHT record", "key", key) - - // Basic format validation - parts, err := v.validateKeyFormat(key, types.LabelTypeLocator.String()) - if err != nil { - return err - } - - // Locators-specific validation - if err := v.validateLocatorsSpecific(parts); err != nil { - return err - } - - // Value validation - if err := v.validateValue(value); err != nil { - return err - } - - validatorLogger.Debug("Locators DHT record validation successful", "key", key) - - return nil -} - -// validateLocatorsSpecific performs locators-specific validation logic. -func (v *LocatorValidator) validateLocatorsSpecific(parts []string) error { - // parts[0] = "", parts[1] = "locators", parts[2:len-2] = locator path components, parts[len-2] = cid, parts[len-1] = peer_id - // Enhanced format: /locators/// - if len(parts) < types.MinLabelKeyParts { - return errors.New("locators key must have format: /locators///") - } - - // Extract locator type (everything between "locators" and CID) - locatorParts := parts[2 : len(parts)-2] // Exclude CID and PeerID - if len(locatorParts) == 0 { - return errors.New("locator type cannot be empty") - } - - // Validate that none of the locator path components are empty - for i, part := range locatorParts { - if part == "" { - return errors.New("locator path component cannot be empty at position " + strconv.Itoa(i+1)) - } - } - - // Future: validate against supported locator types - // locatorType := strings.Join(locatorParts, "/") - // if !v.isValidLocatorType(locatorType) { - // return errors.New("invalid locator type: " + locatorType) - // } - - return nil -} - -// Select chooses between multiple values for locators records. -func (v *LocatorValidator) Select(key string, values [][]byte) (int, error) { - return v.selectFirstValid(key, values, v.Validate) -} - -// CreateLabelValidators creates separate validators for each label namespace. -func CreateLabelValidators() map[string]record.Validator { - return map[string]record.Validator{ - types.LabelTypeSkill.String(): &SkillValidator{}, - types.LabelTypeDomain.String(): &DomainValidator{}, - types.LabelTypeModule.String(): &ModuleValidator{}, - types.LabelTypeLocator.String(): &LocatorValidator{}, - } -} - -// ValidateLabelKey validates a label key format before storing in DHT. -func ValidateLabelKey(key string) error { - parts := strings.Split(key, "/") - if len(parts) < types.MinLabelKeyParts { - return errors.New("invalid key format: expected ///") - } - - namespace := parts[1] - if _, valid := types.ParseLabelType(namespace); !valid { - return errors.New("unsupported namespace: " + namespace) - } - - // Extract and validate CID (last part) - cidStr := parts[len(parts)-1] - if cidStr == "" { - return errors.New("missing CID in key") - } - - _, err := cid.Decode(cidStr) - if err != nil { - return errors.New("invalid CID format: " + err.Error()) - } - - return nil -} - -// FormatLabelKey formats a label and CID into a proper DHT key. 
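// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): how the helpers
// defined just below fit together. FormatLabelKey builds the announce-time
// key (CID last), which ValidateLabelKey accepts; appending a PeerID yields
// the enhanced DHT key that ExtractCIDFromLabelKey parses. The demo function
// name is hypothetical.
package validators

import "fmt"

func demoLabelKeyHelpers() {
    const validCID = "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"

    key := FormatLabelKey("skills/programming/golang", validCID)
    fmt.Println(key)                   // /skills/programming/golang/<validCID>
    fmt.Println(ValidateLabelKey(key)) // <nil>

    enhanced := key + "/Peer1"
    got, err := ExtractCIDFromLabelKey(enhanced)
    fmt.Println(got == validCID, err) // true <nil>
}
// ---------------------------------------------------------------------------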
-func FormatLabelKey(label, cidStr string) string { - // Ensure label starts with / - if !strings.HasPrefix(label, "/") { - label = "/" + label - } - - // Ensure no double slashes and add CID - key := strings.TrimSuffix(label, "/") + "/" + cidStr - - return key -} - -// ExtractCIDFromLabelKey extracts CID from enhanced label key format. -// Example: "/skills/golang/CID123/Peer1" → "CID123", nil. -func ExtractCIDFromLabelKey(labelKey string) (string, error) { - parts := strings.Split(labelKey, "/") - if len(parts) < types.MinLabelKeyParts { - return "", errors.New("invalid enhanced key format: expected ////") - } - - // Validate it's a proper label key - if !IsValidLabelKey(labelKey) { - return "", errors.New("invalid namespace in label key") - } - - // Extract and validate CID (second to last part) - cidStr := parts[len(parts)-2] - if cidStr == "" { - return "", errors.New("missing CID in label key") - } - - // Validate CID format - _, err := cid.Decode(cidStr) - if err != nil { - return "", errors.New("invalid CID format: " + err.Error()) - } - - return cidStr, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package validators + +import ( + "errors" + "strconv" + "strings" + + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-cid" + record "github.com/libp2p/go-libp2p-record" +) + +// Import routing utilities for label validation +// Note: Since validators is a sub-package of routing, it can import from the parent + +// IsValidLabelKey checks if a key starts with any valid label type prefix. +func IsValidLabelKey(key string) bool { + for _, labelType := range types.AllLabelTypes() { + if strings.HasPrefix(key, labelType.Prefix()) { + return true + } + } + + return false +} + +var validatorLogger = logging.Logger("routing/validators") + +// BaseValidator provides common validation logic for all label validators. +type BaseValidator struct{} + +// validateKeyFormat validates the enhanced DHT key format with PeerID. +func (v *BaseValidator) validateKeyFormat(key string, expectedNamespace string) ([]string, error) { + // Parse enhanced key format: //// + // Minimum parts: ["", "namespace", "path", "cid", "peer_id"] + parts := strings.Split(key, "/") + if len(parts) < types.MinLabelKeyParts { + return nil, errors.New("invalid key format: expected ////") + } + + // Validate namespace + if parts[1] != expectedNamespace { + return nil, errors.New("invalid namespace: expected " + expectedNamespace + ", got " + parts[1]) + } + + // Extract and validate PeerID (last part) first + peerID := parts[len(parts)-1] + if peerID == "" { + return nil, errors.New("missing PeerID in key") + } + + // Check if the last part looks like a CID (common mistake) + if _, err := cid.Decode(peerID); err == nil { + return nil, errors.New("invalid key format: expected ////") + } + + // Extract and validate CID (second to last part) + cidStr := parts[len(parts)-2] + if cidStr == "" { + return nil, errors.New("missing CID in key") + } + + // Validate CID format + _, err := cid.Decode(cidStr) + if err != nil { + return nil, errors.New("invalid CID format: " + err.Error()) + } + + return parts, nil +} + +// validateValue validates the DHT value (if present). 
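// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): validateValue, whose
// body follows, treats the value as optional — an empty payload passes, a
// decodable CID passes, and anything else is rejected. The demo function name
// is hypothetical.
package validators

import "fmt"

func demoValidateValue() {
    v := &BaseValidator{}
    const validCID = "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"

    fmt.Println(v.validateValue(nil))                   // <nil>: absent value is allowed
    fmt.Println(v.validateValue([]byte(validCID)))      // <nil>: value is a decodable CID
    fmt.Println(v.validateValue([]byte("junk")) != nil) // true: non-CID payloads are rejected
}
// ---------------------------------------------------------------------------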
+func (v *BaseValidator) validateValue(value []byte) error { + if len(value) > 0 { + // Value should be a valid CID if present + _, err := cid.Decode(string(value)) + if err != nil { + return errors.New("invalid CID in value: " + err.Error()) + } + } + + return nil +} + +// selectFirstValid provides default selection logic for all validators. +func (v *BaseValidator) selectFirstValid(key string, values [][]byte, validateFunc func(string, []byte) error) (int, error) { + validatorLogger.Debug("Selecting from multiple DHT record values", "key", key, "count", len(values)) + + if len(values) == 0 { + return -1, errors.New("no values to select from") + } + + for i, value := range values { + err := validateFunc(key, value) + if err == nil { + validatorLogger.Debug("Selected DHT record value", "key", key, "index", i) + + return i, nil + } + } + + validatorLogger.Warn("No valid values found for DHT record", "key", key) + + return -1, errors.New("no valid values found") +} + +// SkillValidator validates DHT records for skill-based content discovery. +type SkillValidator struct { + BaseValidator +} + +// Validate validates a skills DHT record. +// Key format: /skills/// +// Future: Can validate against skill taxonomy, required levels, etc. +func (v *SkillValidator) Validate(key string, value []byte) error { + validatorLogger.Debug("Validating skills DHT record", "key", key) + + // Basic format validation + parts, err := v.validateKeyFormat(key, types.LabelTypeSkill.String()) + if err != nil { + return err + } + + // Skills-specific validation + if err := v.validateSkillsSpecific(parts); err != nil { + return err + } + + // Value validation + if err := v.validateValue(value); err != nil { + return err + } + + validatorLogger.Debug("Skills DHT record validation successful", "key", key) + + return nil +} + +// validateSkillsSpecific performs skills-specific validation logic. +func (v *SkillValidator) validateSkillsSpecific(parts []string) error { + // parts[0] = "", parts[1] = "skills", parts[2:len-2] = skill path components, parts[len-2] = cid, parts[len-1] = peer_id + // Enhanced format: /skills/// + if len(parts) < types.MinLabelKeyParts { + return errors.New("skills key must have format: /skills///") + } + + // Extract skill path (everything between "skills" and CID) + skillParts := parts[2 : len(parts)-2] // Exclude CID and PeerID + if len(skillParts) == 0 { + return errors.New("skill path cannot be empty") + } + + // Validate that none of the skill path components are empty + for i, part := range skillParts { + if part == "" { + return errors.New("skill path component cannot be empty at position " + strconv.Itoa(i+1)) + } + } + + // Future: validate against skill taxonomy + // skillPath := strings.Join(skillParts, "/") + // if !v.isValidSkillPath(skillPath) { + // return errors.New("invalid skill path: " + skillPath) + // } + + return nil +} + +// Select chooses between multiple values for skills records. +func (v *SkillValidator) Select(key string, values [][]byte) (int, error) { + return v.selectFirstValid(key, values, v.Validate) +} + +// DomainValidator validates DHT records for domain-based content discovery. +type DomainValidator struct { + BaseValidator +} + +// Validate validates a domains DHT record. +// Key format: /domains// +// Future: Can validate against domain ontology, registry, etc. 
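// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): compile-time checks
// that each validator in this file satisfies the libp2p record.Validator
// interface (Validate plus Select), which is the contract the DHT consumes
// them through.
package validators

import record "github.com/libp2p/go-libp2p-record"

var (
    _ record.Validator = (*SkillValidator)(nil)
    _ record.Validator = (*DomainValidator)(nil)
    _ record.Validator = (*ModuleValidator)(nil)
    _ record.Validator = (*LocatorValidator)(nil)
)
// ---------------------------------------------------------------------------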
+func (v *DomainValidator) Validate(key string, value []byte) error { + validatorLogger.Debug("Validating domains DHT record", "key", key) + + // Basic format validation + parts, err := v.validateKeyFormat(key, types.LabelTypeDomain.String()) + if err != nil { + return err + } + + // Domains-specific validation + if err := v.validateDomainsSpecific(parts); err != nil { + return err + } + + // Value validation + if err := v.validateValue(value); err != nil { + return err + } + + validatorLogger.Debug("Domains DHT record validation successful", "key", key) + + return nil +} + +// validateDomainsSpecific performs domains-specific validation logic. +func (v *DomainValidator) validateDomainsSpecific(parts []string) error { + // parts[0] = "", parts[1] = "domains", parts[2:len-2] = domain path components, parts[len-2] = cid, parts[len-1] = peer_id + // Enhanced format: /domains/// + if len(parts) < types.MinLabelKeyParts { + return errors.New("domains key must have format: /domains///") + } + + // Extract domain path (everything between "domains" and CID) + domainParts := parts[2 : len(parts)-2] // Exclude CID and PeerID + if len(domainParts) == 0 { + return errors.New("domain path cannot be empty") + } + + // Future: validate against domain registry/ontology + // domain := strings.Join(domainParts, "/") + // if !v.isValidDomain(domain) { + // return errors.New("invalid domain: " + domain) + // } + + return nil +} + +// Select chooses between multiple values for domains records. +func (v *DomainValidator) Select(key string, values [][]byte) (int, error) { + return v.selectFirstValid(key, values, v.Validate) +} + +// ModuleValidator validates DHT records for module-based content discovery. +type ModuleValidator struct { + BaseValidator +} + +// Validate validates a modules DHT record. +// Key format: /modules// +// Future: Can validate against module specifications, versions, etc. +func (v *ModuleValidator) Validate(key string, value []byte) error { + validatorLogger.Debug("Validating modules DHT record", "key", key) + + // Basic format validation + parts, err := v.validateKeyFormat(key, types.LabelTypeModule.String()) + if err != nil { + return err + } + + // Modules-specific validation + if err := v.validateModulesSpecific(parts); err != nil { + return err + } + + // Value validation + if err := v.validateValue(value); err != nil { + return err + } + + validatorLogger.Debug("Modules DHT record validation successful", "key", key) + + return nil +} + +// validateModulesSpecific performs modules-specific validation logic. +func (v *ModuleValidator) validateModulesSpecific(parts []string) error { + // parts[0] = "", parts[1] = "modules", parts[2:len-2] = module path components, parts[len-2] = cid, parts[len-1] = peer_id + // Enhanced format: /modules/// + if len(parts) < types.MinLabelKeyParts { + return errors.New("modules key must have format: /modules///") + } + + // Extract module path (everything between "modules" and CID) + moduleParts := parts[2 : len(parts)-2] // Exclude CID and PeerID + if len(moduleParts) == 0 { + return errors.New("module path cannot be empty") + } + + // Future: validate against module specifications + // module := strings.Join(moduleParts, "/") + // if !v.isValidModule(module) { + // return errors.New("invalid module: " + module) + // } + + return nil +} + +// Select chooses between multiple values for modules records. 
+func (v *ModuleValidator) Select(key string, values [][]byte) (int, error) { + return v.selectFirstValid(key, values, v.Validate) +} + +// LocatorValidator validates DHT records for locator-based content discovery. +type LocatorValidator struct { + BaseValidator +} + +// Validate validates a locators DHT record. +// Key format: /locators/// +// Future: Can validate against supported locator types, registry, etc. +func (v *LocatorValidator) Validate(key string, value []byte) error { + validatorLogger.Debug("Validating locators DHT record", "key", key) + + // Basic format validation + parts, err := v.validateKeyFormat(key, types.LabelTypeLocator.String()) + if err != nil { + return err + } + + // Locators-specific validation + if err := v.validateLocatorsSpecific(parts); err != nil { + return err + } + + // Value validation + if err := v.validateValue(value); err != nil { + return err + } + + validatorLogger.Debug("Locators DHT record validation successful", "key", key) + + return nil +} + +// validateLocatorsSpecific performs locators-specific validation logic. +func (v *LocatorValidator) validateLocatorsSpecific(parts []string) error { + // parts[0] = "", parts[1] = "locators", parts[2:len-2] = locator path components, parts[len-2] = cid, parts[len-1] = peer_id + // Enhanced format: /locators/// + if len(parts) < types.MinLabelKeyParts { + return errors.New("locators key must have format: /locators///") + } + + // Extract locator type (everything between "locators" and CID) + locatorParts := parts[2 : len(parts)-2] // Exclude CID and PeerID + if len(locatorParts) == 0 { + return errors.New("locator type cannot be empty") + } + + // Validate that none of the locator path components are empty + for i, part := range locatorParts { + if part == "" { + return errors.New("locator path component cannot be empty at position " + strconv.Itoa(i+1)) + } + } + + // Future: validate against supported locator types + // locatorType := strings.Join(locatorParts, "/") + // if !v.isValidLocatorType(locatorType) { + // return errors.New("invalid locator type: " + locatorType) + // } + + return nil +} + +// Select chooses between multiple values for locators records. +func (v *LocatorValidator) Select(key string, values [][]byte) (int, error) { + return v.selectFirstValid(key, values, v.Validate) +} + +// CreateLabelValidators creates separate validators for each label namespace. +func CreateLabelValidators() map[string]record.Validator { + return map[string]record.Validator{ + types.LabelTypeSkill.String(): &SkillValidator{}, + types.LabelTypeDomain.String(): &DomainValidator{}, + types.LabelTypeModule.String(): &ModuleValidator{}, + types.LabelTypeLocator.String(): &LocatorValidator{}, + } +} + +// ValidateLabelKey validates a label key format before storing in DHT. +func ValidateLabelKey(key string) error { + parts := strings.Split(key, "/") + if len(parts) < types.MinLabelKeyParts { + return errors.New("invalid key format: expected ///") + } + + namespace := parts[1] + if _, valid := types.ParseLabelType(namespace); !valid { + return errors.New("unsupported namespace: " + namespace) + } + + // Extract and validate CID (last part) + cidStr := parts[len(parts)-1] + if cidStr == "" { + return errors.New("missing CID in key") + } + + _, err := cid.Decode(cidStr) + if err != nil { + return errors.New("invalid CID format: " + err.Error()) + } + + return nil +} + +// FormatLabelKey formats a label and CID into a proper DHT key. 
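// ---------------------------------------------------------------------------
// Editor's sketch (illustrative, not part of this diff): the map returned by
// CreateLabelValidators (above) can be composed into go-libp2p-record's
// NamespacedValidator, which dispatches on the key's first path segment —
// e.g. a "/skills/..." key routes to the SkillValidator. The function name
// is hypothetical.
package validators

import record "github.com/libp2p/go-libp2p-record"

func demoNamespacedValidator() record.Validator {
    ns := record.NamespacedValidator{}
    for namespace, v := range CreateLabelValidators() {
        ns[namespace] = v
    }
    return ns
}
// ---------------------------------------------------------------------------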
+func FormatLabelKey(label, cidStr string) string { + // Ensure label starts with / + if !strings.HasPrefix(label, "/") { + label = "/" + label + } + + // Ensure no double slashes and add CID + key := strings.TrimSuffix(label, "/") + "/" + cidStr + + return key +} + +// ExtractCIDFromLabelKey extracts CID from enhanced label key format. +// Example: "/skills/golang/CID123/Peer1" → "CID123", nil. +func ExtractCIDFromLabelKey(labelKey string) (string, error) { + parts := strings.Split(labelKey, "/") + if len(parts) < types.MinLabelKeyParts { + return "", errors.New("invalid enhanced key format: expected ////") + } + + // Validate it's a proper label key + if !IsValidLabelKey(labelKey) { + return "", errors.New("invalid namespace in label key") + } + + // Extract and validate CID (second to last part) + cidStr := parts[len(parts)-2] + if cidStr == "" { + return "", errors.New("missing CID in label key") + } + + // Validate CID format + _, err := cid.Decode(cidStr) + if err != nil { + return "", errors.New("invalid CID format: " + err.Error()) + } + + return cidStr, nil +} diff --git a/server/routing/validators/validators_test.go b/server/routing/validators/validators_test.go index bc1b73cda..eddf9d2a2 100644 --- a/server/routing/validators/validators_test.go +++ b/server/routing/validators/validators_test.go @@ -1,905 +1,905 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package validators - -import ( - "strings" - "testing" - - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Add utility functions for testing. -func GetLabelTypeFromKey(key string) (types.LabelType, bool) { - for _, labelType := range types.AllLabelTypes() { - if strings.HasPrefix(key, labelType.Prefix()) { - return labelType, true - } - } - - return types.LabelTypeUnknown, false -} - -func TestSkillValidator_Validate(t *testing.T) { - validator := &SkillValidator{} - - tests := []struct { - name string - key string - value []byte - wantError bool - errorMsg string - }{ - { - name: "valid skills key with category and class", - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: false, - }, - { - name: "valid skills key with value", - key: "/skills/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - value: []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - wantError: false, - }, - { - name: "invalid namespace", - key: "/domains/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid namespace: expected skills, got domains", - }, - { - name: "missing skill path", - key: "/skills/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - { - name: "valid single skill path", - key: "/skills/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: false, - }, - { - name: "empty skill path component", - key: "/skills//golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "skill path component cannot be empty at position 1", - }, - { - name: "empty skill path component in middle", - key: 
"/skills/programming//advanced/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "skill path component cannot be empty at position 2", - }, - { - name: "invalid CID format", - key: "/skills/programming/golang/invalid-cid/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid CID format", - }, - { - name: "invalid value CID", - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte("invalid-cid-value"), - wantError: true, - errorMsg: "invalid CID in value", - }, - { - name: "missing CID", - key: "/skills/programming/golang//Peer1", - value: []byte{}, - wantError: true, - errorMsg: "missing CID in key", - }, - { - name: "missing PeerID", - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validator.Validate(tt.key, tt.value) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -//nolint:dupl // Similar test structure is intentional for different validators -func TestDomainValidator_Validate(t *testing.T) { - validator := &DomainValidator{} - - tests := []struct { - name string - key string - value []byte - wantError bool - errorMsg string - }{ - { - name: "valid domains key with single domain", - key: "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: false, - }, - { - name: "valid domains key with nested domain path", - key: "/domains/ai/machine-learning/nlp/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - value: []byte{}, - wantError: false, - }, - { - name: "valid domains key with value", - key: "/domains/software/web-development/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", - value: []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - wantError: false, - }, - { - name: "invalid namespace", - key: "/skills/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid namespace: expected domains, got skills", - }, - { - name: "missing domain path", - key: "/domains/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - { - name: "invalid CID format", - key: "/domains/ai/invalid-cid/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid CID format", - }, - { - name: "invalid value CID", - key: "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte("invalid-cid-value"), - wantError: true, - errorMsg: "invalid CID in value", - }, - { - name: "missing CID", - key: "/domains/ai//Peer1", - value: []byte{}, - wantError: true, - errorMsg: "missing CID in key", - }, - { - name: "missing PeerID", - key: "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validator.Validate(tt.key, tt.value) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - 
require.NoError(t, err) - } - }) - } -} - -//nolint:dupl // Similar test structure is intentional for different validators -func TestModuleValidator_Validate(t *testing.T) { - validator := &ModuleValidator{} - - tests := []struct { - name string - key string - value []byte - wantError bool - errorMsg string - }{ - { - name: "valid modules key with single module", - key: "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: false, - }, - { - name: "valid modules key with nested module path", - key: "/modules/ai/reasoning/logical/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - value: []byte{}, - wantError: false, - }, - { - name: "valid modules key with value", - key: "/modules/search/semantic/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", - value: []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - wantError: false, - }, - { - name: "invalid namespace", - key: "/domains/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid namespace: expected modules, got domains", - }, - { - name: "missing module path", - key: "/modules/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - { - name: "invalid CID format", - key: "/modules/llm/invalid-cid/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid CID format", - }, - { - name: "invalid value CID", - key: "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte("invalid-cid-value"), - wantError: true, - errorMsg: "invalid CID in value", - }, - { - name: "missing CID", - key: "/modules/llm//Peer1", - value: []byte{}, - wantError: true, - errorMsg: "missing CID in key", - }, - { - name: "missing PeerID", - key: "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validator.Validate(tt.key, tt.value) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -//nolint:dupl // Similar test structure is intentional for different validators -func TestLocatorValidator_Validate(t *testing.T) { - validator := &LocatorValidator{} - - tests := []struct { - name string - key string - value []byte - wantError bool - errorMsg string - }{ - { - name: "valid locators key with single locator type", - key: "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: false, - }, - { - name: "valid locators key with nested locator path", - key: "/locators/container/docker/alpine/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - value: []byte{}, - wantError: false, - }, - { - name: "valid locators key with value", - key: "/locators/npm-package/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", - value: []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - wantError: false, - }, - { - name: "invalid namespace", - key: "/modules/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid namespace: expected locators, got modules", - }, - { 
- name: "missing locator type", - key: "/locators/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - { - name: "invalid CID format", - key: "/locators/docker-image/invalid-cid/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "invalid CID format", - }, - { - name: "invalid value CID", - key: "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte("invalid-cid-value"), - wantError: true, - errorMsg: "invalid CID in value", - }, - { - name: "empty locator path component", - key: "/locators//docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "locator path component cannot be empty at position 1", - }, - { - name: "empty locator path component in middle", - key: "/locators/container//alpine/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - value: []byte{}, - wantError: true, - errorMsg: "locator path component cannot be empty at position 2", - }, - { - name: "missing CID", - key: "/locators/docker-image//Peer1", - value: []byte{}, - wantError: true, - errorMsg: "missing CID in key", - }, - { - name: "missing PeerID", - key: "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - value: []byte{}, - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validator.Validate(tt.key, tt.value) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestValidators_Select(t *testing.T) { - tests := []struct { - name string - validator interface { - Select(string, [][]byte) (int, error) - } - key string - values [][]byte - wantIndex int - wantError bool - errorMsg string - }{ - { - name: "skills validator - select first valid value", - validator: &SkillValidator{}, - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - values: [][]byte{ - []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - []byte("invalid-cid"), - }, - wantIndex: 0, - wantError: false, - }, - { - name: "domains validator - select first valid from multiple", - validator: &DomainValidator{}, - key: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - values: [][]byte{ - []byte("invalid-cid"), - []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - []byte(""), - }, - wantIndex: 1, - wantError: false, - }, - { - name: "modules validator - no valid values", - validator: &ModuleValidator{}, - key: "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", - values: [][]byte{ - []byte("invalid-cid-1"), - []byte("invalid-cid-2"), - }, - wantIndex: -1, - wantError: true, - errorMsg: "no valid values found", - }, - { - name: "locators validator - select first valid value", - validator: &LocatorValidator{}, - key: "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - values: [][]byte{ - []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - []byte("invalid-cid"), - }, - wantIndex: 0, - wantError: false, - }, - { - name: "empty values slice", - validator: &SkillValidator{}, - key: 
"/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - values: [][]byte{}, - wantIndex: -1, - wantError: true, - errorMsg: "no values to select from", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - index, err := tt.validator.Select(tt.key, tt.values) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - assert.Equal(t, tt.wantIndex, index) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantIndex, index) - } - }) - } -} - -func TestLabelTypeIntegration(t *testing.T) { - // Test that LabelType works correctly with validators - // Test String() method - assert.Equal(t, "skills", types.LabelTypeSkill.String()) - assert.Equal(t, "domains", types.LabelTypeDomain.String()) - assert.Equal(t, "modules", types.LabelTypeModule.String()) - assert.Equal(t, "locators", types.LabelTypeLocator.String()) - - // Test Prefix() method - assert.Equal(t, "/skills/", types.LabelTypeSkill.Prefix()) - assert.Equal(t, "/domains/", types.LabelTypeDomain.Prefix()) - assert.Equal(t, "/modules/", types.LabelTypeModule.Prefix()) - assert.Equal(t, "/locators/", types.LabelTypeLocator.Prefix()) - - // Test IsValid() method - assert.True(t, types.LabelTypeSkill.IsValid()) - assert.True(t, types.LabelTypeDomain.IsValid()) - assert.True(t, types.LabelTypeModule.IsValid()) - assert.True(t, types.LabelTypeLocator.IsValid()) - assert.False(t, types.LabelType("invalid").IsValid()) - - // Test ParseLabelType() function - lt, valid := types.ParseLabelType("skills") - assert.True(t, valid) - assert.Equal(t, types.LabelTypeSkill, lt) - - lt, valid = types.ParseLabelType("invalid") - assert.False(t, valid) - assert.Equal(t, types.LabelTypeUnknown, lt) - - // Test AllLabelTypes() function - all := types.AllLabelTypes() - assert.Len(t, all, 4) - assert.Contains(t, all, types.LabelTypeSkill) - assert.Contains(t, all, types.LabelTypeDomain) - assert.Contains(t, all, types.LabelTypeModule) - assert.Contains(t, all, types.LabelTypeLocator) - - // Test IsValidLabelKey() function - assert.True(t, IsValidLabelKey("/skills/golang/CID123")) - assert.True(t, IsValidLabelKey("/domains/web/CID123")) - assert.True(t, IsValidLabelKey("/modules/chat/CID123")) - assert.True(t, IsValidLabelKey("/locators/docker-image/CID123")) - assert.False(t, IsValidLabelKey("/invalid/test/CID123")) - assert.False(t, IsValidLabelKey("/records/CID123")) - assert.False(t, IsValidLabelKey("skills/golang/CID123")) // missing leading slash - - // Test GetLabelTypeFromKey() function - lt, found := GetLabelTypeFromKey("/skills/golang/CID123") - assert.True(t, found) - assert.Equal(t, types.LabelTypeSkill, lt) - - lt, found = GetLabelTypeFromKey("/domains/web/CID123") - assert.True(t, found) - assert.Equal(t, types.LabelTypeDomain, lt) - - lt, found = GetLabelTypeFromKey("/modules/chat/CID123") - assert.True(t, found) - assert.Equal(t, types.LabelTypeModule, lt) - - lt, found = GetLabelTypeFromKey("/invalid/test/CID123") - assert.False(t, found) - assert.Equal(t, types.LabelTypeUnknown, lt) -} - -func TestCreateLabelValidators(t *testing.T) { - validators := CreateLabelValidators() - - // Test that all expected validators are created - assert.Len(t, validators, 4) - assert.Contains(t, validators, types.LabelTypeSkill.String()) - assert.Contains(t, validators, types.LabelTypeDomain.String()) - assert.Contains(t, validators, types.LabelTypeModule.String()) - assert.Contains(t, validators, types.LabelTypeLocator.String()) - - // Test that validators 
are of correct types - assert.IsType(t, &SkillValidator{}, validators[types.LabelTypeSkill.String()]) - assert.IsType(t, &DomainValidator{}, validators[types.LabelTypeDomain.String()]) - assert.IsType(t, &ModuleValidator{}, validators[types.LabelTypeModule.String()]) - assert.IsType(t, &LocatorValidator{}, validators[types.LabelTypeLocator.String()]) -} - -func TestValidateLabelKey(t *testing.T) { - tests := []struct { - name string - key string - wantError bool - errorMsg string - }{ - { - name: "valid skills key", - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "valid domains key", - key: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "valid modules key", - key: "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "invalid format - too few parts", - key: "/skills/programming", - wantError: true, - errorMsg: "invalid key format: expected ///", - }, - { - name: "unsupported namespace", - key: "/unknown/path/value/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: true, - errorMsg: "unsupported namespace: unknown", - }, - { - name: "missing CID", - key: "/skills/programming/golang/", - wantError: true, - errorMsg: "missing CID in key", - }, - { - name: "invalid CID format", - key: "/skills/programming/golang/invalid-cid-format", - wantError: true, - errorMsg: "invalid CID format", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := ValidateLabelKey(tt.key) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestFormatLabelKey(t *testing.T) { - tests := []struct { - name string - label string - cid string - expected string - }{ - { - name: "label with leading slash", - label: "/skills/programming/golang", - cid: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - expected: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - }, - { - name: "label without leading slash", - label: "skills/programming/golang", - cid: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - expected: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - }, - { - name: "label with trailing slash", - label: "/domains/ai/machine-learning/", - cid: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - expected: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - }, - { - name: "single component label", - label: "/modules/llm", - cid: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - expected: "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := FormatLabelKey(tt.label, tt.cid) - assert.Equal(t, tt.expected, result) - }) - } -} - -func TestBaseValidator_validateKeyFormat(t *testing.T) { - validator := &BaseValidator{} - - tests := []struct { - name string - key string - expectedNamespace string - wantError bool - errorMsg string - expectedParts []string - }{ - { - name: "valid key format", - key: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - expectedNamespace: types.LabelTypeSkill.String(), - 
wantError: false, - expectedParts: []string{"", "skills", "programming", "golang", "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", "Peer1"}, - }, - { - name: "invalid format - too few parts", - key: "/skills/programming", - expectedNamespace: types.LabelTypeSkill.String(), - wantError: true, - errorMsg: "invalid key format: expected ////", - }, - { - name: "wrong namespace", - key: "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - expectedNamespace: types.LabelTypeSkill.String(), - wantError: true, - errorMsg: "invalid namespace: expected skills, got domains", - }, - { - name: "invalid CID", - key: "/skills/programming/golang/invalid-cid/Peer1", - expectedNamespace: types.LabelTypeSkill.String(), - wantError: true, - errorMsg: "invalid CID format", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parts, err := validator.validateKeyFormat(tt.key, tt.expectedNamespace) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - assert.Nil(t, parts) - } else { - require.NoError(t, err) - assert.Equal(t, tt.expectedParts, parts) - } - }) - } -} - -func TestBaseValidator_validateValue(t *testing.T) { - validator := &BaseValidator{} - - tests := []struct { - name string - value []byte - wantError bool - errorMsg string - }{ - { - name: "empty value", - value: []byte{}, - wantError: false, - }, - { - name: "valid CID value", - value: []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), - wantError: false, - }, - { - name: "invalid CID value", - value: []byte("invalid-cid-format"), - wantError: true, - errorMsg: "invalid CID in value", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := validator.validateValue(tt.value) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - } else { - require.NoError(t, err) - } - }) - } -} - -// Benchmark tests to ensure validators perform well. 
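// ---------------------------------------------------------------------------
// Editor's note (illustrative): the benchmarks that follow can be run from
// the repo root with a standard invocation such as
//
//     go test -bench=Validator -benchmem ./server/routing/validators/
//
// to confirm that per-record validation stays cheap on the DHT hot path.
// ---------------------------------------------------------------------------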
-func BenchmarkSkillValidator_Validate(b *testing.B) { - validator := &SkillValidator{} - key := "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" - value := []byte{} - - b.ResetTimer() - - for range b.N { - _ = validator.Validate(key, value) - } -} - -func BenchmarkDomainValidator_Validate(b *testing.B) { - validator := &DomainValidator{} - key := "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2" - value := []byte{} - - b.ResetTimer() - - for range b.N { - _ = validator.Validate(key, value) - } -} - -func BenchmarkModuleValidator_Validate(b *testing.B) { - validator := &ModuleValidator{} - key := "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3" - value := []byte{} - - b.ResetTimer() - - for range b.N { - _ = validator.Validate(key, value) - } -} - -func BenchmarkLocatorValidator_Validate(b *testing.B) { - validator := &LocatorValidator{} - key := "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" - value := []byte{} - - b.ResetTimer() - - for range b.N { - _ = validator.Validate(key, value) - } -} - -func TestExtractCIDFromLabelKey(t *testing.T) { - tests := []struct { - name string - labelKey string - wantCID string - wantError bool - errorMsg string - }{ - { - name: "valid skills key", - labelKey: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "valid domains key", - labelKey: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", - wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "valid modules key", - labelKey: "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", - wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", - wantError: false, - }, - { - name: "invalid format - too few parts", - labelKey: "/skills/programming", - wantError: true, - errorMsg: "invalid enhanced key format", - }, - { - name: "invalid namespace", - labelKey: "/unknown/test/value/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", - wantError: true, - errorMsg: "invalid namespace", - }, - { - name: "invalid CID format", - labelKey: "/skills/programming/golang/invalid-cid/Peer1", - wantError: true, - errorMsg: "invalid CID format", - }, - { - name: "missing CID", - labelKey: "/skills/programming/golang//Peer1", - wantError: true, - errorMsg: "missing CID", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cid, err := ExtractCIDFromLabelKey(tt.labelKey) - - if tt.wantError { - require.Error(t, err) - assert.Contains(t, err.Error(), tt.errorMsg) - assert.Empty(t, cid) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantCID, cid) - } - }) - } -} - -func BenchmarkFormatLabelKey(b *testing.B) { - label := "/skills/programming/golang" - cid := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" - - b.ResetTimer() - - for range b.N { - _ = FormatLabelKey(label, cid) - } -} - -func BenchmarkExtractCIDFromLabelKey(b *testing.B) { - labelKey := "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" - - b.ResetTimer() - - for range b.N { - _, _ = ExtractCIDFromLabelKey(labelKey) - } -} +// Copyright AGNTCY Contributors 
(https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package validators
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/agntcy/dir/server/types"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// GetLabelTypeFromKey is a test utility that returns the label type whose
+// prefix matches the given key, or LabelTypeUnknown if none matches.
+func GetLabelTypeFromKey(key string) (types.LabelType, bool) {
+	for _, labelType := range types.AllLabelTypes() {
+		if strings.HasPrefix(key, labelType.Prefix()) {
+			return labelType, true
+		}
+	}
+
+	return types.LabelTypeUnknown, false
+}
+
+func TestSkillValidator_Validate(t *testing.T) {
+	validator := &SkillValidator{}
+
+	tests := []struct {
+		name      string
+		key       string
+		value     []byte
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "valid skills key with category and class",
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid skills key with value",
+			key:       "/skills/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2",
+			value:     []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+			wantError: false,
+		},
+		{
+			name:      "invalid namespace",
+			key:       "/domains/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid namespace: expected skills, got domains",
+		},
+		{
+			name:      "missing skill path",
+			key:       "/skills/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+		{
+			name:      "valid single skill path",
+			key:       "/skills/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "empty skill path component",
+			key:       "/skills//golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "skill path component cannot be empty at position 1",
+		},
+		{
+			name:      "empty skill path component in middle",
+			key:       "/skills/programming//advanced/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "skill path component cannot be empty at position 2",
+		},
+		{
+			name:      "invalid CID format",
+			key:       "/skills/programming/golang/invalid-cid/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid CID format",
+		},
+		{
+			name:      "invalid value CID",
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte("invalid-cid-value"),
+			wantError: true,
+			errorMsg:  "invalid CID in value",
+		},
+		{
+			name:      "missing CID",
+			key:       "/skills/programming/golang//Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "missing CID in key",
+		},
+		{
+			name:      "missing PeerID",
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validator.Validate(tt.key, tt.value)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+//nolint:dupl // Similar test structure is intentional for different validators
+func TestDomainValidator_Validate(t *testing.T) {
+	validator := &DomainValidator{}
+
+	tests := []struct {
+		name      string
+		key       string
+		value     []byte
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "valid domains key with single domain",
+			key:       "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid domains key with nested domain path",
+			key:       "/domains/ai/machine-learning/nlp/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid domains key with value",
+			key:       "/domains/software/web-development/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3",
+			value:     []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+			wantError: false,
+		},
+		{
+			name:      "invalid namespace",
+			key:       "/skills/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid namespace: expected domains, got skills",
+		},
+		{
+			name:      "missing domain path",
+			key:       "/domains/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+		{
+			name:      "invalid CID format",
+			key:       "/domains/ai/invalid-cid/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid CID format",
+		},
+		{
+			name:      "invalid value CID",
+			key:       "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte("invalid-cid-value"),
+			wantError: true,
+			errorMsg:  "invalid CID in value",
+		},
+		{
+			name:      "missing CID",
+			key:       "/domains/ai//Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "missing CID in key",
+		},
+		{
+			name:      "missing PeerID",
+			key:       "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validator.Validate(tt.key, tt.value)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+//nolint:dupl // Similar test structure is intentional for different validators
+func TestModuleValidator_Validate(t *testing.T) {
+	validator := &ModuleValidator{}
+
+	tests := []struct {
+		name      string
+		key       string
+		value     []byte
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "valid modules key with single module",
+			key:       "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid modules key with nested module path",
+			key:       "/modules/ai/reasoning/logical/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid modules key with value",
+			key:       "/modules/search/semantic/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3",
+			value:     []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+			wantError: false,
+		},
+		{
+			name:      "invalid namespace",
+			key:       "/domains/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid namespace: expected modules, got domains",
+		},
+		{
+			name:      "missing module path",
+			key:       "/modules/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+		{
+			name:      "invalid CID format",
+			key:       "/modules/llm/invalid-cid/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid CID format",
+		},
+		{
+			name:      "invalid value CID",
+			key:       "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte("invalid-cid-value"),
+			wantError: true,
+			errorMsg:  "invalid CID in value",
+		},
+		{
+			name:      "missing CID",
+			key:       "/modules/llm//Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "missing CID in key",
+		},
+		{
+			name:      "missing PeerID",
+			key:       "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validator.Validate(tt.key, tt.value)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+//nolint:dupl // Similar test structure is intentional for different validators
+func TestLocatorValidator_Validate(t *testing.T) {
+	validator := &LocatorValidator{}
+
+	tests := []struct {
+		name      string
+		key       string
+		value     []byte
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "valid locators key with single locator type",
+			key:       "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid locators key with nested locator path",
+			key:       "/locators/container/docker/alpine/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid locators key with value",
+			key:       "/locators/npm-package/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3",
+			value:     []byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+			wantError: false,
+		},
+		{
+			name:      "invalid namespace",
+			key:       "/modules/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid namespace: expected locators, got modules",
+		},
+		{
+			name:      "missing locator type",
+			key:       "/locators/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+		{
+			name:      "invalid CID format",
+			key:       "/locators/docker-image/invalid-cid/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid CID format",
+		},
+		{
+			name:      "invalid value CID",
+			key:       "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte("invalid-cid-value"),
+			wantError: true,
+			errorMsg:  "invalid CID in value",
+		},
+		{
+			name:      "empty locator path component",
+			key:       "/locators//docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "locator path component cannot be empty at position 1",
+		},
+		{
+			name:      "empty locator path component in middle",
+			key:       "/locators/container//alpine/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "locator path component cannot be empty at position 2",
+		},
+		{
+			name:      "missing CID",
+			key:       "/locators/docker-image//Peer1",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "missing CID in key",
+		},
+		{
+			name:      "missing PeerID",
+			key:       "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			value:     []byte{},
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := validator.Validate(tt.key, tt.value)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestValidators_Select(t *testing.T) {
+	tests := []struct {
+		name      string
+		validator interface {
+			Select(string, [][]byte) (int, error)
+		}
+		key       string
+		values    [][]byte
+		wantIndex int
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "skills validator - select first valid value",
+			validator: &SkillValidator{},
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			values: [][]byte{
+				[]byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+				[]byte("invalid-cid"),
+			},
+			wantIndex: 0,
+			wantError: false,
+		},
+		{
+			name:      "domains validator - select first valid from multiple",
+			validator: &DomainValidator{},
+			key:       "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2",
+			values: [][]byte{
+				[]byte("invalid-cid"),
+				[]byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+				[]byte(""),
+			},
+			wantIndex: 1,
+			wantError: false,
+		},
+		{
+			name:      "modules validator - no valid values",
+			validator: &ModuleValidator{},
+			key:       "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3",
+			values: [][]byte{
+				[]byte("invalid-cid-1"),
+				[]byte("invalid-cid-2"),
+			},
+			wantIndex: -1,
+			wantError: true,
+			errorMsg:  "no valid values found",
+		},
+		{
+			name:      "locators validator - select first valid value",
+			validator: &LocatorValidator{},
+			key:       "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			values: [][]byte{
+				[]byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"),
+				[]byte("invalid-cid"),
+			},
+			wantIndex: 0,
+			wantError: false,
+		},
+		{
+			name:      "empty values slice",
+			validator: &SkillValidator{},
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			values:    [][]byte{},
+			wantIndex: -1,
+			wantError: true,
+			errorMsg:  "no values to select from",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			index, err := tt.validator.Select(tt.key, tt.values)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+				assert.Equal(t, tt.wantIndex, index)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tt.wantIndex, index)
+			}
+		})
+	}
+}
+
+func TestLabelTypeIntegration(t *testing.T) {
+	// Test that LabelType works correctly with validators
+	// Test String() method
+	assert.Equal(t, "skills", types.LabelTypeSkill.String())
+	assert.Equal(t, "domains", types.LabelTypeDomain.String())
+	assert.Equal(t, "modules", types.LabelTypeModule.String())
+	assert.Equal(t, "locators", types.LabelTypeLocator.String())
+
+	// Test Prefix() method
+	assert.Equal(t, "/skills/", types.LabelTypeSkill.Prefix())
+	assert.Equal(t, "/domains/", types.LabelTypeDomain.Prefix())
+	assert.Equal(t, "/modules/", types.LabelTypeModule.Prefix())
+	assert.Equal(t, "/locators/", types.LabelTypeLocator.Prefix())
+
+	// Test IsValid() method
+	assert.True(t, types.LabelTypeSkill.IsValid())
+	assert.True(t, types.LabelTypeDomain.IsValid())
+	assert.True(t, types.LabelTypeModule.IsValid())
+	assert.True(t, types.LabelTypeLocator.IsValid())
+	assert.False(t, types.LabelType("invalid").IsValid())
+
+	// Test ParseLabelType() function
+	lt, valid := types.ParseLabelType("skills")
+	assert.True(t, valid)
+	assert.Equal(t, types.LabelTypeSkill, lt)
+
+	lt, valid = types.ParseLabelType("invalid")
+	assert.False(t, valid)
+	assert.Equal(t, types.LabelTypeUnknown, lt)
+
+	// Test AllLabelTypes() function
+	all := types.AllLabelTypes()
+	assert.Len(t, all, 4)
+	assert.Contains(t, all, types.LabelTypeSkill)
+	assert.Contains(t, all, types.LabelTypeDomain)
+	assert.Contains(t, all, types.LabelTypeModule)
+	assert.Contains(t, all, types.LabelTypeLocator)
+
+	// Test IsValidLabelKey() function
+	assert.True(t, IsValidLabelKey("/skills/golang/CID123"))
+	assert.True(t, IsValidLabelKey("/domains/web/CID123"))
+	assert.True(t, IsValidLabelKey("/modules/chat/CID123"))
+	assert.True(t, IsValidLabelKey("/locators/docker-image/CID123"))
+	assert.False(t, IsValidLabelKey("/invalid/test/CID123"))
+	assert.False(t, IsValidLabelKey("/records/CID123"))
+	assert.False(t, IsValidLabelKey("skills/golang/CID123")) // missing leading slash
+
+	// Test GetLabelTypeFromKey() function
+	lt, found := GetLabelTypeFromKey("/skills/golang/CID123")
+	assert.True(t, found)
+	assert.Equal(t, types.LabelTypeSkill, lt)
+
+	lt, found = GetLabelTypeFromKey("/domains/web/CID123")
+	assert.True(t, found)
+	assert.Equal(t, types.LabelTypeDomain, lt)
+
+	lt, found = GetLabelTypeFromKey("/modules/chat/CID123")
+	assert.True(t, found)
+	assert.Equal(t, types.LabelTypeModule, lt)
+
+	lt, found = GetLabelTypeFromKey("/invalid/test/CID123")
+	assert.False(t, found)
+	assert.Equal(t, types.LabelTypeUnknown, lt)
+}
+
+func TestCreateLabelValidators(t *testing.T) {
+	validators := CreateLabelValidators()
+
+	// Test that all expected validators are created
+	assert.Len(t, validators, 4)
+	assert.Contains(t, validators, types.LabelTypeSkill.String())
+	assert.Contains(t, validators, types.LabelTypeDomain.String())
+	assert.Contains(t, validators, types.LabelTypeModule.String())
+	assert.Contains(t, validators, types.LabelTypeLocator.String())
+
+	// Test that validators are of correct types
+	assert.IsType(t, &SkillValidator{}, validators[types.LabelTypeSkill.String()])
+	assert.IsType(t, &DomainValidator{}, validators[types.LabelTypeDomain.String()])
+	assert.IsType(t, &ModuleValidator{}, validators[types.LabelTypeModule.String()])
+	assert.IsType(t, &LocatorValidator{}, validators[types.LabelTypeLocator.String()])
+}
+
+func TestValidateLabelKey(t *testing.T) {
+	tests := []struct {
+		name      string
+		key       string
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "valid skills key",
+			key:       "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			wantError: false,
+		},
+		{
+			name:      "valid domains key",
+			key:       "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			wantError: false,
+		},
+		{
+			name:      "valid modules key",
+			key:       "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			wantError: false,
+		},
+		{
+			name:      "invalid format - too few parts",
+			key:       "/skills/programming",
+			wantError: true,
+			errorMsg:  "invalid key format: expected /<namespace>/<path>/<cid>",
+		},
+		{
+			name:      "unsupported namespace",
+			key:       "/unknown/path/value/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			wantError: true,
+			errorMsg:  "unsupported namespace: unknown",
+		},
+		{
+			name:      "missing CID",
+			key:       "/skills/programming/golang/",
+			wantError: true,
+			errorMsg:  "missing CID in key",
+		},
+		{
+			name:      "invalid CID format",
+			key:       "/skills/programming/golang/invalid-cid-format",
+			wantError: true,
+			errorMsg:  "invalid CID format",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := ValidateLabelKey(tt.key)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+}
+
+func TestFormatLabelKey(t *testing.T) {
+	tests := []struct {
+		name     string
+		label    string
+		cid      string
+		expected string
+	}{
+		{
+			name:     "label with leading slash",
+			label:    "/skills/programming/golang",
+			cid:      "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			expected: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+		},
+		{
+			name:     "label without leading slash",
+			label:    "skills/programming/golang",
+			cid:      "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			expected: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+		},
+		{
+			name:     "label with trailing slash",
+			label:    "/domains/ai/machine-learning/",
+			cid:      "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			expected: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+		},
+		{
+			name:     "single component label",
+			label:    "/modules/llm",
+			cid:      "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+			expected: "/modules/llm/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := FormatLabelKey(tt.label, tt.cid)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestBaseValidator_validateKeyFormat(t *testing.T) {
+	validator := &BaseValidator{}
+
+	tests := []struct {
+		name              string
+		key               string
+		expectedNamespace string
+		wantError         bool
+		errorMsg          string
+		expectedParts     []string
+	}{
+		{
+			name:              "valid key format",
+			key:               "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			expectedNamespace: types.LabelTypeSkill.String(),
+			wantError:         false,
+			expectedParts:     []string{"", "skills", "programming", "golang", "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", "Peer1"},
+		},
+		{
+			name:              "invalid format - too few parts",
+			key:               "/skills/programming",
+			expectedNamespace: types.LabelTypeSkill.String(),
+			wantError:         true,
+			errorMsg:          "invalid key format: expected /<namespace>/<path>/<cid>/<peer_id>",
+		},
+		{
+			name:              "wrong namespace",
+			key:               "/domains/ai/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1",
+			expectedNamespace: types.LabelTypeSkill.String(),
+			wantError:         true,
+			errorMsg:          "invalid namespace: expected skills, got domains",
+		},
+		{
+			name:              "invalid CID",
+			key:               "/skills/programming/golang/invalid-cid/Peer1",
+			expectedNamespace: types.LabelTypeSkill.String(),
+			wantError:         true,
+			errorMsg:          "invalid CID format",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			parts, err := validator.validateKeyFormat(tt.key, tt.expectedNamespace)
+
+			if tt.wantError {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tt.errorMsg)
+				assert.Nil(t, parts)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tt.expectedParts, parts)
+			}
+		})
+	}
+}
+
+func TestBaseValidator_validateValue(t *testing.T) {
+	validator := &BaseValidator{}
+
+	tests := []struct {
+		name      string
+		value     []byte
+		wantError bool
+		errorMsg  string
+	}{
+		{
+			name:      "empty value",
+			value:     []byte{},
+			wantError: false,
+		},
+		{
+			name:      "valid CID value",
+			value:
[]byte("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"), + wantError: false, + }, + { + name: "invalid CID value", + value: []byte("invalid-cid-format"), + wantError: true, + errorMsg: "invalid CID in value", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := validator.validateValue(tt.value) + + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +// Benchmark tests to ensure validators perform well. +func BenchmarkSkillValidator_Validate(b *testing.B) { + validator := &SkillValidator{} + key := "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" + value := []byte{} + + b.ResetTimer() + + for range b.N { + _ = validator.Validate(key, value) + } +} + +func BenchmarkDomainValidator_Validate(b *testing.B) { + validator := &DomainValidator{} + key := "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2" + value := []byte{} + + b.ResetTimer() + + for range b.N { + _ = validator.Validate(key, value) + } +} + +func BenchmarkModuleValidator_Validate(b *testing.B) { + validator := &ModuleValidator{} + key := "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3" + value := []byte{} + + b.ResetTimer() + + for range b.N { + _ = validator.Validate(key, value) + } +} + +func BenchmarkLocatorValidator_Validate(b *testing.B) { + validator := &LocatorValidator{} + key := "/locators/docker-image/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" + value := []byte{} + + b.ResetTimer() + + for range b.N { + _ = validator.Validate(key, value) + } +} + +func TestExtractCIDFromLabelKey(t *testing.T) { + tests := []struct { + name string + labelKey string + wantCID string + wantError bool + errorMsg string + }{ + { + name: "valid skills key", + labelKey: "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", + wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", + wantError: false, + }, + { + name: "valid domains key", + labelKey: "/domains/ai/machine-learning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer2", + wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", + wantError: false, + }, + { + name: "valid modules key", + labelKey: "/modules/llm/reasoning/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer3", + wantCID: "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", + wantError: false, + }, + { + name: "invalid format - too few parts", + labelKey: "/skills/programming", + wantError: true, + errorMsg: "invalid enhanced key format", + }, + { + name: "invalid namespace", + labelKey: "/unknown/test/value/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1", + wantError: true, + errorMsg: "invalid namespace", + }, + { + name: "invalid CID format", + labelKey: "/skills/programming/golang/invalid-cid/Peer1", + wantError: true, + errorMsg: "invalid CID format", + }, + { + name: "missing CID", + labelKey: "/skills/programming/golang//Peer1", + wantError: true, + errorMsg: "missing CID", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cid, err := ExtractCIDFromLabelKey(tt.labelKey) + + if tt.wantError { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.errorMsg) + assert.Empty(t, cid) + } else { + require.NoError(t, err) + assert.Equal(t, tt.wantCID, cid) + } + }) + 
} +} + +func BenchmarkFormatLabelKey(b *testing.B) { + label := "/skills/programming/golang" + cid := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" + + b.ResetTimer() + + for range b.N { + _ = FormatLabelKey(label, cid) + } +} + +func BenchmarkExtractCIDFromLabelKey(b *testing.B) { + labelKey := "/skills/programming/golang/bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/Peer1" + + b.ResetTimer() + + for range b.N { + _, _ = ExtractCIDFromLabelKey(labelKey) + } +} diff --git a/server/server.go b/server/server.go index 9f897d43a..4a6326544 100644 --- a/server/server.go +++ b/server/server.go @@ -1,439 +1,439 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package server - -import ( - "context" - "fmt" - "net" - "os" - "os/signal" - "syscall" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - eventsv1 "github.com/agntcy/dir/api/events/v1" - routingv1 "github.com/agntcy/dir/api/routing/v1" - searchv1 "github.com/agntcy/dir/api/search/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/api/version" - "github.com/agntcy/dir/server/authn" - "github.com/agntcy/dir/server/authz" - "github.com/agntcy/dir/server/config" - "github.com/agntcy/dir/server/controller" - "github.com/agntcy/dir/server/database" - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/healthcheck" - "github.com/agntcy/dir/server/metrics" - grpclogging "github.com/agntcy/dir/server/middleware/logging" - grpcratelimit "github.com/agntcy/dir/server/middleware/ratelimit" - grpcrecovery "github.com/agntcy/dir/server/middleware/recovery" - "github.com/agntcy/dir/server/publication" - "github.com/agntcy/dir/server/routing" - "github.com/agntcy/dir/server/store" - "github.com/agntcy/dir/server/sync" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/reflection" -) - -const ( - // bytesToMB is the conversion factor from bytes to megabytes. - bytesToMB = 1024 * 1024 -) - -var ( - _ types.API = &Server{} - logger = logging.Logger("server") -) - -type Server struct { - options types.APIOptions - store types.StoreAPI - routing types.RoutingAPI - database types.DatabaseAPI - eventService *events.Service - syncService *sync.Service - authnService *authn.Service - authzService *authz.Service - publicationService *publication.Service - health *healthcheck.Checker - grpcServer *grpc.Server - metricsServer *metrics.Server -} - -// buildConnectionOptions creates gRPC server options for connection management. -// These options configure connection limits, keepalive parameters, and message size limits -// to prevent resource exhaustion and detect dead connections. -// -// Connection management is applied BEFORE all interceptors to ensure limits are enforced -// at the lowest level, protecting all other server components. 
-func buildConnectionOptions(cfg config.ConnectionConfig) []grpc.ServerOption { - opts := []grpc.ServerOption{ - // Connection limits - prevent resource monopolization - grpc.MaxConcurrentStreams(cfg.MaxConcurrentStreams), - grpc.MaxRecvMsgSize(cfg.MaxRecvMsgSize), - grpc.MaxSendMsgSize(cfg.MaxSendMsgSize), - grpc.ConnectionTimeout(cfg.ConnectionTimeout), - - // Keepalive parameters - detect dead connections and rotate aged connections - grpc.KeepaliveParams(keepalive.ServerParameters{ - MaxConnectionIdle: cfg.Keepalive.MaxConnectionIdle, - MaxConnectionAge: cfg.Keepalive.MaxConnectionAge, - MaxConnectionAgeGrace: cfg.Keepalive.MaxConnectionAgeGrace, - Time: cfg.Keepalive.Time, - Timeout: cfg.Keepalive.Timeout, - }), - - // Keepalive enforcement policy - prevent client abuse - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: cfg.Keepalive.MinTime, - PermitWithoutStream: cfg.Keepalive.PermitWithoutStream, - }), - } - - logger.Info("Connection management configured", - "max_concurrent_streams", cfg.MaxConcurrentStreams, - "max_recv_msg_size_mb", cfg.MaxRecvMsgSize/bytesToMB, - "max_send_msg_size_mb", cfg.MaxSendMsgSize/bytesToMB, - "connection_timeout", cfg.ConnectionTimeout, - "max_connection_idle", cfg.Keepalive.MaxConnectionIdle, - "max_connection_age", cfg.Keepalive.MaxConnectionAge, - "keepalive_time", cfg.Keepalive.Time, - "keepalive_timeout", cfg.Keepalive.Timeout, - ) - - return opts -} - -func Run(ctx context.Context, cfg *config.Config) error { - errCh := make(chan error) - - server, err := New(ctx, cfg) - if err != nil { - return fmt.Errorf("failed to create server: %w", err) - } - - // Start server - if err := server.start(ctx); err != nil { - return fmt.Errorf("failed to start server: %w", err) - } - defer server.Close(ctx) - - // Wait for deactivation - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - - select { - case <-ctx.Done(): - return fmt.Errorf("stopping server due to context cancellation: %w", ctx.Err()) - case sig := <-sigCh: - return fmt.Errorf("stopping server due to signal: %v", sig) - case err := <-errCh: - return fmt.Errorf("stopping server due to error: %w", err) - } -} - -func New(ctx context.Context, cfg *config.Config) (*Server, error) { - logger.Debug("Creating server with config", "config", cfg, "version", version.String()) - - // Configure OASF validation based on server configuration - corev1.SetSchemaURL(cfg.OASFAPIValidation.SchemaURL) - corev1.SetDisableAPIValidation(cfg.OASFAPIValidation.Disable) - corev1.SetStrictValidation(cfg.OASFAPIValidation.StrictMode) - - logger.Info("OASF validator configured", - "schema_url", cfg.OASFAPIValidation.SchemaURL, - "disable_api_validation", cfg.OASFAPIValidation.Disable, - "strict_validation", cfg.OASFAPIValidation.StrictMode) - - // Load options - options := types.NewOptions(cfg) - serverOpts := []grpc.ServerOption{} - - // Add connection management options FIRST (lowest level - applies to all connections) - // This must be before interceptors to ensure connection limits protect all server components - connConfig := cfg.Connection.WithDefaults() - connectionOpts := buildConnectionOptions(connConfig) - serverOpts = append(serverOpts, connectionOpts...) - - // Add panic recovery interceptors (after connection management, before other interceptors) - // This prevents server crashes from panics in handlers or other interceptors - serverOpts = append(serverOpts, grpcrecovery.ServerOptions()...) 
- - // Add rate limiting interceptors (after recovery, before logging and auth) - // This protects authentication and other downstream processes from DDoS attacks - if cfg.RateLimit.Enabled { - rateLimitOpts, err := grpcratelimit.ServerOptions(&cfg.RateLimit) - if err != nil { - return nil, fmt.Errorf("failed to create rate limit interceptors: %w", err) - } - - serverOpts = append(serverOpts, rateLimitOpts...) - - logger.Info("Rate limiting enabled", - "global_rps", cfg.RateLimit.GlobalRPS, - "per_client_rps", cfg.RateLimit.PerClientRPS, - ) - } - - // Initialize metrics server (if enabled) - var metricsServer *metrics.Server - - if cfg.Metrics.Enabled { - metricsServer = metrics.New(cfg.Metrics.Address) - - // Add gRPC metrics interceptors (after recovery/rate limit, before logging) - // Metrics should capture all requests, independent of logging configuration - metricsOpts := metrics.ServerOptions() - serverOpts = append(serverOpts, metricsOpts...) - - logger.Info("Metrics enabled", "address", cfg.Metrics.Address) - } - - // Add gRPC logging interceptors (after metrics, before auth/authz) - grpcLogger := logging.Logger("grpc") - loggingOpts := grpclogging.ServerOptions(grpcLogger, cfg.Logging.Verbose) - serverOpts = append(serverOpts, loggingOpts...) - - // Create event service first (so other services can emit events) - eventService := events.New() - safeEventBus := events.NewSafeEventBus(eventService.Bus()) - - // Add event bus to options for other services - options = options.WithEventBus(safeEventBus) - - // Create APIs - storeAPI, err := store.New(options) //nolint:staticcheck - if err != nil { - return nil, fmt.Errorf("failed to create store: %w", err) - } - - routingAPI, err := routing.New(ctx, storeAPI, options) - if err != nil { - return nil, fmt.Errorf("failed to create routing: %w", err) - } - - databaseAPI, err := database.New(options) - if err != nil { - return nil, fmt.Errorf("failed to create database API: %w", err) - } - - // Create services - syncService, err := sync.New(databaseAPI, storeAPI, options) - if err != nil { - return nil, fmt.Errorf("failed to create sync service: %w", err) - } - - // Create JWT authentication service if enabled - var authnService *authn.Service - if cfg.Authn.Enabled { - authnService, err = authn.New(ctx, cfg.Authn) - if err != nil { - return nil, fmt.Errorf("failed to create authn service: %w", err) - } - - //nolint:contextcheck - serverOpts = append(serverOpts, authnService.GetServerOptions()...) - } - - var authzService *authz.Service - if cfg.Authz.Enabled { - authzService, err = authz.New(ctx, cfg.Authz) - if err != nil { - return nil, fmt.Errorf("failed to create authz service: %w", err) - } - - //nolint:contextcheck - serverOpts = append(serverOpts, authzService.GetServerOptions()...) - } - - // Create publication service - publicationService, err := publication.New(databaseAPI, storeAPI, routingAPI, options) - if err != nil { - return nil, fmt.Errorf("failed to create publication service: %w", err) - } - - // Create a server - grpcServer := grpc.NewServer(serverOpts...) 
- - // Create health checker - healthChecker := healthcheck.New() - - // Register APIs - eventsv1.RegisterEventServiceServer(grpcServer, controller.NewEventsController(eventService)) - storev1.RegisterStoreServiceServer(grpcServer, controller.NewStoreController(storeAPI, databaseAPI, options.EventBus())) - routingv1.RegisterRoutingServiceServer(grpcServer, controller.NewRoutingController(routingAPI, storeAPI, publicationService)) - routingv1.RegisterPublicationServiceServer(grpcServer, controller.NewPublicationController(databaseAPI, options)) - searchv1.RegisterSearchServiceServer(grpcServer, controller.NewSearchController(databaseAPI, storeAPI)) - storev1.RegisterSyncServiceServer(grpcServer, controller.NewSyncController(databaseAPI, options)) - signv1.RegisterSignServiceServer(grpcServer, controller.NewSignController(storeAPI)) - - // Register health service - healthChecker.Register(grpcServer) - - // Register reflection service - reflection.Register(grpcServer) - - // Initialize metrics after service registration - if metricsServer != nil { - metrics.InitializeMetrics(grpcServer, metricsServer) - - logger.Info("gRPC metrics registered") - } - - return &Server{ - options: options, - store: storeAPI, - routing: routingAPI, - database: databaseAPI, - eventService: eventService, - syncService: syncService, - authnService: authnService, - authzService: authzService, - publicationService: publicationService, - health: healthChecker, - grpcServer: grpcServer, - metricsServer: metricsServer, - }, nil -} - -func (s Server) Options() types.APIOptions { return s.options } - -func (s Server) Store() types.StoreAPI { return s.store } - -func (s Server) Routing() types.RoutingAPI { return s.routing } - -func (s Server) Database() types.DatabaseAPI { return s.database } - -// Close gracefully shuts down all server components. -// Complexity is acceptable for cleanup functions with independent service shutdowns. 
-// -//nolint:cyclop // Cleanup function requires checking each service independently -func (s Server) Close(ctx context.Context) { - // Stop health check monitoring - if s.health != nil { - stopCtx, cancel := context.WithTimeout(ctx, 5*time.Second) //nolint:mnd - defer cancel() - - if err := s.health.Stop(stopCtx); err != nil { - logger.Error("Failed to stop health check service", "error", err) - } - } - - // Stop event service - if s.eventService != nil { - if err := s.eventService.Stop(); err != nil { - logger.Error("Failed to stop event service", "error", err) - } - } - - // Stop metrics server - if s.metricsServer != nil { - stopCtx, cancel := context.WithTimeout(ctx, 10*time.Second) //nolint:mnd - defer cancel() - - if err := s.metricsServer.Stop(stopCtx); err != nil { - logger.Error("Failed to stop metrics server", "error", err) - } - } - - // Stop routing service (closes GossipSub, p2p server, DHT) - if s.routing != nil { - if err := s.routing.Stop(); err != nil { - logger.Error("Failed to stop routing service", "error", err) - } - } - - // Stop sync service if running - if s.syncService != nil { - if err := s.syncService.Stop(); err != nil { - logger.Error("Failed to stop sync service", "error", err) - } - } - - // Stop authn service if running - if s.authnService != nil { - if err := s.authnService.Stop(); err != nil { - logger.Error("Failed to stop authn service", "error", err) - } - } - - // Stop authz service if running - if s.authzService != nil { - if err := s.authzService.Stop(); err != nil { - logger.Error("Failed to stop authz service", "error", err) - } - } - - // Stop publication service if running - if s.publicationService != nil { - if err := s.publicationService.Stop(); err != nil { - logger.Error("Failed to stop publication service", "error", err) - } - } - - s.grpcServer.GracefulStop() -} - -func (s Server) start(ctx context.Context) error { - // Start metrics server - if s.metricsServer != nil { - if err := s.metricsServer.Start(); err != nil { - return fmt.Errorf("failed to start metrics server: %w", err) - } - - logger.Info("Metrics server started") - } - - // Start sync service - if s.syncService != nil { - if err := s.syncService.Start(ctx); err != nil { - return fmt.Errorf("failed to start sync service: %w", err) - } - - logger.Info("Sync service started") - } - - // Start publication service - if s.publicationService != nil { - if err := s.publicationService.Start(ctx); err != nil { - return fmt.Errorf("failed to start publication service: %w", err) - } - - logger.Info("Publication service started") - } - - // Create a listener on TCP port - listen, err := net.Listen("tcp", s.Options().Config().ListenAddress) //nolint:noctx - if err != nil { - return fmt.Errorf("failed to listen on %s: %w", s.Options().Config().ListenAddress, err) - } - - // Add readiness checks - s.health.AddReadinessCheck("database", s.database.IsReady) - s.health.AddReadinessCheck("sync", s.syncService.IsReady) - s.health.AddReadinessCheck("publication", s.publicationService.IsReady) - s.health.AddReadinessCheck("store", s.store.IsReady) - s.health.AddReadinessCheck("routing", s.routing.IsReady) - - // Start health check monitoring - if err := s.health.Start(ctx); err != nil { - return fmt.Errorf("failed to start health check monitoring: %w", err) - } - - // Serve gRPC server in the background - go func() { - logger.Info("Server starting", "address", s.Options().Config().ListenAddress) - - if err := s.grpcServer.Serve(listen); err != nil { - logger.Error("Failed to start server", "error", 
err) - } - }() - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package server + +import ( + "context" + "fmt" + "net" + "os" + "os/signal" + "syscall" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + eventsv1 "github.com/agntcy/dir/api/events/v1" + routingv1 "github.com/agntcy/dir/api/routing/v1" + searchv1 "github.com/agntcy/dir/api/search/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/api/version" + "github.com/agntcy/dir/server/authn" + "github.com/agntcy/dir/server/authz" + "github.com/agntcy/dir/server/config" + "github.com/agntcy/dir/server/controller" + "github.com/agntcy/dir/server/database" + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/server/healthcheck" + "github.com/agntcy/dir/server/metrics" + grpclogging "github.com/agntcy/dir/server/middleware/logging" + grpcratelimit "github.com/agntcy/dir/server/middleware/ratelimit" + grpcrecovery "github.com/agntcy/dir/server/middleware/recovery" + "github.com/agntcy/dir/server/publication" + "github.com/agntcy/dir/server/routing" + "github.com/agntcy/dir/server/store" + "github.com/agntcy/dir/server/sync" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/reflection" +) + +const ( + // bytesToMB is the conversion factor from bytes to megabytes. + bytesToMB = 1024 * 1024 +) + +var ( + _ types.API = &Server{} + logger = logging.Logger("server") +) + +type Server struct { + options types.APIOptions + store types.StoreAPI + routing types.RoutingAPI + database types.DatabaseAPI + eventService *events.Service + syncService *sync.Service + authnService *authn.Service + authzService *authz.Service + publicationService *publication.Service + health *healthcheck.Checker + grpcServer *grpc.Server + metricsServer *metrics.Server +} + +// buildConnectionOptions creates gRPC server options for connection management. +// These options configure connection limits, keepalive parameters, and message size limits +// to prevent resource exhaustion and detect dead connections. +// +// Connection management is applied BEFORE all interceptors to ensure limits are enforced +// at the lowest level, protecting all other server components. 
+func buildConnectionOptions(cfg config.ConnectionConfig) []grpc.ServerOption { + opts := []grpc.ServerOption{ + // Connection limits - prevent resource monopolization + grpc.MaxConcurrentStreams(cfg.MaxConcurrentStreams), + grpc.MaxRecvMsgSize(cfg.MaxRecvMsgSize), + grpc.MaxSendMsgSize(cfg.MaxSendMsgSize), + grpc.ConnectionTimeout(cfg.ConnectionTimeout), + + // Keepalive parameters - detect dead connections and rotate aged connections + grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionIdle: cfg.Keepalive.MaxConnectionIdle, + MaxConnectionAge: cfg.Keepalive.MaxConnectionAge, + MaxConnectionAgeGrace: cfg.Keepalive.MaxConnectionAgeGrace, + Time: cfg.Keepalive.Time, + Timeout: cfg.Keepalive.Timeout, + }), + + // Keepalive enforcement policy - prevent client abuse + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: cfg.Keepalive.MinTime, + PermitWithoutStream: cfg.Keepalive.PermitWithoutStream, + }), + } + + logger.Info("Connection management configured", + "max_concurrent_streams", cfg.MaxConcurrentStreams, + "max_recv_msg_size_mb", cfg.MaxRecvMsgSize/bytesToMB, + "max_send_msg_size_mb", cfg.MaxSendMsgSize/bytesToMB, + "connection_timeout", cfg.ConnectionTimeout, + "max_connection_idle", cfg.Keepalive.MaxConnectionIdle, + "max_connection_age", cfg.Keepalive.MaxConnectionAge, + "keepalive_time", cfg.Keepalive.Time, + "keepalive_timeout", cfg.Keepalive.Timeout, + ) + + return opts +} + +func Run(ctx context.Context, cfg *config.Config) error { + errCh := make(chan error) + + server, err := New(ctx, cfg) + if err != nil { + return fmt.Errorf("failed to create server: %w", err) + } + + // Start server + if err := server.start(ctx); err != nil { + return fmt.Errorf("failed to start server: %w", err) + } + defer server.Close(ctx) + + // Wait for deactivation + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + select { + case <-ctx.Done(): + return fmt.Errorf("stopping server due to context cancellation: %w", ctx.Err()) + case sig := <-sigCh: + return fmt.Errorf("stopping server due to signal: %v", sig) + case err := <-errCh: + return fmt.Errorf("stopping server due to error: %w", err) + } +} + +func New(ctx context.Context, cfg *config.Config) (*Server, error) { + logger.Debug("Creating server with config", "config", cfg, "version", version.String()) + + // Configure OASF validation based on server configuration + corev1.SetSchemaURL(cfg.OASFAPIValidation.SchemaURL) + corev1.SetDisableAPIValidation(cfg.OASFAPIValidation.Disable) + corev1.SetStrictValidation(cfg.OASFAPIValidation.StrictMode) + + logger.Info("OASF validator configured", + "schema_url", cfg.OASFAPIValidation.SchemaURL, + "disable_api_validation", cfg.OASFAPIValidation.Disable, + "strict_validation", cfg.OASFAPIValidation.StrictMode) + + // Load options + options := types.NewOptions(cfg) + serverOpts := []grpc.ServerOption{} + + // Add connection management options FIRST (lowest level - applies to all connections) + // This must be before interceptors to ensure connection limits protect all server components + connConfig := cfg.Connection.WithDefaults() + connectionOpts := buildConnectionOptions(connConfig) + serverOpts = append(serverOpts, connectionOpts...) + + // Add panic recovery interceptors (after connection management, before other interceptors) + // This prevents server crashes from panics in handlers or other interceptors + serverOpts = append(serverOpts, grpcrecovery.ServerOptions()...) 
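+
+	// NOTE: interceptor execution follows the order in which the options are
+	// appended below (assuming each middleware package registers itself via
+	// grpc.ChainUnaryInterceptor / grpc.ChainStreamInterceptor): recovery runs
+	// first, then rate limiting, metrics, logging, and finally authn/authz.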
+ + // Add rate limiting interceptors (after recovery, before logging and auth) + // This protects authentication and other downstream processes from DDoS attacks + if cfg.RateLimit.Enabled { + rateLimitOpts, err := grpcratelimit.ServerOptions(&cfg.RateLimit) + if err != nil { + return nil, fmt.Errorf("failed to create rate limit interceptors: %w", err) + } + + serverOpts = append(serverOpts, rateLimitOpts...) + + logger.Info("Rate limiting enabled", + "global_rps", cfg.RateLimit.GlobalRPS, + "per_client_rps", cfg.RateLimit.PerClientRPS, + ) + } + + // Initialize metrics server (if enabled) + var metricsServer *metrics.Server + + if cfg.Metrics.Enabled { + metricsServer = metrics.New(cfg.Metrics.Address) + + // Add gRPC metrics interceptors (after recovery/rate limit, before logging) + // Metrics should capture all requests, independent of logging configuration + metricsOpts := metrics.ServerOptions() + serverOpts = append(serverOpts, metricsOpts...) + + logger.Info("Metrics enabled", "address", cfg.Metrics.Address) + } + + // Add gRPC logging interceptors (after metrics, before auth/authz) + grpcLogger := logging.Logger("grpc") + loggingOpts := grpclogging.ServerOptions(grpcLogger, cfg.Logging.Verbose) + serverOpts = append(serverOpts, loggingOpts...) + + // Create event service first (so other services can emit events) + eventService := events.New() + safeEventBus := events.NewSafeEventBus(eventService.Bus()) + + // Add event bus to options for other services + options = options.WithEventBus(safeEventBus) + + // Create APIs + storeAPI, err := store.New(options) //nolint:staticcheck + if err != nil { + return nil, fmt.Errorf("failed to create store: %w", err) + } + + routingAPI, err := routing.New(ctx, storeAPI, options) + if err != nil { + return nil, fmt.Errorf("failed to create routing: %w", err) + } + + databaseAPI, err := database.New(options) + if err != nil { + return nil, fmt.Errorf("failed to create database API: %w", err) + } + + // Create services + syncService, err := sync.New(databaseAPI, storeAPI, options) + if err != nil { + return nil, fmt.Errorf("failed to create sync service: %w", err) + } + + // Create JWT authentication service if enabled + var authnService *authn.Service + if cfg.Authn.Enabled { + authnService, err = authn.New(ctx, cfg.Authn) + if err != nil { + return nil, fmt.Errorf("failed to create authn service: %w", err) + } + + //nolint:contextcheck + serverOpts = append(serverOpts, authnService.GetServerOptions()...) + } + + var authzService *authz.Service + if cfg.Authz.Enabled { + authzService, err = authz.New(ctx, cfg.Authz) + if err != nil { + return nil, fmt.Errorf("failed to create authz service: %w", err) + } + + //nolint:contextcheck + serverOpts = append(serverOpts, authzService.GetServerOptions()...) + } + + // Create publication service + publicationService, err := publication.New(databaseAPI, storeAPI, routingAPI, options) + if err != nil { + return nil, fmt.Errorf("failed to create publication service: %w", err) + } + + // Create a server + grpcServer := grpc.NewServer(serverOpts...) 
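+
+	// NOTE: grpc.NewServer applies its options exactly once at construction;
+	// interceptors and connection settings cannot be added to a running
+	// server, which is why every ServerOption is collected in serverOpts above.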
+ + // Create health checker + healthChecker := healthcheck.New() + + // Register APIs + eventsv1.RegisterEventServiceServer(grpcServer, controller.NewEventsController(eventService)) + storev1.RegisterStoreServiceServer(grpcServer, controller.NewStoreController(storeAPI, databaseAPI, options.EventBus())) + routingv1.RegisterRoutingServiceServer(grpcServer, controller.NewRoutingController(routingAPI, storeAPI, publicationService)) + routingv1.RegisterPublicationServiceServer(grpcServer, controller.NewPublicationController(databaseAPI, options)) + searchv1.RegisterSearchServiceServer(grpcServer, controller.NewSearchController(databaseAPI, storeAPI)) + storev1.RegisterSyncServiceServer(grpcServer, controller.NewSyncController(databaseAPI, options)) + signv1.RegisterSignServiceServer(grpcServer, controller.NewSignController(storeAPI)) + + // Register health service + healthChecker.Register(grpcServer) + + // Register reflection service + reflection.Register(grpcServer) + + // Initialize metrics after service registration + if metricsServer != nil { + metrics.InitializeMetrics(grpcServer, metricsServer) + + logger.Info("gRPC metrics registered") + } + + return &Server{ + options: options, + store: storeAPI, + routing: routingAPI, + database: databaseAPI, + eventService: eventService, + syncService: syncService, + authnService: authnService, + authzService: authzService, + publicationService: publicationService, + health: healthChecker, + grpcServer: grpcServer, + metricsServer: metricsServer, + }, nil +} + +func (s Server) Options() types.APIOptions { return s.options } + +func (s Server) Store() types.StoreAPI { return s.store } + +func (s Server) Routing() types.RoutingAPI { return s.routing } + +func (s Server) Database() types.DatabaseAPI { return s.database } + +// Close gracefully shuts down all server components. +// Complexity is acceptable for cleanup functions with independent service shutdowns. 
+// +//nolint:cyclop // Cleanup function requires checking each service independently +func (s Server) Close(ctx context.Context) { + // Stop health check monitoring + if s.health != nil { + stopCtx, cancel := context.WithTimeout(ctx, 5*time.Second) //nolint:mnd + defer cancel() + + if err := s.health.Stop(stopCtx); err != nil { + logger.Error("Failed to stop health check service", "error", err) + } + } + + // Stop event service + if s.eventService != nil { + if err := s.eventService.Stop(); err != nil { + logger.Error("Failed to stop event service", "error", err) + } + } + + // Stop metrics server + if s.metricsServer != nil { + stopCtx, cancel := context.WithTimeout(ctx, 10*time.Second) //nolint:mnd + defer cancel() + + if err := s.metricsServer.Stop(stopCtx); err != nil { + logger.Error("Failed to stop metrics server", "error", err) + } + } + + // Stop routing service (closes GossipSub, p2p server, DHT) + if s.routing != nil { + if err := s.routing.Stop(); err != nil { + logger.Error("Failed to stop routing service", "error", err) + } + } + + // Stop sync service if running + if s.syncService != nil { + if err := s.syncService.Stop(); err != nil { + logger.Error("Failed to stop sync service", "error", err) + } + } + + // Stop authn service if running + if s.authnService != nil { + if err := s.authnService.Stop(); err != nil { + logger.Error("Failed to stop authn service", "error", err) + } + } + + // Stop authz service if running + if s.authzService != nil { + if err := s.authzService.Stop(); err != nil { + logger.Error("Failed to stop authz service", "error", err) + } + } + + // Stop publication service if running + if s.publicationService != nil { + if err := s.publicationService.Stop(); err != nil { + logger.Error("Failed to stop publication service", "error", err) + } + } + + s.grpcServer.GracefulStop() +} + +func (s Server) start(ctx context.Context) error { + // Start metrics server + if s.metricsServer != nil { + if err := s.metricsServer.Start(); err != nil { + return fmt.Errorf("failed to start metrics server: %w", err) + } + + logger.Info("Metrics server started") + } + + // Start sync service + if s.syncService != nil { + if err := s.syncService.Start(ctx); err != nil { + return fmt.Errorf("failed to start sync service: %w", err) + } + + logger.Info("Sync service started") + } + + // Start publication service + if s.publicationService != nil { + if err := s.publicationService.Start(ctx); err != nil { + return fmt.Errorf("failed to start publication service: %w", err) + } + + logger.Info("Publication service started") + } + + // Create a listener on TCP port + listen, err := net.Listen("tcp", s.Options().Config().ListenAddress) //nolint:noctx + if err != nil { + return fmt.Errorf("failed to listen on %s: %w", s.Options().Config().ListenAddress, err) + } + + // Add readiness checks + s.health.AddReadinessCheck("database", s.database.IsReady) + s.health.AddReadinessCheck("sync", s.syncService.IsReady) + s.health.AddReadinessCheck("publication", s.publicationService.IsReady) + s.health.AddReadinessCheck("store", s.store.IsReady) + s.health.AddReadinessCheck("routing", s.routing.IsReady) + + // Start health check monitoring + if err := s.health.Start(ctx); err != nil { + return fmt.Errorf("failed to start health check monitoring: %w", err) + } + + // Serve gRPC server in the background + go func() { + logger.Info("Server starting", "address", s.Options().Config().ListenAddress) + + if err := s.grpcServer.Serve(listen); err != nil { + logger.Error("Failed to start server", "error", 
err) + } + }() + + return nil +} diff --git a/server/server_test.go b/server/server_test.go index 9599ff626..a75a58d45 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1,339 +1,339 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package server - -import ( - "testing" - "time" - - "github.com/agntcy/dir/server/config" - "github.com/stretchr/testify/assert" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" -) - -// TestBuildConnectionOptions verifies that buildConnectionOptions creates -// the correct gRPC server options from connection configuration. -func TestBuildConnectionOptions(t *testing.T) { - tests := []struct { - name string - config config.ConnectionConfig - validate func(t *testing.T, opts []grpc.ServerOption) - }{ - { - name: "default configuration", - config: config.DefaultConnectionConfig(), - validate: func(t *testing.T, opts []grpc.ServerOption) { - t.Helper() - // Verify we get the expected number of options - // 4 basic options + 2 keepalive options = 6 total - assert.Len(t, opts, 6, "Should create 6 server options") - }, - }, - { - name: "custom configuration", - config: config.ConnectionConfig{ - MaxConcurrentStreams: 500, - MaxRecvMsgSize: 2 * 1024 * 1024, - MaxSendMsgSize: 2 * 1024 * 1024, - ConnectionTimeout: 30 * time.Second, - Keepalive: config.KeepaliveConfig{ - MaxConnectionIdle: 5 * time.Minute, - MaxConnectionAge: 10 * time.Minute, - MaxConnectionAgeGrace: 2 * time.Minute, - Time: 2 * time.Minute, - Timeout: 20 * time.Second, - MinTime: 20 * time.Second, - PermitWithoutStream: true, - }, - }, - validate: func(t *testing.T, opts []grpc.ServerOption) { - t.Helper() - // Verify we get the expected number of options - assert.Len(t, opts, 6, "Should create 6 server options") - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - opts := buildConnectionOptions(tt.config) - assert.NotNil(t, opts) - tt.validate(t, opts) - }) - } -} - -// TestBuildConnectionOptions_AllOptionsPresent verifies that all required -// connection management options are included. -func TestBuildConnectionOptions_AllOptionsPresent(t *testing.T) { - cfg := config.DefaultConnectionConfig() - opts := buildConnectionOptions(cfg) - - // We should have exactly 6 options: - // 1. MaxConcurrentStreams - // 2. MaxRecvMsgSize - // 3. MaxSendMsgSize - // 4. ConnectionTimeout - // 5. KeepaliveParams - // 6. KeepaliveEnforcementPolicy - assert.Len(t, opts, 6, "Should have 6 connection management options") - - // Verify options are not nil - for i, opt := range opts { - assert.NotNil(t, opt, "Option %d should not be nil", i) - } -} - -// TestBuildConnectionOptions_KeepaliveParameters verifies that keepalive -// parameters are correctly configured. 
-func TestBuildConnectionOptions_KeepaliveParameters(t *testing.T) { - // Create a config with known keepalive values - cfg := config.ConnectionConfig{ - MaxConcurrentStreams: 1000, - MaxRecvMsgSize: 4 * 1024 * 1024, - MaxSendMsgSize: 4 * 1024 * 1024, - ConnectionTimeout: 120 * time.Second, - Keepalive: config.KeepaliveConfig{ - MaxConnectionIdle: 15 * time.Minute, - MaxConnectionAge: 30 * time.Minute, - MaxConnectionAgeGrace: 5 * time.Minute, - Time: 5 * time.Minute, - Timeout: 1 * time.Minute, - MinTime: 1 * time.Minute, - PermitWithoutStream: true, - }, - } - - opts := buildConnectionOptions(cfg) - - // Verify we have the keepalive options - // We can't directly inspect the options, but we can verify they're created - assert.NotEmpty(t, opts, "Should have created server options") -} - -// TestBuildConnectionOptions_MessageSizeLimits verifies that message size -// limits are correctly configured. -func TestBuildConnectionOptions_MessageSizeLimits(t *testing.T) { - tests := []struct { - name string - maxRecvMsgSize int - maxSendMsgSize int - }{ - { - name: "4MB limits (default)", - maxRecvMsgSize: 4 * 1024 * 1024, - maxSendMsgSize: 4 * 1024 * 1024, - }, - { - name: "8MB limits", - maxRecvMsgSize: 8 * 1024 * 1024, - maxSendMsgSize: 8 * 1024 * 1024, - }, - { - name: "16MB limits", - maxRecvMsgSize: 16 * 1024 * 1024, - maxSendMsgSize: 16 * 1024 * 1024, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.ConnectionConfig{ - MaxConcurrentStreams: 1000, - MaxRecvMsgSize: tt.maxRecvMsgSize, - MaxSendMsgSize: tt.maxSendMsgSize, - ConnectionTimeout: 120 * time.Second, - Keepalive: config.KeepaliveConfig{}, - } - - opts := buildConnectionOptions(cfg) - assert.NotEmpty(t, opts, "Should create server options") - }) - } -} - -// TestBuildConnectionOptions_StreamLimits verifies that concurrent stream -// limits are correctly configured. -func TestBuildConnectionOptions_StreamLimits(t *testing.T) { - tests := []struct { - name string - maxConcurrentStreams uint32 - }{ - { - name: "100 streams", - maxConcurrentStreams: 100, - }, - { - name: "1000 streams (default)", - maxConcurrentStreams: 1000, - }, - { - name: "5000 streams", - maxConcurrentStreams: 5000, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cfg := config.ConnectionConfig{ - MaxConcurrentStreams: tt.maxConcurrentStreams, - MaxRecvMsgSize: 4 * 1024 * 1024, - MaxSendMsgSize: 4 * 1024 * 1024, - ConnectionTimeout: 120 * time.Second, - Keepalive: config.KeepaliveConfig{}, - } - - opts := buildConnectionOptions(cfg) - assert.NotEmpty(t, opts, "Should create server options") - }) - } -} - -// TestKeepaliveServerParameters_StructCreation verifies that we can create -// keepalive.ServerParameters with our configuration values. 
-func TestKeepaliveServerParameters_StructCreation(t *testing.T) { - cfg := config.KeepaliveConfig{ - MaxConnectionIdle: 15 * time.Minute, - MaxConnectionAge: 30 * time.Minute, - MaxConnectionAgeGrace: 5 * time.Minute, - Time: 5 * time.Minute, - Timeout: 1 * time.Minute, - } - - // Verify we can create the keepalive.ServerParameters struct - params := keepalive.ServerParameters{ - MaxConnectionIdle: cfg.MaxConnectionIdle, - MaxConnectionAge: cfg.MaxConnectionAge, - MaxConnectionAgeGrace: cfg.MaxConnectionAgeGrace, - Time: cfg.Time, - Timeout: cfg.Timeout, - } - - assert.Equal(t, 15*time.Minute, params.MaxConnectionIdle) - assert.Equal(t, 30*time.Minute, params.MaxConnectionAge) - assert.Equal(t, 5*time.Minute, params.MaxConnectionAgeGrace) - assert.Equal(t, 5*time.Minute, params.Time) - assert.Equal(t, 1*time.Minute, params.Timeout) -} - -// TestServerInitialization_SchemaURL verifies that the server correctly -// configures the OASF schema URL during initialization. -func TestServerInitialization_SchemaURL(t *testing.T) { - tests := []struct { - name string - schemaURL string - }{ - { - name: "default schema URL", - schemaURL: config.DefaultSchemaURL, - }, - { - name: "custom schema URL", - schemaURL: "https://custom.schema.url", - }, - { - name: "empty schema URL (disable API validator)", - schemaURL: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a minimal config with the schema URL - cfg := &config.Config{ - ListenAddress: config.DefaultListenAddress, - OASFAPIValidation: config.OASFAPIValidationConfig{ - SchemaURL: tt.schemaURL, - }, - Connection: config.DefaultConnectionConfig(), - } - - // We can't fully test New() because it tries to start services, - // but we can verify that a config with SchemaURL doesn't panic - // during the initial setup phase - assert.NotNil(t, cfg) - assert.Equal(t, tt.schemaURL, cfg.OASFAPIValidation.SchemaURL) - }) - } -} - -// TestServerInitialization_OASFValidation verifies that the server correctly -// configures OASF validation settings during initialization. 
-func TestServerInitialization_OASFValidation(t *testing.T) { - tests := []struct { - name string - schemaURL string - disableAPIValidation bool - strictValidation bool - }{ - { - name: "default configuration", - schemaURL: config.DefaultSchemaURL, - disableAPIValidation: false, - strictValidation: true, - }, - { - name: "custom schema URL", - schemaURL: "https://custom.schema.url", - disableAPIValidation: false, - strictValidation: true, - }, - { - name: "disable API validation", - schemaURL: "", - disableAPIValidation: true, - strictValidation: true, - }, - { - name: "lax validation mode", - schemaURL: config.DefaultSchemaURL, - disableAPIValidation: false, - strictValidation: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a config with OASF validation settings - cfg := &config.Config{ - ListenAddress: config.DefaultListenAddress, - OASFAPIValidation: config.OASFAPIValidationConfig{ - SchemaURL: tt.schemaURL, - Disable: tt.disableAPIValidation, - StrictMode: tt.strictValidation, - }, - Connection: config.DefaultConnectionConfig(), - } - - // Verify config values are set correctly - assert.NotNil(t, cfg) - assert.Equal(t, tt.schemaURL, cfg.OASFAPIValidation.SchemaURL) - assert.Equal(t, tt.disableAPIValidation, cfg.OASFAPIValidation.Disable) - assert.Equal(t, tt.strictValidation, cfg.OASFAPIValidation.StrictMode) - - // Note: We can't fully test New() because it tries to start services - // that require database connections, but we can verify that the config - // values are correctly set and would be used during server initialization - }) - } -} - -// TestKeepaliveEnforcementPolicy_StructCreation verifies that we can create -// keepalive.EnforcementPolicy with our configuration values. -func TestKeepaliveEnforcementPolicy_StructCreation(t *testing.T) { - cfg := config.KeepaliveConfig{ - MinTime: 1 * time.Minute, - PermitWithoutStream: true, - } - - // Verify we can create the keepalive.EnforcementPolicy struct - policy := keepalive.EnforcementPolicy{ - MinTime: cfg.MinTime, - PermitWithoutStream: cfg.PermitWithoutStream, - } - - assert.Equal(t, 1*time.Minute, policy.MinTime) - assert.True(t, policy.PermitWithoutStream) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package server + +import ( + "testing" + "time" + + "github.com/agntcy/dir/server/config" + "github.com/stretchr/testify/assert" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +// TestBuildConnectionOptions verifies that buildConnectionOptions creates +// the correct gRPC server options from connection configuration. 
+func TestBuildConnectionOptions(t *testing.T) { + tests := []struct { + name string + config config.ConnectionConfig + validate func(t *testing.T, opts []grpc.ServerOption) + }{ + { + name: "default configuration", + config: config.DefaultConnectionConfig(), + validate: func(t *testing.T, opts []grpc.ServerOption) { + t.Helper() + // Verify we get the expected number of options + // 4 basic options + 2 keepalive options = 6 total + assert.Len(t, opts, 6, "Should create 6 server options") + }, + }, + { + name: "custom configuration", + config: config.ConnectionConfig{ + MaxConcurrentStreams: 500, + MaxRecvMsgSize: 2 * 1024 * 1024, + MaxSendMsgSize: 2 * 1024 * 1024, + ConnectionTimeout: 30 * time.Second, + Keepalive: config.KeepaliveConfig{ + MaxConnectionIdle: 5 * time.Minute, + MaxConnectionAge: 10 * time.Minute, + MaxConnectionAgeGrace: 2 * time.Minute, + Time: 2 * time.Minute, + Timeout: 20 * time.Second, + MinTime: 20 * time.Second, + PermitWithoutStream: true, + }, + }, + validate: func(t *testing.T, opts []grpc.ServerOption) { + t.Helper() + // Verify we get the expected number of options + assert.Len(t, opts, 6, "Should create 6 server options") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + opts := buildConnectionOptions(tt.config) + assert.NotNil(t, opts) + tt.validate(t, opts) + }) + } +} + +// TestBuildConnectionOptions_AllOptionsPresent verifies that all required +// connection management options are included. +func TestBuildConnectionOptions_AllOptionsPresent(t *testing.T) { + cfg := config.DefaultConnectionConfig() + opts := buildConnectionOptions(cfg) + + // We should have exactly 6 options: + // 1. MaxConcurrentStreams + // 2. MaxRecvMsgSize + // 3. MaxSendMsgSize + // 4. ConnectionTimeout + // 5. KeepaliveParams + // 6. KeepaliveEnforcementPolicy + assert.Len(t, opts, 6, "Should have 6 connection management options") + + // Verify options are not nil + for i, opt := range opts { + assert.NotNil(t, opt, "Option %d should not be nil", i) + } +} + +// TestBuildConnectionOptions_KeepaliveParameters verifies that keepalive +// parameters are correctly configured. +func TestBuildConnectionOptions_KeepaliveParameters(t *testing.T) { + // Create a config with known keepalive values + cfg := config.ConnectionConfig{ + MaxConcurrentStreams: 1000, + MaxRecvMsgSize: 4 * 1024 * 1024, + MaxSendMsgSize: 4 * 1024 * 1024, + ConnectionTimeout: 120 * time.Second, + Keepalive: config.KeepaliveConfig{ + MaxConnectionIdle: 15 * time.Minute, + MaxConnectionAge: 30 * time.Minute, + MaxConnectionAgeGrace: 5 * time.Minute, + Time: 5 * time.Minute, + Timeout: 1 * time.Minute, + MinTime: 1 * time.Minute, + PermitWithoutStream: true, + }, + } + + opts := buildConnectionOptions(cfg) + + // Verify we have the keepalive options + // We can't directly inspect the options, but we can verify they're created + assert.NotEmpty(t, opts, "Should have created server options") +} + +// TestBuildConnectionOptions_MessageSizeLimits verifies that message size +// limits are correctly configured. 
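The six options these assertions count map one-to-one onto gRPC server options. A minimal sketch of what the buildConnectionOptions under test is expected to do, using the standard grpc-go constructors (an illustration, not the code under review):

package server

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"

	"github.com/agntcy/dir/server/config"
)

// buildConnectionOptionsSketch shows the expected mapping: four basic
// options plus the two keepalive options = six in total.
func buildConnectionOptionsSketch(cfg config.ConnectionConfig) []grpc.ServerOption {
	return []grpc.ServerOption{
		grpc.MaxConcurrentStreams(cfg.MaxConcurrentStreams),
		grpc.MaxRecvMsgSize(cfg.MaxRecvMsgSize),
		grpc.MaxSendMsgSize(cfg.MaxSendMsgSize),
		grpc.ConnectionTimeout(cfg.ConnectionTimeout),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			MaxConnectionIdle:     cfg.Keepalive.MaxConnectionIdle,
			MaxConnectionAge:      cfg.Keepalive.MaxConnectionAge,
			MaxConnectionAgeGrace: cfg.Keepalive.MaxConnectionAgeGrace,
			Time:                  cfg.Keepalive.Time,
			Timeout:               cfg.Keepalive.Timeout,
		}),
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime:             cfg.Keepalive.MinTime,
			PermitWithoutStream: cfg.Keepalive.PermitWithoutStream,
		}),
	}
}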
+func TestBuildConnectionOptions_MessageSizeLimits(t *testing.T) { + tests := []struct { + name string + maxRecvMsgSize int + maxSendMsgSize int + }{ + { + name: "4MB limits (default)", + maxRecvMsgSize: 4 * 1024 * 1024, + maxSendMsgSize: 4 * 1024 * 1024, + }, + { + name: "8MB limits", + maxRecvMsgSize: 8 * 1024 * 1024, + maxSendMsgSize: 8 * 1024 * 1024, + }, + { + name: "16MB limits", + maxRecvMsgSize: 16 * 1024 * 1024, + maxSendMsgSize: 16 * 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.ConnectionConfig{ + MaxConcurrentStreams: 1000, + MaxRecvMsgSize: tt.maxRecvMsgSize, + MaxSendMsgSize: tt.maxSendMsgSize, + ConnectionTimeout: 120 * time.Second, + Keepalive: config.KeepaliveConfig{}, + } + + opts := buildConnectionOptions(cfg) + assert.NotEmpty(t, opts, "Should create server options") + }) + } +} + +// TestBuildConnectionOptions_StreamLimits verifies that concurrent stream +// limits are correctly configured. +func TestBuildConnectionOptions_StreamLimits(t *testing.T) { + tests := []struct { + name string + maxConcurrentStreams uint32 + }{ + { + name: "100 streams", + maxConcurrentStreams: 100, + }, + { + name: "1000 streams (default)", + maxConcurrentStreams: 1000, + }, + { + name: "5000 streams", + maxConcurrentStreams: 5000, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := config.ConnectionConfig{ + MaxConcurrentStreams: tt.maxConcurrentStreams, + MaxRecvMsgSize: 4 * 1024 * 1024, + MaxSendMsgSize: 4 * 1024 * 1024, + ConnectionTimeout: 120 * time.Second, + Keepalive: config.KeepaliveConfig{}, + } + + opts := buildConnectionOptions(cfg) + assert.NotEmpty(t, opts, "Should create server options") + }) + } +} + +// TestKeepaliveServerParameters_StructCreation verifies that we can create +// keepalive.ServerParameters with our configuration values. +func TestKeepaliveServerParameters_StructCreation(t *testing.T) { + cfg := config.KeepaliveConfig{ + MaxConnectionIdle: 15 * time.Minute, + MaxConnectionAge: 30 * time.Minute, + MaxConnectionAgeGrace: 5 * time.Minute, + Time: 5 * time.Minute, + Timeout: 1 * time.Minute, + } + + // Verify we can create the keepalive.ServerParameters struct + params := keepalive.ServerParameters{ + MaxConnectionIdle: cfg.MaxConnectionIdle, + MaxConnectionAge: cfg.MaxConnectionAge, + MaxConnectionAgeGrace: cfg.MaxConnectionAgeGrace, + Time: cfg.Time, + Timeout: cfg.Timeout, + } + + assert.Equal(t, 15*time.Minute, params.MaxConnectionIdle) + assert.Equal(t, 30*time.Minute, params.MaxConnectionAge) + assert.Equal(t, 5*time.Minute, params.MaxConnectionAgeGrace) + assert.Equal(t, 5*time.Minute, params.Time) + assert.Equal(t, 1*time.Minute, params.Timeout) +} + +// TestServerInitialization_SchemaURL verifies that the server correctly +// configures the OASF schema URL during initialization. 
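Downstream, the assembled slice is consumed variadically by the server constructor; a sketch of the typical call site (assuming this package's buildConnectionOptions, which these tests exercise):

package server

import (
	"google.golang.org/grpc"

	"github.com/agntcy/dir/server/config"
)

// newGRPCServerSketch applies the connection options when constructing the
// server; grpc.NewServer accepts any number of ServerOption values.
func newGRPCServerSketch(cfg config.ConnectionConfig) *grpc.Server {
	return grpc.NewServer(buildConnectionOptions(cfg)...)
}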
+func TestServerInitialization_SchemaURL(t *testing.T) { + tests := []struct { + name string + schemaURL string + }{ + { + name: "default schema URL", + schemaURL: config.DefaultSchemaURL, + }, + { + name: "custom schema URL", + schemaURL: "https://custom.schema.url", + }, + { + name: "empty schema URL (disable API validator)", + schemaURL: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a minimal config with the schema URL + cfg := &config.Config{ + ListenAddress: config.DefaultListenAddress, + OASFAPIValidation: config.OASFAPIValidationConfig{ + SchemaURL: tt.schemaURL, + }, + Connection: config.DefaultConnectionConfig(), + } + + // We can't fully test New() because it tries to start services, + // but we can verify that a config with SchemaURL doesn't panic + // during the initial setup phase + assert.NotNil(t, cfg) + assert.Equal(t, tt.schemaURL, cfg.OASFAPIValidation.SchemaURL) + }) + } +} + +// TestServerInitialization_OASFValidation verifies that the server correctly +// configures OASF validation settings during initialization. +func TestServerInitialization_OASFValidation(t *testing.T) { + tests := []struct { + name string + schemaURL string + disableAPIValidation bool + strictValidation bool + }{ + { + name: "default configuration", + schemaURL: config.DefaultSchemaURL, + disableAPIValidation: false, + strictValidation: true, + }, + { + name: "custom schema URL", + schemaURL: "https://custom.schema.url", + disableAPIValidation: false, + strictValidation: true, + }, + { + name: "disable API validation", + schemaURL: "", + disableAPIValidation: true, + strictValidation: true, + }, + { + name: "lax validation mode", + schemaURL: config.DefaultSchemaURL, + disableAPIValidation: false, + strictValidation: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a config with OASF validation settings + cfg := &config.Config{ + ListenAddress: config.DefaultListenAddress, + OASFAPIValidation: config.OASFAPIValidationConfig{ + SchemaURL: tt.schemaURL, + Disable: tt.disableAPIValidation, + StrictMode: tt.strictValidation, + }, + Connection: config.DefaultConnectionConfig(), + } + + // Verify config values are set correctly + assert.NotNil(t, cfg) + assert.Equal(t, tt.schemaURL, cfg.OASFAPIValidation.SchemaURL) + assert.Equal(t, tt.disableAPIValidation, cfg.OASFAPIValidation.Disable) + assert.Equal(t, tt.strictValidation, cfg.OASFAPIValidation.StrictMode) + + // Note: We can't fully test New() because it tries to start services + // that require database connections, but we can verify that the config + // values are correctly set and would be used during server initialization + }) + } +} + +// TestKeepaliveEnforcementPolicy_StructCreation verifies that we can create +// keepalive.EnforcementPolicy with our configuration values. 
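The enforcement policy is the server-side guard against abusive clients: grpc-go closes connections (GOAWAY with "too_many_pings") when a client pings more often than MinTime allows. A client configuration that stays within the 1-minute floor used in the test below might look like this sketch:

package server

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// clientKeepaliveSketch pings no more often than the server's MinTime floor,
// so the connection is not torn down for excessive pings.
var clientKeepaliveSketch = grpc.WithKeepaliveParams(keepalive.ClientParameters{
	Time:                2 * time.Minute,  // must be >= the server's MinTime
	Timeout:             20 * time.Second, // how long to wait for a ping ack
	PermitWithoutStream: true,             // the server permits this
})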
+func TestKeepaliveEnforcementPolicy_StructCreation(t *testing.T) { + cfg := config.KeepaliveConfig{ + MinTime: 1 * time.Minute, + PermitWithoutStream: true, + } + + // Verify we can create the keepalive.EnforcementPolicy struct + policy := keepalive.EnforcementPolicy{ + MinTime: cfg.MinTime, + PermitWithoutStream: cfg.PermitWithoutStream, + } + + assert.Equal(t, 1*time.Minute, policy.MinTime) + assert.True(t, policy.PermitWithoutStream) +} diff --git a/server/store/cache/cache.go b/server/store/cache/cache.go index 083bc9eb4..9628bf5f4 100644 --- a/server/store/cache/cache.go +++ b/server/store/cache/cache.go @@ -1,215 +1,215 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:wrapcheck -package cache - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "github.com/ipfs/go-datastore" - "google.golang.org/protobuf/proto" -) - -var logger = logging.Logger("store/cache") - -// cachedStore wraps a StoreAPI with caching functionality. -type cachedStore struct { - source types.StoreAPI - cache types.Datastore -} - -// Wrap creates a cached store that uses the provided datastore as a cache. -func Wrap(source types.StoreAPI, cache types.Datastore) types.StoreAPI { - return &cachedStore{ - source: source, - cache: cache, - } -} - -// Push pushes a record to the source store and caches it. -func (s *cachedStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - logger.Debug("Push: forwarding to source store") - - // Push to source store first - ref, err := s.source.Push(ctx, record) - if err != nil { - return nil, err - } - - // Cache the record after successful push - if err := s.cacheRecord(ctx, record); err != nil { - logger.Debug("Failed to cache record", "cid", ref.GetCid(), "error", err) - } - - return ref, nil -} - -// Pull pulls a record from cache first, then from source store if not found. -func (s *cachedStore) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - cid := ref.GetCid() - logger.Debug("Pull: checking cache first", "cid", cid) - - // Try to get from cache first - if record, err := s.getRecordFromCache(ctx, cid); err == nil { - logger.Debug("Pull: cache hit", "cid", cid) - - return record, nil - } - - logger.Debug("Pull: cache miss, forwarding to source store", "cid", cid) - - // Not in cache, get from source store - record, err := s.source.Pull(ctx, ref) - if err != nil { - return nil, err - } - - // Cache the record for future requests - if err := s.cacheRecord(ctx, record); err != nil { - logger.Debug("Failed to cache record", "cid", cid, "error", err) - } - - return record, nil -} - -// Lookup looks up record metadata from cache first, then from source store if not found. 
-func (s *cachedStore) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - cid := ref.GetCid() - logger.Debug("Lookup: checking cache first", "cid", cid) - - // Try to get metadata from cache first - if meta, err := s.getMetaFromCache(ctx, cid); err == nil { - logger.Debug("Lookup: cache hit", "cid", cid) - - return meta, nil - } - - logger.Debug("Lookup: cache miss, forwarding to source store", "cid", cid) - - // Not in cache, get from source store - meta, err := s.source.Lookup(ctx, ref) - if err != nil { - return nil, err - } - - // Cache the metadata for future requests - if err := s.cacheMeta(ctx, meta); err != nil { - logger.Debug("Failed to cache metadata", "cid", cid, "error", err) - } - - return meta, nil -} - -// Delete removes a record from both cache and source store. -func (s *cachedStore) Delete(ctx context.Context, ref *corev1.RecordRef) error { - cid := ref.GetCid() - logger.Debug("Delete: removing from cache and source store", "cid", cid) - - // Remove from cache first (don't fail if not in cache) - s.removeFromCache(ctx, cid) - - // Delete from source store - return s.source.Delete(ctx, ref) -} - -// IsReady checks if the store is ready to serve traffic. -func (s *cachedStore) IsReady(ctx context.Context) bool { - return s.source.IsReady(ctx) -} - -// cacheRecord stores a record in the cache. -func (s *cachedStore) cacheRecord(ctx context.Context, record *corev1.Record) error { - cid := record.GetCid() - if cid == "" { - return errors.New("record has no CID") - } - - // Marshal record to bytes - data, err := proto.Marshal(record) - if err != nil { - return fmt.Errorf("failed to marshal record: %w", err) - } - - // Store in cache with record key - key := datastore.NewKey("/record/" + cid) - - return s.cache.Put(ctx, key, data) -} - -// getRecordFromCache retrieves a record from the cache. -func (s *cachedStore) getRecordFromCache(ctx context.Context, cid string) (*corev1.Record, error) { - key := datastore.NewKey("/record/" + cid) - - data, err := s.cache.Get(ctx, key) - if err != nil { - return nil, err - } - - // Unmarshal record from bytes - var record corev1.Record - if err := proto.Unmarshal(data, &record); err != nil { - return nil, fmt.Errorf("failed to unmarshal record: %w", err) - } - - return &record, nil -} - -// cacheMeta stores record metadata in the cache. -func (s *cachedStore) cacheMeta(ctx context.Context, meta *corev1.RecordMeta) error { - cid := meta.GetCid() - if cid == "" { - return errors.New("metadata has no CID") - } - - // Marshal metadata to JSON (since it's not a protobuf message) - data, err := json.Marshal(meta) - if err != nil { - return fmt.Errorf("failed to marshal metadata: %w", err) - } - - // Store in cache with metadata key - key := datastore.NewKey("/meta/" + cid) - - return s.cache.Put(ctx, key, data) -} - -// getMetaFromCache retrieves record metadata from the cache. -func (s *cachedStore) getMetaFromCache(ctx context.Context, cid string) (*corev1.RecordMeta, error) { - key := datastore.NewKey("/meta/" + cid) - - data, err := s.cache.Get(ctx, key) - if err != nil { - return nil, err - } - - // Unmarshal metadata from JSON - var meta corev1.RecordMeta - if err := json.Unmarshal(data, &meta); err != nil { - return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) - } - - return &meta, nil -} - -// removeFromCache removes both record and metadata from cache. 
-func (s *cachedStore) removeFromCache(ctx context.Context, cid string) { - recordKey := datastore.NewKey("/record/" + cid) - metaKey := datastore.NewKey("/meta/" + cid) - - // Remove record (ignore errors) - if err := s.cache.Delete(ctx, recordKey); err != nil { - logger.Debug("Failed to remove record from cache", "cid", cid, "error", err) - } - - // Remove metadata (ignore errors) - if err := s.cache.Delete(ctx, metaKey); err != nil { - logger.Debug("Failed to remove metadata from cache", "cid", cid, "error", err) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:wrapcheck +package cache + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "github.com/ipfs/go-datastore" + "google.golang.org/protobuf/proto" +) + +var logger = logging.Logger("store/cache") + +// cachedStore wraps a StoreAPI with caching functionality. +type cachedStore struct { + source types.StoreAPI + cache types.Datastore +} + +// Wrap creates a cached store that uses the provided datastore as a cache. +func Wrap(source types.StoreAPI, cache types.Datastore) types.StoreAPI { + return &cachedStore{ + source: source, + cache: cache, + } +} + +// Push pushes a record to the source store and caches it. +func (s *cachedStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + logger.Debug("Push: forwarding to source store") + + // Push to source store first + ref, err := s.source.Push(ctx, record) + if err != nil { + return nil, err + } + + // Cache the record after successful push + if err := s.cacheRecord(ctx, record); err != nil { + logger.Debug("Failed to cache record", "cid", ref.GetCid(), "error", err) + } + + return ref, nil +} + +// Pull pulls a record from cache first, then from source store if not found. +func (s *cachedStore) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + cid := ref.GetCid() + logger.Debug("Pull: checking cache first", "cid", cid) + + // Try to get from cache first + if record, err := s.getRecordFromCache(ctx, cid); err == nil { + logger.Debug("Pull: cache hit", "cid", cid) + + return record, nil + } + + logger.Debug("Pull: cache miss, forwarding to source store", "cid", cid) + + // Not in cache, get from source store + record, err := s.source.Pull(ctx, ref) + if err != nil { + return nil, err + } + + // Cache the record for future requests + if err := s.cacheRecord(ctx, record); err != nil { + logger.Debug("Failed to cache record", "cid", cid, "error", err) + } + + return record, nil +} + +// Lookup looks up record metadata from cache first, then from source store if not found. 
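Wiring the wrapper is a one-liner; the tests in cache_test.go later in this diff use exactly this in-memory pattern. A sketch:

package cache

import (
	"github.com/agntcy/dir/server/types"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/sync"
)

// wrapWithMemoryCache layers a mutex-guarded in-memory datastore over any
// StoreAPI; production callers would pass a persistent datastore instead.
func wrapWithMemoryCache(source types.StoreAPI) types.StoreAPI {
	return Wrap(source, sync.MutexWrap(datastore.NewMapDatastore()))
}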
+func (s *cachedStore) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + cid := ref.GetCid() + logger.Debug("Lookup: checking cache first", "cid", cid) + + // Try to get metadata from cache first + if meta, err := s.getMetaFromCache(ctx, cid); err == nil { + logger.Debug("Lookup: cache hit", "cid", cid) + + return meta, nil + } + + logger.Debug("Lookup: cache miss, forwarding to source store", "cid", cid) + + // Not in cache, get from source store + meta, err := s.source.Lookup(ctx, ref) + if err != nil { + return nil, err + } + + // Cache the metadata for future requests + if err := s.cacheMeta(ctx, meta); err != nil { + logger.Debug("Failed to cache metadata", "cid", cid, "error", err) + } + + return meta, nil +} + +// Delete removes a record from both cache and source store. +func (s *cachedStore) Delete(ctx context.Context, ref *corev1.RecordRef) error { + cid := ref.GetCid() + logger.Debug("Delete: removing from cache and source store", "cid", cid) + + // Remove from cache first (don't fail if not in cache) + s.removeFromCache(ctx, cid) + + // Delete from source store + return s.source.Delete(ctx, ref) +} + +// IsReady checks if the store is ready to serve traffic. +func (s *cachedStore) IsReady(ctx context.Context) bool { + return s.source.IsReady(ctx) +} + +// cacheRecord stores a record in the cache. +func (s *cachedStore) cacheRecord(ctx context.Context, record *corev1.Record) error { + cid := record.GetCid() + if cid == "" { + return errors.New("record has no CID") + } + + // Marshal record to bytes + data, err := proto.Marshal(record) + if err != nil { + return fmt.Errorf("failed to marshal record: %w", err) + } + + // Store in cache with record key + key := datastore.NewKey("/record/" + cid) + + return s.cache.Put(ctx, key, data) +} + +// getRecordFromCache retrieves a record from the cache. +func (s *cachedStore) getRecordFromCache(ctx context.Context, cid string) (*corev1.Record, error) { + key := datastore.NewKey("/record/" + cid) + + data, err := s.cache.Get(ctx, key) + if err != nil { + return nil, err + } + + // Unmarshal record from bytes + var record corev1.Record + if err := proto.Unmarshal(data, &record); err != nil { + return nil, fmt.Errorf("failed to unmarshal record: %w", err) + } + + return &record, nil +} + +// cacheMeta stores record metadata in the cache. +func (s *cachedStore) cacheMeta(ctx context.Context, meta *corev1.RecordMeta) error { + cid := meta.GetCid() + if cid == "" { + return errors.New("metadata has no CID") + } + + // Marshal metadata to JSON (since it's not a protobuf message) + data, err := json.Marshal(meta) + if err != nil { + return fmt.Errorf("failed to marshal metadata: %w", err) + } + + // Store in cache with metadata key + key := datastore.NewKey("/meta/" + cid) + + return s.cache.Put(ctx, key, data) +} + +// getMetaFromCache retrieves record metadata from the cache. +func (s *cachedStore) getMetaFromCache(ctx context.Context, cid string) (*corev1.RecordMeta, error) { + key := datastore.NewKey("/meta/" + cid) + + data, err := s.cache.Get(ctx, key) + if err != nil { + return nil, err + } + + // Unmarshal metadata from JSON + var meta corev1.RecordMeta + if err := json.Unmarshal(data, &meta); err != nil { + return nil, fmt.Errorf("failed to unmarshal metadata: %w", err) + } + + return &meta, nil +} + +// removeFromCache removes both record and metadata from cache. 
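Records and their metadata live under separate key prefixes ("/record/<cid>" vs "/meta/<cid>"), so either can be probed or evicted independently. An illustrative probe using the go-datastore Has call; isRecordCached is a hypothetical helper, not part of this diff:

package cache

import (
	"context"

	"github.com/ipfs/go-datastore"
)

// isRecordCached checks only the record keyspace, leaving "/meta/<cid>"
// untouched.
func isRecordCached(ctx context.Context, cache datastore.Datastore, cid string) (bool, error) {
	return cache.Has(ctx, datastore.NewKey("/record/"+cid)) //nolint:wrapcheck
}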
+func (s *cachedStore) removeFromCache(ctx context.Context, cid string) { + recordKey := datastore.NewKey("/record/" + cid) + metaKey := datastore.NewKey("/meta/" + cid) + + // Remove record (ignore errors) + if err := s.cache.Delete(ctx, recordKey); err != nil { + logger.Debug("Failed to remove record from cache", "cid", cid, "error", err) + } + + // Remove metadata (ignore errors) + if err := s.cache.Delete(ctx, metaKey); err != nil { + logger.Debug("Failed to remove metadata from cache", "cid", cid, "error", err) + } +} diff --git a/server/store/cache/cache_test.go b/server/store/cache/cache_test.go index 162a2fc0c..fb80c4e28 100644 --- a/server/store/cache/cache_test.go +++ b/server/store/cache/cache_test.go @@ -1,327 +1,327 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cache - -import ( - "context" - "testing" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -// MockStoreAPI is a mock implementation of types.StoreAPI for testing. -type MockStoreAPI struct { - mock.Mock -} - -func (m *MockStoreAPI) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - args := m.Called(ctx, record) - - if args.Get(0) == nil { - return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping - } - - ref, ok := args.Get(0).(*corev1.RecordRef) - if !ok { - panic("MockStoreAPI.Push: expected *corev1.RecordRef, got different type") - } - - return ref, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping -} - -func (m *MockStoreAPI) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - args := m.Called(ctx, ref) - - if args.Get(0) == nil { - return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping - } - - record, ok := args.Get(0).(*corev1.Record) - if !ok { - panic("MockStoreAPI.Pull: expected *corev1.Record, got different type") - } - - return record, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping -} - -func (m *MockStoreAPI) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - args := m.Called(ctx, ref) - - if args.Get(0) == nil { - return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping - } - - meta, ok := args.Get(0).(*corev1.RecordMeta) - if !ok { - panic("MockStoreAPI.Lookup: expected *corev1.RecordMeta, got different type") - } - - return meta, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping -} - -func (m *MockStoreAPI) Delete(ctx context.Context, ref *corev1.RecordRef) error { - args := m.Called(ctx, ref) - - return args.Error(0) //nolint:wrapcheck // Mock should return exact error without wrapping -} - -func (m *MockStoreAPI) IsReady(ctx context.Context) bool { - return true -} - -func TestCachedStore_Push(t *testing.T) { - ctx := t.Context() - - // Create test record - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - Description: "Test agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - }) - decoded, err := record.Decode() - require.NoError(t, err) - - recordCID := record.GetCid() - require.NotEmpty(t, recordCID, "record should have a CID") 
- - expectedRef := &corev1.RecordRef{Cid: recordCID} - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) - require.True(t, ok, "Wrap should return *cachedStore") - - // Mock the Push call - mockStore.On("Push", ctx, record).Return(expectedRef, nil) - - // Test Push - ref, err := cachedStore.Push(ctx, record) - require.NoError(t, err) - assert.Equal(t, expectedRef, ref) - - // Test that record can be pulled from cache (verifies it was cached) - pulledRecord, err := cachedStore.Pull(ctx, expectedRef) - require.NoError(t, err) - pulledDecoded, err := pulledRecord.Decode() - require.NoError(t, err) - assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) - - // Verify mock was called only once (push), not for the pull (cache hit) - mockStore.AssertExpectations(t) -} - -func TestCachedStore_Pull_CacheHit(t *testing.T) { - ctx := t.Context() - - // Create test record - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - Description: "Test agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - }) - decoded, err := record.Decode() - require.NoError(t, err) - - recordCID := record.GetCid() - require.NotEmpty(t, recordCID, "record should have a CID") - - ref := &corev1.RecordRef{Cid: recordCID} - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) - require.True(t, ok, "Wrap should return *cachedStore") - - // Pre-cache the record - err = cachedStore.cacheRecord(ctx, record) - require.NoError(t, err) - - // Test Pull - should hit cache and not call source store - pulledRecord, err := cachedStore.Pull(ctx, ref) - require.NoError(t, err) - pulledDecoded, err := pulledRecord.Decode() - require.NoError(t, err) - assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) - - // Verify mock was NOT called (cache hit) - mockStore.AssertNotCalled(t, "Pull") -} - -func TestCachedStore_Pull_CacheMiss(t *testing.T) { - ctx := t.Context() - - // Create test record - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - Description: "Test agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - }) - decoded, err := record.Decode() - require.NoError(t, err) - - recordCID := record.GetCid() - ref := &corev1.RecordRef{Cid: recordCID} - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore := Wrap(mockStore, cache) - - // Mock the Pull call - mockStore.On("Pull", ctx, ref).Return(record, nil) - - // Test Pull - should miss cache and call source store - pulledRecord, err := cachedStore.Pull(ctx, ref) - require.NoError(t, err) - pulledDecoded, err := pulledRecord.Decode() - require.NoError(t, err) - assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) - - // Test that subsequent pull hits cache (verifies it was cached after first pull) - pulledRecord2, err := cachedStore.Pull(ctx, ref) - require.NoError(t, err) - pulledDecoded2, err := pulledRecord2.Decode() - require.NoError(t, err) - assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded2.GetV1Alpha0().GetName()) - - // Verify mock was called only once (first pull), not for the second pull (cache hit) - mockStore.AssertExpectations(t) -} - -func TestCachedStore_Lookup_CacheHit(t *testing.T) { - ctx := 
t.Context() - - recordCID := "test-cid-123" - ref := &corev1.RecordRef{Cid: recordCID} - - meta := &corev1.RecordMeta{ - Cid: recordCID, - Annotations: map[string]string{"test": "value"}, - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - } - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) - require.True(t, ok, "Wrap should return *cachedStore") - - // Pre-cache the metadata - err := cachedStore.cacheMeta(ctx, meta) - require.NoError(t, err) - - // Test Lookup - should hit cache and not call source store - lookedUpMeta, err := cachedStore.Lookup(ctx, ref) - require.NoError(t, err) - assert.Equal(t, meta.GetCid(), lookedUpMeta.GetCid()) - assert.Equal(t, meta.GetAnnotations(), lookedUpMeta.GetAnnotations()) - - // Verify mock was NOT called (cache hit) - mockStore.AssertNotCalled(t, "Lookup") -} - -func TestCachedStore_Lookup_CacheMiss(t *testing.T) { - ctx := t.Context() - - recordCID := "test-cid-123" - ref := &corev1.RecordRef{Cid: recordCID} - - meta := &corev1.RecordMeta{ - Cid: recordCID, - Annotations: map[string]string{"test": "value"}, - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - } - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore := Wrap(mockStore, cache) - - // Mock the Lookup call - mockStore.On("Lookup", ctx, ref).Return(meta, nil) - - // Test Lookup - should miss cache and call source store - lookedUpMeta, err := cachedStore.Lookup(ctx, ref) - require.NoError(t, err) - assert.Equal(t, meta.GetCid(), lookedUpMeta.GetCid()) - assert.Equal(t, meta.GetAnnotations(), lookedUpMeta.GetAnnotations()) - - // Test that subsequent lookup hits cache (verifies it was cached after first lookup) - lookedUpMeta2, err := cachedStore.Lookup(ctx, ref) - require.NoError(t, err) - assert.Equal(t, meta.GetCid(), lookedUpMeta2.GetCid()) - - // Verify mock was called only once (first lookup), not for the second lookup (cache hit) - mockStore.AssertExpectations(t) -} - -func TestCachedStore_Delete(t *testing.T) { - ctx := t.Context() - - // Create test record and metadata - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - Description: "Test agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - }) - - recordCID := record.GetCid() - ref := &corev1.RecordRef{Cid: recordCID} - - meta := &corev1.RecordMeta{ - Cid: recordCID, - Annotations: map[string]string{"test": "value"}, - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - } - - // Create mock store and cache - mockStore := &MockStoreAPI{} - cache := sync.MutexWrap(datastore.NewMapDatastore()) - cachedStore := Wrap(mockStore, cache) - - // First, populate cache by doing Pull and Lookup operations - mockStore.On("Pull", ctx, ref).Return(record, nil) - mockStore.On("Lookup", ctx, ref).Return(meta, nil) - - // Pull and Lookup to populate cache - _, err := cachedStore.Pull(ctx, ref) - require.NoError(t, err) - _, err = cachedStore.Lookup(ctx, ref) - require.NoError(t, err) - - // Mock the Delete call - mockStore.On("Delete", ctx, ref).Return(nil) - - // Test Delete - err = cachedStore.Delete(ctx, ref) - require.NoError(t, err) - - // Verify items were removed from cache by attempting to pull/lookup - // They should now hit the source store again (cache miss) - mockStore.On("Pull", ctx, ref).Return(record, nil) - mockStore.On("Lookup", ctx, ref).Return(meta, nil) - - _, err = 
cachedStore.Pull(ctx, ref) - require.NoError(t, err) - _, err = cachedStore.Lookup(ctx, ref) - require.NoError(t, err) - - // Verify mock was called - mockStore.AssertExpectations(t) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cache + +import ( + "context" + "testing" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/sync" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// MockStoreAPI is a mock implementation of types.StoreAPI for testing. +type MockStoreAPI struct { + mock.Mock +} + +func (m *MockStoreAPI) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + args := m.Called(ctx, record) + + if args.Get(0) == nil { + return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping + } + + ref, ok := args.Get(0).(*corev1.RecordRef) + if !ok { + panic("MockStoreAPI.Push: expected *corev1.RecordRef, got different type") + } + + return ref, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping +} + +func (m *MockStoreAPI) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + args := m.Called(ctx, ref) + + if args.Get(0) == nil { + return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping + } + + record, ok := args.Get(0).(*corev1.Record) + if !ok { + panic("MockStoreAPI.Pull: expected *corev1.Record, got different type") + } + + return record, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping +} + +func (m *MockStoreAPI) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + args := m.Called(ctx, ref) + + if args.Get(0) == nil { + return nil, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping + } + + meta, ok := args.Get(0).(*corev1.RecordMeta) + if !ok { + panic("MockStoreAPI.Lookup: expected *corev1.RecordMeta, got different type") + } + + return meta, args.Error(1) //nolint:wrapcheck // Mock should return exact error without wrapping +} + +func (m *MockStoreAPI) Delete(ctx context.Context, ref *corev1.RecordRef) error { + args := m.Called(ctx, ref) + + return args.Error(0) //nolint:wrapcheck // Mock should return exact error without wrapping +} + +func (m *MockStoreAPI) IsReady(ctx context.Context) bool { + return true +} + +func TestCachedStore_Push(t *testing.T) { + ctx := t.Context() + + // Create test record + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + Description: "Test agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + }) + decoded, err := record.Decode() + require.NoError(t, err) + + recordCID := record.GetCid() + require.NotEmpty(t, recordCID, "record should have a CID") + + expectedRef := &corev1.RecordRef{Cid: recordCID} + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) + require.True(t, ok, "Wrap should return *cachedStore") + + // Mock the Push call + mockStore.On("Push", ctx, record).Return(expectedRef, nil) + + // Test Push + ref, err := cachedStore.Push(ctx, record) + require.NoError(t, err) + assert.Equal(t, expectedRef, ref) + + // Test that record can be pulled from cache 
(verifies it was cached) + pulledRecord, err := cachedStore.Pull(ctx, expectedRef) + require.NoError(t, err) + pulledDecoded, err := pulledRecord.Decode() + require.NoError(t, err) + assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) + + // Verify mock was called only once (push), not for the pull (cache hit) + mockStore.AssertExpectations(t) +} + +func TestCachedStore_Pull_CacheHit(t *testing.T) { + ctx := t.Context() + + // Create test record + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + Description: "Test agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + }) + decoded, err := record.Decode() + require.NoError(t, err) + + recordCID := record.GetCid() + require.NotEmpty(t, recordCID, "record should have a CID") + + ref := &corev1.RecordRef{Cid: recordCID} + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) + require.True(t, ok, "Wrap should return *cachedStore") + + // Pre-cache the record + err = cachedStore.cacheRecord(ctx, record) + require.NoError(t, err) + + // Test Pull - should hit cache and not call source store + pulledRecord, err := cachedStore.Pull(ctx, ref) + require.NoError(t, err) + pulledDecoded, err := pulledRecord.Decode() + require.NoError(t, err) + assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) + + // Verify mock was NOT called (cache hit) + mockStore.AssertNotCalled(t, "Pull") +} + +func TestCachedStore_Pull_CacheMiss(t *testing.T) { + ctx := t.Context() + + // Create test record + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + Description: "Test agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + }) + decoded, err := record.Decode() + require.NoError(t, err) + + recordCID := record.GetCid() + ref := &corev1.RecordRef{Cid: recordCID} + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore := Wrap(mockStore, cache) + + // Mock the Pull call + mockStore.On("Pull", ctx, ref).Return(record, nil) + + // Test Pull - should miss cache and call source store + pulledRecord, err := cachedStore.Pull(ctx, ref) + require.NoError(t, err) + pulledDecoded, err := pulledRecord.Decode() + require.NoError(t, err) + assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded.GetV1Alpha0().GetName()) + + // Test that subsequent pull hits cache (verifies it was cached after first pull) + pulledRecord2, err := cachedStore.Pull(ctx, ref) + require.NoError(t, err) + pulledDecoded2, err := pulledRecord2.Decode() + require.NoError(t, err) + assert.Equal(t, decoded.GetV1Alpha0().GetName(), pulledDecoded2.GetV1Alpha0().GetName()) + + // Verify mock was called only once (first pull), not for the second pull (cache hit) + mockStore.AssertExpectations(t) +} + +func TestCachedStore_Lookup_CacheHit(t *testing.T) { + ctx := t.Context() + + recordCID := "test-cid-123" + ref := &corev1.RecordRef{Cid: recordCID} + + meta := &corev1.RecordMeta{ + Cid: recordCID, + Annotations: map[string]string{"test": "value"}, + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + } + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore, ok := Wrap(mockStore, cache).(*cachedStore) + require.True(t, ok, "Wrap should return *cachedStore") + + // Pre-cache the metadata + err := 
cachedStore.cacheMeta(ctx, meta) + require.NoError(t, err) + + // Test Lookup - should hit cache and not call source store + lookedUpMeta, err := cachedStore.Lookup(ctx, ref) + require.NoError(t, err) + assert.Equal(t, meta.GetCid(), lookedUpMeta.GetCid()) + assert.Equal(t, meta.GetAnnotations(), lookedUpMeta.GetAnnotations()) + + // Verify mock was NOT called (cache hit) + mockStore.AssertNotCalled(t, "Lookup") +} + +func TestCachedStore_Lookup_CacheMiss(t *testing.T) { + ctx := t.Context() + + recordCID := "test-cid-123" + ref := &corev1.RecordRef{Cid: recordCID} + + meta := &corev1.RecordMeta{ + Cid: recordCID, + Annotations: map[string]string{"test": "value"}, + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + } + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore := Wrap(mockStore, cache) + + // Mock the Lookup call + mockStore.On("Lookup", ctx, ref).Return(meta, nil) + + // Test Lookup - should miss cache and call source store + lookedUpMeta, err := cachedStore.Lookup(ctx, ref) + require.NoError(t, err) + assert.Equal(t, meta.GetCid(), lookedUpMeta.GetCid()) + assert.Equal(t, meta.GetAnnotations(), lookedUpMeta.GetAnnotations()) + + // Test that subsequent lookup hits cache (verifies it was cached after first lookup) + lookedUpMeta2, err := cachedStore.Lookup(ctx, ref) + require.NoError(t, err) + assert.Equal(t, meta.GetCid(), lookedUpMeta2.GetCid()) + + // Verify mock was called only once (first lookup), not for the second lookup (cache hit) + mockStore.AssertExpectations(t) +} + +func TestCachedStore_Delete(t *testing.T) { + ctx := t.Context() + + // Create test record and metadata + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + Description: "Test agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + }) + + recordCID := record.GetCid() + ref := &corev1.RecordRef{Cid: recordCID} + + meta := &corev1.RecordMeta{ + Cid: recordCID, + Annotations: map[string]string{"test": "value"}, + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + } + + // Create mock store and cache + mockStore := &MockStoreAPI{} + cache := sync.MutexWrap(datastore.NewMapDatastore()) + cachedStore := Wrap(mockStore, cache) + + // First, populate cache by doing Pull and Lookup operations + mockStore.On("Pull", ctx, ref).Return(record, nil) + mockStore.On("Lookup", ctx, ref).Return(meta, nil) + + // Pull and Lookup to populate cache + _, err := cachedStore.Pull(ctx, ref) + require.NoError(t, err) + _, err = cachedStore.Lookup(ctx, ref) + require.NoError(t, err) + + // Mock the Delete call + mockStore.On("Delete", ctx, ref).Return(nil) + + // Test Delete + err = cachedStore.Delete(ctx, ref) + require.NoError(t, err) + + // Verify items were removed from cache by attempting to pull/lookup + // They should now hit the source store again (cache miss) + mockStore.On("Pull", ctx, ref).Return(record, nil) + mockStore.On("Lookup", ctx, ref).Return(meta, nil) + + _, err = cachedStore.Pull(ctx, ref) + require.NoError(t, err) + _, err = cachedStore.Lookup(ctx, ref) + require.NoError(t, err) + + // Verify mock was called + mockStore.AssertExpectations(t) +} diff --git a/server/store/config/config.go b/server/store/config/config.go index 423743add..354ae4b3a 100644 --- a/server/store/config/config.go +++ b/server/store/config/config.go @@ -1,20 +1,20 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - oci 
"github.com/agntcy/dir/server/store/oci/config" -) - -const ( - DefaultProvider = "oci" -) - -type Config struct { - // Provider is the type of the storage provider. - Provider string `json:"c,omitempty" mapstructure:"provider"` - - // Config for OCI database. - OCI oci.Config `json:"oci,omitempty" mapstructure:"oci"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + oci "github.com/agntcy/dir/server/store/oci/config" +) + +const ( + DefaultProvider = "oci" +) + +type Config struct { + // Provider is the type of the storage provider. + Provider string `json:"c,omitempty" mapstructure:"provider"` + + // Config for OCI database. + OCI oci.Config `json:"oci,omitempty" mapstructure:"oci"` +} diff --git a/server/store/eventswrap/eventswrap.go b/server/store/eventswrap/eventswrap.go index 03eb07bcc..25f38f6df 100644 --- a/server/store/eventswrap/eventswrap.go +++ b/server/store/eventswrap/eventswrap.go @@ -1,144 +1,144 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// Package eventswrap provides an event-emitting wrapper for StoreAPI. -// It emits events for all store operations (push, pull, delete) without -// modifying the underlying store implementation. -package eventswrap - -import ( - "context" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// eventsStore wraps a StoreAPI with event emission. -type eventsStore struct { - source types.StoreAPI - eventBus *events.SafeEventBus -} - -// Wrap creates an event-emitting wrapper around a StoreAPI. -// All successful operations will emit corresponding events. -func Wrap(source types.StoreAPI, eventBus *events.SafeEventBus) types.StoreAPI { - return &eventsStore{ - source: source, - eventBus: eventBus, - } -} - -// Push pushes a record to the source store and emits a RECORD_PUSHED event. -func (s *eventsStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - // Push to source store - ref, err := s.source.Push(ctx, record) - if err != nil { - return nil, err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged - } - - // Emit event after successful push - labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) - labelStrings := make([]string, len(labels)) - - for i, label := range labels { - labelStrings[i] = label.String() - } - - s.eventBus.RecordPushed(ref.GetCid(), labelStrings) - - return ref, nil -} - -// Pull pulls a record from the source store and emits a RECORD_PULLED event. -func (s *eventsStore) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - // Pull from source store - record, err := s.source.Pull(ctx, ref) - if err != nil { - return nil, err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged - } - - // Emit event after successful pull - labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) - labelStrings := make([]string, len(labels)) - - for i, label := range labels { - labelStrings[i] = label.String() - } - - s.eventBus.RecordPulled(ref.GetCid(), labelStrings) - - return record, nil -} - -// Lookup forwards to the source store (no event emitted for metadata lookups). 
-func (s *eventsStore) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged - return s.source.Lookup(ctx, ref) -} - -// Delete deletes a record from the source store and emits a RECORD_DELETED event. -func (s *eventsStore) Delete(ctx context.Context, ref *corev1.RecordRef) error { - // Delete from source store - err := s.source.Delete(ctx, ref) - if err != nil { - return err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged - } - - // Emit event after successful deletion - s.eventBus.RecordDeleted(ref.GetCid()) - - return nil -} - -// IsReady checks if the store is ready to serve traffic. -func (s *eventsStore) IsReady(ctx context.Context) bool { - return s.source.IsReady(ctx) -} - -// VerifyWithZot delegates to the source store if it supports Zot verification. -// This ensures the wrapper doesn't hide optional methods from the underlying store. -func (s *eventsStore) VerifyWithZot(ctx context.Context, recordCID string) (bool, error) { - // Check if source supports Zot verification - zotStore, ok := s.source.(types.VerifierStore) - if !ok { - // Source doesn't support it - this shouldn't happen with OCI store, - // but handle gracefully - return false, nil - } - - // Delegate to source - //nolint:wrapcheck - return zotStore.VerifyWithZot(ctx, recordCID) -} - -// PushReferrer delegates to the source store if it supports referrer operations. -// This is needed for signature and public key storage. -func (s *eventsStore) PushReferrer(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { - // Check if source supports referrer operations - referrerStore, ok := s.source.(types.ReferrerStoreAPI) - if !ok { - return status.Errorf(codes.Unimplemented, "source store does not support referrer operations") - } - - // Delegate to source (no event emitted for referrer operations) - //nolint:wrapcheck - return referrerStore.PushReferrer(ctx, recordCID, referrer) -} - -// WalkReferrers delegates to the source store if it supports referrer operations. -// This is needed for retrieving signatures and public keys. -func (s *eventsStore) WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error { - // Check if source supports referrer operations - referrerStore, ok := s.source.(types.ReferrerStoreAPI) - if !ok { - return status.Errorf(codes.Unimplemented, "source store does not support referrer operations") - } - - // Delegate to source (no event emitted for referrer operations) - //nolint:wrapcheck - return referrerStore.WalkReferrers(ctx, recordCID, referrerType, walkFn) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// Package eventswrap provides an event-emitting wrapper for StoreAPI. +// It emits events for all store operations (push, pull, delete) without +// modifying the underlying store implementation. +package eventswrap + +import ( + "context" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// eventsStore wraps a StoreAPI with event emission. +type eventsStore struct { + source types.StoreAPI + eventBus *events.SafeEventBus +} + +// Wrap creates an event-emitting wrapper around a StoreAPI. 
+// All successful operations will emit corresponding events. +func Wrap(source types.StoreAPI, eventBus *events.SafeEventBus) types.StoreAPI { + return &eventsStore{ + source: source, + eventBus: eventBus, + } +} + +// Push pushes a record to the source store and emits a RECORD_PUSHED event. +func (s *eventsStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + // Push to source store + ref, err := s.source.Push(ctx, record) + if err != nil { + return nil, err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged + } + + // Emit event after successful push + labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) + labelStrings := make([]string, len(labels)) + + for i, label := range labels { + labelStrings[i] = label.String() + } + + s.eventBus.RecordPushed(ref.GetCid(), labelStrings) + + return ref, nil +} + +// Pull pulls a record from the source store and emits a RECORD_PULLED event. +func (s *eventsStore) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + // Pull from source store + record, err := s.source.Pull(ctx, ref) + if err != nil { + return nil, err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged + } + + // Emit event after successful pull + labels := types.GetLabelsFromRecord(adapters.NewRecordAdapter(record)) + labelStrings := make([]string, len(labels)) + + for i, label := range labels { + labelStrings[i] = label.String() + } + + s.eventBus.RecordPulled(ref.GetCid(), labelStrings) + + return record, nil +} + +// Lookup forwards to the source store (no event emitted for metadata lookups). +func (s *eventsStore) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged + return s.source.Lookup(ctx, ref) +} + +// Delete deletes a record from the source store and emits a RECORD_DELETED event. +func (s *eventsStore) Delete(ctx context.Context, ref *corev1.RecordRef) error { + // Delete from source store + err := s.source.Delete(ctx, ref) + if err != nil { + return err //nolint:wrapcheck // Transparent wrapper - pass through errors unchanged + } + + // Emit event after successful deletion + s.eventBus.RecordDeleted(ref.GetCid()) + + return nil +} + +// IsReady checks if the store is ready to serve traffic. +func (s *eventsStore) IsReady(ctx context.Context) bool { + return s.source.IsReady(ctx) +} + +// VerifyWithZot delegates to the source store if it supports Zot verification. +// This ensures the wrapper doesn't hide optional methods from the underlying store. +func (s *eventsStore) VerifyWithZot(ctx context.Context, recordCID string) (bool, error) { + // Check if source supports Zot verification + zotStore, ok := s.source.(types.VerifierStore) + if !ok { + // Source doesn't support it - this shouldn't happen with OCI store, + // but handle gracefully + return false, nil + } + + // Delegate to source + //nolint:wrapcheck + return zotStore.VerifyWithZot(ctx, recordCID) +} + +// PushReferrer delegates to the source store if it supports referrer operations. +// This is needed for signature and public key storage. 
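Because both wrappers expose the same StoreAPI shape, they compose freely. One plausible stack, with the events layer outermost so even cache-served pulls emit RECORD_PULLED (a sketch of composition, not the server's actual wiring):

package store

import (
	"github.com/agntcy/dir/server/events"
	"github.com/agntcy/dir/server/store/cache"
	"github.com/agntcy/dir/server/store/eventswrap"
	"github.com/agntcy/dir/server/types"
)

// newLayeredStore composes the wrappers; reversing the order would emit pull
// events only on cache misses.
func newLayeredStore(src types.StoreAPI, ds types.Datastore, bus *events.SafeEventBus) types.StoreAPI {
	return eventswrap.Wrap(cache.Wrap(src, ds), bus)
}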
+func (s *eventsStore) PushReferrer(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { + // Check if source supports referrer operations + referrerStore, ok := s.source.(types.ReferrerStoreAPI) + if !ok { + return status.Errorf(codes.Unimplemented, "source store does not support referrer operations") + } + + // Delegate to source (no event emitted for referrer operations) + //nolint:wrapcheck + return referrerStore.PushReferrer(ctx, recordCID, referrer) +} + +// WalkReferrers delegates to the source store if it supports referrer operations. +// This is needed for retrieving signatures and public keys. +func (s *eventsStore) WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error { + // Check if source supports referrer operations + referrerStore, ok := s.source.(types.ReferrerStoreAPI) + if !ok { + return status.Errorf(codes.Unimplemented, "source store does not support referrer operations") + } + + // Delegate to source (no event emitted for referrer operations) + //nolint:wrapcheck + return referrerStore.WalkReferrers(ctx, recordCID, referrerType, walkFn) +} diff --git a/server/store/eventswrap/eventswrap_test.go b/server/store/eventswrap/eventswrap_test.go index 4346360b0..b8a575e3a 100644 --- a/server/store/eventswrap/eventswrap_test.go +++ b/server/store/eventswrap/eventswrap_test.go @@ -1,233 +1,233 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package eventswrap - -import ( - "context" - "testing" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events" -) - -// mockStore is a minimal store implementation for testing. 
-type mockStore struct { - pushCalled bool - pullCalled bool - deleteCalled bool -} - -func (m *mockStore) Push(_ context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - m.pushCalled = true - - return &corev1.RecordRef{Cid: record.GetCid()}, nil -} - -func (m *mockStore) Pull(_ context.Context, _ *corev1.RecordRef) (*corev1.Record, error) { - m.pullCalled = true - // Create a minimal record for testing - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-record", - SchemaVersion: "v0.3.1", - }) - - return record, nil -} - -func (m *mockStore) Lookup(_ context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - return &corev1.RecordMeta{Cid: ref.GetCid()}, nil -} - -func (m *mockStore) Delete(_ context.Context, _ *corev1.RecordRef) error { - m.deleteCalled = true - - return nil -} - -func (m *mockStore) IsReady(_ context.Context) bool { - return true -} - -func TestEventsWrapPush(t *testing.T) { - // Use real event bus for testing - realBus := events.NewEventBus() - safeBus := events.NewSafeEventBus(realBus) - mockSrc := &mockStore{} - - wrappedStore := Wrap(mockSrc, safeBus) - - // Subscribe to capture events - req := &eventsv1.ListenRequest{} - - subID, eventCh := realBus.Subscribe(req) - defer realBus.Unsubscribe(subID) - - // Create test record - record := corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - SchemaVersion: "v0.3.1", - }) - - // Push record - ref, err := wrappedStore.Push(t.Context(), record) - if err != nil { - t.Fatalf("Push failed: %v", err) - } - - // Verify source store was called - if !mockSrc.pushCalled { - t.Error("Source store Push was not called") - } - - // Wait for async delivery to complete - realBus.WaitForAsyncPublish() - - // Verify event was emitted - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { - t.Errorf("Expected RECORD_PUSHED event, got %v", event.Type) - } - - if event.ResourceID != ref.GetCid() { - t.Errorf("Expected event resource_id %s, got %s", ref.GetCid(), event.ResourceID) - } - default: - t.Error("Expected to receive RECORD_PUSHED event") - } -} - -func TestEventsWrapPull(t *testing.T) { - realBus := events.NewEventBus() - safeBus := events.NewSafeEventBus(realBus) - mockSrc := &mockStore{} - - wrappedStore := Wrap(mockSrc, safeBus) - - // Subscribe to capture events - req := &eventsv1.ListenRequest{} - - subID, eventCh := realBus.Subscribe(req) - defer realBus.Unsubscribe(subID) - - // Pull record - ref := &corev1.RecordRef{Cid: "bafytest123"} - - record, err := wrappedStore.Pull(t.Context(), ref) - if err != nil { - t.Fatalf("Pull failed: %v", err) - } - - if record == nil { - t.Fatal("Expected record to be returned") - } - - // Verify source store was called - if !mockSrc.pullCalled { - t.Error("Source store Pull was not called") - } - - // Wait for async delivery to complete - realBus.WaitForAsyncPublish() - - // Verify event was emitted - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PULLED { - t.Errorf("Expected RECORD_PULLED event, got %v", event.Type) - } - default: - t.Error("Expected to receive RECORD_PULLED event") - } -} - -func TestEventsWrapDelete(t *testing.T) { - realBus := events.NewEventBus() - safeBus := events.NewSafeEventBus(realBus) - mockSrc := &mockStore{} - - wrappedStore := Wrap(mockSrc, safeBus) - - // Subscribe to capture events - req := &eventsv1.ListenRequest{} - - subID, eventCh := realBus.Subscribe(req) - defer realBus.Unsubscribe(subID) - - // Delete record - ref := 
&corev1.RecordRef{Cid: "bafytest123"} - - err := wrappedStore.Delete(t.Context(), ref) - if err != nil { - t.Fatalf("Delete failed: %v", err) - } - - // Verify source store was called - if !mockSrc.deleteCalled { - t.Error("Source store Delete was not called") - } - - // Wait for async delivery to complete - realBus.WaitForAsyncPublish() - - // Verify event was emitted - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_DELETED { - t.Errorf("Expected RECORD_DELETED event, got %v", event.Type) - } - - if event.ResourceID != "bafytest123" { - t.Errorf("Expected event resource_id bafytest123, got %s", event.ResourceID) - } - default: - t.Error("Expected to receive RECORD_DELETED event") - } -} - -func TestEventsWrapLookup(t *testing.T) { - realBus := events.NewEventBus() - safeBus := events.NewSafeEventBus(realBus) - mockSrc := &mockStore{} - - wrappedStore := Wrap(mockSrc, safeBus) - - // Lookup record - ref := &corev1.RecordRef{Cid: "bafytest123"} - - meta, err := wrappedStore.Lookup(t.Context(), ref) - if err != nil { - t.Fatalf("Lookup failed: %v", err) - } - - if meta == nil { - t.Fatal("Expected metadata to be returned") - } - - // Verify no event was emitted for Lookup (metadata operations don't emit events) - metrics := realBus.GetMetrics() - if metrics.PublishedTotal != 0 { - t.Errorf("Expected 0 events for Lookup operation, got %d", metrics.PublishedTotal) - } -} - -func TestEventsWrapWithNilBus(t *testing.T) { - // Should work even with nil bus (no-op) - mockSrc := &mockStore{} - wrappedStore := Wrap(mockSrc, events.NewSafeEventBus(nil)) - - record := corev1.New(&typesv1alpha0.Record{Name: "test", SchemaVersion: "v0.3.1"}) - - // Should not panic - _, err := wrappedStore.Push(t.Context(), record) - if err != nil { - t.Errorf("Push with nil bus should not error: %v", err) - } - - if !mockSrc.pushCalled { - t.Error("Source store should still be called with nil bus") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package eventswrap + +import ( + "context" + "testing" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" +) + +// mockStore is a minimal store implementation for testing. 
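+// It records which methods were invoked so tests can assert that calls are delegated to the source store.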
+type mockStore struct { + pushCalled bool + pullCalled bool + deleteCalled bool +} + +func (m *mockStore) Push(_ context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + m.pushCalled = true + + return &corev1.RecordRef{Cid: record.GetCid()}, nil +} + +func (m *mockStore) Pull(_ context.Context, _ *corev1.RecordRef) (*corev1.Record, error) { + m.pullCalled = true + // Create a minimal record for testing + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-record", + SchemaVersion: "v0.3.1", + }) + + return record, nil +} + +func (m *mockStore) Lookup(_ context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + return &corev1.RecordMeta{Cid: ref.GetCid()}, nil +} + +func (m *mockStore) Delete(_ context.Context, _ *corev1.RecordRef) error { + m.deleteCalled = true + + return nil +} + +func (m *mockStore) IsReady(_ context.Context) bool { + return true +} + +func TestEventsWrapPush(t *testing.T) { + // Use real event bus for testing + realBus := events.NewEventBus() + safeBus := events.NewSafeEventBus(realBus) + mockSrc := &mockStore{} + + wrappedStore := Wrap(mockSrc, safeBus) + + // Subscribe to capture events + req := &eventsv1.ListenRequest{} + + subID, eventCh := realBus.Subscribe(req) + defer realBus.Unsubscribe(subID) + + // Create test record + record := corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + SchemaVersion: "v0.3.1", + }) + + // Push record + ref, err := wrappedStore.Push(t.Context(), record) + if err != nil { + t.Fatalf("Push failed: %v", err) + } + + // Verify source store was called + if !mockSrc.pushCalled { + t.Error("Source store Push was not called") + } + + // Wait for async delivery to complete + realBus.WaitForAsyncPublish() + + // Verify event was emitted + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PUSHED { + t.Errorf("Expected RECORD_PUSHED event, got %v", event.Type) + } + + if event.ResourceID != ref.GetCid() { + t.Errorf("Expected event resource_id %s, got %s", ref.GetCid(), event.ResourceID) + } + default: + t.Error("Expected to receive RECORD_PUSHED event") + } +} + +func TestEventsWrapPull(t *testing.T) { + realBus := events.NewEventBus() + safeBus := events.NewSafeEventBus(realBus) + mockSrc := &mockStore{} + + wrappedStore := Wrap(mockSrc, safeBus) + + // Subscribe to capture events + req := &eventsv1.ListenRequest{} + + subID, eventCh := realBus.Subscribe(req) + defer realBus.Unsubscribe(subID) + + // Pull record + ref := &corev1.RecordRef{Cid: "bafytest123"} + + record, err := wrappedStore.Pull(t.Context(), ref) + if err != nil { + t.Fatalf("Pull failed: %v", err) + } + + if record == nil { + t.Fatal("Expected record to be returned") + } + + // Verify source store was called + if !mockSrc.pullCalled { + t.Error("Source store Pull was not called") + } + + // Wait for async delivery to complete + realBus.WaitForAsyncPublish() + + // Verify event was emitted + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_PULLED { + t.Errorf("Expected RECORD_PULLED event, got %v", event.Type) + } + default: + t.Error("Expected to receive RECORD_PULLED event") + } +} + +func TestEventsWrapDelete(t *testing.T) { + realBus := events.NewEventBus() + safeBus := events.NewSafeEventBus(realBus) + mockSrc := &mockStore{} + + wrappedStore := Wrap(mockSrc, safeBus) + + // Subscribe to capture events + req := &eventsv1.ListenRequest{} + + subID, eventCh := realBus.Subscribe(req) + defer realBus.Unsubscribe(subID) + + // Delete record + ref := 
&corev1.RecordRef{Cid: "bafytest123"} + + err := wrappedStore.Delete(t.Context(), ref) + if err != nil { + t.Fatalf("Delete failed: %v", err) + } + + // Verify source store was called + if !mockSrc.deleteCalled { + t.Error("Source store Delete was not called") + } + + // Wait for async delivery to complete + realBus.WaitForAsyncPublish() + + // Verify event was emitted + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_RECORD_DELETED { + t.Errorf("Expected RECORD_DELETED event, got %v", event.Type) + } + + if event.ResourceID != "bafytest123" { + t.Errorf("Expected event resource_id bafytest123, got %s", event.ResourceID) + } + default: + t.Error("Expected to receive RECORD_DELETED event") + } +} + +func TestEventsWrapLookup(t *testing.T) { + realBus := events.NewEventBus() + safeBus := events.NewSafeEventBus(realBus) + mockSrc := &mockStore{} + + wrappedStore := Wrap(mockSrc, safeBus) + + // Lookup record + ref := &corev1.RecordRef{Cid: "bafytest123"} + + meta, err := wrappedStore.Lookup(t.Context(), ref) + if err != nil { + t.Fatalf("Lookup failed: %v", err) + } + + if meta == nil { + t.Fatal("Expected metadata to be returned") + } + + // Verify no event was emitted for Lookup (metadata operations don't emit events) + metrics := realBus.GetMetrics() + if metrics.PublishedTotal != 0 { + t.Errorf("Expected 0 events for Lookup operation, got %d", metrics.PublishedTotal) + } +} + +func TestEventsWrapWithNilBus(t *testing.T) { + // Should work even with nil bus (no-op) + mockSrc := &mockStore{} + wrappedStore := Wrap(mockSrc, events.NewSafeEventBus(nil)) + + record := corev1.New(&typesv1alpha0.Record{Name: "test", SchemaVersion: "v0.3.1"}) + + // Should not panic + _, err := wrappedStore.Push(t.Context(), record) + if err != nil { + t.Errorf("Push with nil bus should not error: %v", err) + } + + if !mockSrc.pushCalled { + t.Error("Source store should still be called with nil bus") + } +} diff --git a/server/store/oci/README.md b/server/store/oci/README.md index 9a30e0559..a8beb88c3 100644 --- a/server/store/oci/README.md +++ b/server/store/oci/README.md @@ -1,482 +1,482 @@ -# OCI Storage - -The OCI (Open Container Initiative) storage implementation provides a robust, scalable storage backend for OASF (Open Agent Specification Format) records using OCI-compliant registries. - -## Overview - -The OCI storage system enables: -- **Storage of OASF objects** in OCI-compliant registries (local or remote) -- **Rich metadata annotations** for discovery and filtering -- **Multiple discovery tags** for enhanced browsability -- **Content-addressable storage** using CIDs calculated from ORAS digest operations -- **Version-agnostic record handling** across OASF v0.3.1, v0.4.0, and v0.5.0 -- **Registry-aware operations** optimized for local vs remote storage - -## Architecture - -``` -┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ -│ OASF Object │───▶│ OCI Manifest │───▶│ OCI Registry │ -│ (JSON) │ │ + Annotations │ │ (Storage) │ -└─────────────────┘ └─────────────────┘ └─────────────────┘ - │ │ │ - │ ┌─────────────────┐ │ - └─────────────▶│ Discovery Tags │◀─────────────┘ - │ (Multiple Tags) │ - └─────────────────┘ - │ - ┌─────────────────┐ - │ CID Utils │ - │ (utils/cid/) │ - └─────────────────┘ -``` - -## Core Workflow Processes - -### 1. 
Push Operation - -The push operation stores agent records with rich metadata and discovery tags using ORAS-native operations: - -```go -// Push record to OCI registry -func (s *store) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) -``` - -**Workflow (6-step process):** -1. **Marshal record** - Convert to canonical OASF JSON -2. **Push blob with ORAS** - Use `oras.PushBytes` to get layer descriptor -3. **Calculate CID from digest** - Use `cidutil.ConvertDigestToCID` on ORAS digest -4. **Construct manifest annotations** - Rich metadata including calculated CID -5. **Pack manifest** - Create OCI manifest with `oras.PackManifest` -6. **Tag manifest** - Apply multiple discovery tags for browsability - -### 2. Pull Operation - -Retrieves complete agent records with validation: - -```go -// Pull record from OCI registry -func (s *store) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) -``` - -**Workflow:** -1. **Validate input** - Comprehensive reference validation -2. **Fetch and parse manifest** - Shared helper eliminates code duplication -3. **Validate layer structure** - Check for proper blob descriptors -4. **Fetch blob data** - Download actual record content -5. **Validate blob integrity** - Size and format verification -6. **Unmarshal record** - Convert back to OASF Record - -### 3. Lookup Operation - -Fast metadata retrieval optimized for performance: - -```go -// Lookup record metadata -func (s *store) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) -``` - -**Workflow:** -1. **Validate input** - Fast-fail for invalid references -2. **Resolve manifest directly** - Skip redundant existence check -3. **Parse manifest annotations** - Extract rich metadata -4. **Return metadata only** - No blob download required - -### 4. Delete Operation - -Registry-aware deletion following OCI best practices: - -```go -// Delete record and cleanup tags -func (s *store) Delete(ctx context.Context, ref *corev1.RecordRef) error -``` - -**Registry-Aware Workflow:** - -#### Local OCI Store: -1. **Clean up discovery tags** - Remove all associated tags -2. **Delete manifest** - Remove manifest descriptor -3. **Delete blob explicitly** - Full cleanup (we have filesystem control) - -#### Remote Registry: -1. **Best-effort tag cleanup** - Many registries don't support tag deletion -2. **Delete manifest** - Usually supported via OCI API -3. 
**Skip blob deletion** - Let registry garbage collection handle cleanup - -## Shared Helper Functions - -The implementation uses shared helper functions to eliminate code duplication: - -### Internal Helpers (`internal.go`) - -```go -// Shared manifest operations (used by Lookup and Pull) -func (s *store) fetchAndParseManifest(ctx context.Context, cid string) (*ocispec.Manifest, *ocispec.Descriptor, error) - -// Shared input validation (used by Lookup, Pull, Delete) -func validateRecordRef(ref *corev1.RecordRef) error - -// Local blob deletion (used by Delete for local stores) -func (s *store) deleteBlobForLocalStore(ctx context.Context, cid string, store *oci.Store) error -``` - -**Benefits:** -- **DRY principle** - Eliminates code duplication -- **Consistent behavior** - Same validation and error handling patterns -- **Easier maintenance** - Single place to modify shared logic - -## CID Utility Package (`utils/cid/`) - -Centralized CID operations with structured error handling: - -```go -// Convert OCI digest to CID (used in Push) -func ConvertDigestToCID(digest ocidigest.Digest) (string, error) - -// Convert CID to OCI digest (used in Delete) -func ConvertCIDToDigest(cidString string) (ocidigest.Digest, error) - -// Calculate digest from bytes (fallback utility) -func CalculateDigest(data []byte) (ocidigest.Digest, error) -``` - -**Features:** -- **Structured errors** - Custom error types with detailed context -- **Comprehensive validation** - Algorithm and format checking -- **Round-trip consistency** - Guaranteed CID ↔ Digest conversion -- **Performance optimized** - Efficient hash operations - -## Annotations System - -The system uses a streamlined annotation approach focused on manifest annotations: - -### Manifest Annotations - -Rich metadata stored in OCI manifest for discovery and filtering: - -```go -// Example manifest annotations -annotations := map[string]string{ - "org.agntcy.dir/type": "record", - "org.agntcy.dir/cid": "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", - "org.agntcy.dir/name": "aws-ec2-agent", - "org.agntcy.dir/version": "1.2.0", - "org.agntcy.dir/description": "AWS EC2 management agent", - "org.agntcy.dir/oasf-version": "v0.5.0", - "org.agntcy.dir/schema-version": "v0.5.0", - "org.agntcy.dir/created-at": "2024-01-15T10:30:00Z", - "org.agntcy.dir/authors": "dev-team,ops-team", - "org.agntcy.dir/skills": "ec2-management,auto-scaling", - "org.agntcy.dir/locator-types": "docker,helm", - "org.agntcy.dir/extension-names": "monitoring,security", - "org.agntcy.dir/signed": "true", - "org.agntcy.dir/signature-algorithm": "cosign", - "org.agntcy.dir/signed-at": "2024-01-15T10:35:00Z", - "org.agntcy.dir/custom.team": "platform", - "org.agntcy.dir/custom.project": "cloud-automation", -} -``` - -### Annotation Categories - -| Category | Purpose | Examples | -|----------|---------|----------| -| **Core Identity** | Basic record information | `name`, `version`, `description`, `cid` | -| **Lifecycle** | Versioning and timestamps | `schema-version`, `created-at`, `authors` | -| **Capability Discovery** | Functional metadata | `skills`, `locator-types`, `extension-names` | -| **Security** | Integrity and verification | `signed`, `signature-algorithm`, `signed-at` | -| **Custom** | User-defined metadata | `custom.team`, `custom.project`, `custom.environment` | - -## Tag Generation System - -The tag generation system creates multiple discovery tags for enhanced browsability and filtering: - -### Tag Strategy Configuration - -```go -type TagStrategy struct { - 
EnableNameTags bool // Name-based tags (true) - EnableCapabilityTags bool // Skill/extension tags (true) - EnableInfrastructureTags bool // Deployment tags (true) - EnableTeamTags bool // Organization tags (true) - EnableContentAddressable bool // CID tag (true) - MaxTagsPerRecord int // Tag limit (20) -} -``` - -### Tag Categories and Examples - -#### 1. Content-Addressable Tags -Primary identifier for exact record lookup (calculated from ORAS digest): -``` -bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi -``` - -#### 2. Name-Based Tags -For human-friendly browsing: -``` -aws-ec2-agent -aws-ec2-agent:1.2.0 -aws-ec2-agent:latest -``` - -#### 3. Capability-Based Tags -For functional discovery: -``` -skill.ec2-management -skill.auto-scaling -ext.monitoring -ext.security -``` - -#### 4. Infrastructure Tags -For deployment discovery: -``` -deploy.docker -deploy.helm -deploy.kubernetes -``` - -#### 5. Team-Based Tags -For organizational filtering: -``` -team.platform -org.acme-corp -project.cloud-automation -``` - -### Tag Normalization - -All tags are normalized for OCI compliance: - -```go -// Input: "My Agent/v1.0@Company" -// Output: "my-agent.v1.0_company" - -// Rules: -// - Lowercase conversion -// - Spaces → hyphens (-) -// - Path separators (/) → dots (.) -// - Invalid chars → underscores (_) -// - Must start with [a-zA-Z0-9_] -// - Max 128 characters -// - No trailing separators -``` - -### Example Tag Generation - -For an AWS EC2 management agent with ORAS-calculated CID: - -```go -record := corev1.New(&typesv1alpha0.Record{ - Name: "aws-ec2-agent", - Version: "1.2.0", - Skills: []*typesv1alpha0.Skill{ - {Name: "ec2-management"}, - {Name: "auto-scaling"}, - }, - Locators: []*typesv1alpha0.Locator{ - {Type: "docker"}, - {Type: "helm"}, - }, - Extensions: []*typesv1alpha0.Extension{ - {Name: "monitoring"}, - }, - Annotations: map[string]string{ - "team": "platform", - "project": "cloud-automation", - }, -}) - -// Generated tags (CID calculated from ORAS digest): -tags := []string{ - "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", // CID from ORAS - "aws-ec2-agent", // Name - "aws-ec2-agent:1.2.0", // Name + version - "aws-ec2-agent:latest", // Name + latest - "skill.ec2-management", // Capability - "skill.auto-scaling", // Capability - "ext.monitoring", // Extension - "deploy.docker", // Infrastructure - "deploy.helm", // Infrastructure - "team.platform", // Team - "project.cloud-automation", // Project -} -``` - -## OASF Version Support - -The system supports multiple OASF versions with automatic detection: - -| OASF Version | API Version | Features | -|--------------|-------------|----------| -| **v0.3.1** | `objects/v1` | Basic agents with hierarchical skills (`category/class`) | -| **v0.4.0** | `objects/v2` | Agent records with simple skill names | -| **v0.5.0** | `objects/v3` | Full records with enhanced metadata | - -### Version-Specific Examples - -#### OASF v0.3.1 (objects/v1) -```go -// Skills use hierarchical format -skills := []*typesv1alpha0.Skill{ - {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, - {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, -} -// Generates tags: skill.nlp.processing, skill.ml.inference -``` - -#### OASF v0.5.0 (objects/v3) -```go -// Skills use simple names -skills := []*typesv1alpha0.Skill{ - {Name: "natural-language-processing"}, - {Name: "machine-learning"}, -} -// Generates tags: skill.natural-language-processing, skill.machine-learning -``` - -## Configuration - -### Local 
Storage -```go -cfg := ociconfig.Config{ - LocalDir: "/var/lib/agents/oci", - CacheDir: "/var/cache/agents", // Optional -} -``` - -### Remote Registry -```go -cfg := ociconfig.Config{ - RegistryAddress: "registry.example.com", - RepositoryName: "agents", - Username: "user", - Password: "pass", - Insecure: false, - CacheDir: "/var/cache/agents", // Optional -} -``` - -### Registry Authentication -Supports multiple authentication methods: -- **Username/Password** - Basic auth -- **Access Token** - Bearer token -- **Refresh Token** - OAuth refresh -- **Registry Credentials** - Docker config - -## Storage Features - -### Content Addressability -- **ORAS-based CID calculation** - CIDs derived from ORAS digest operations -- **Integrity verification** - Automatic content validation -- **Deduplication** - Identical OASF content stored once - -### Rich Metadata -- **Manifest annotations** - Searchable metadata stored in OCI manifests -- **Version tracking** - Schema evolution support -- **Custom annotations** - User-defined metadata -- **CID in annotations** - Direct CID storage for discovery - -### Discovery & Browsability -- **Multiple tag strategies** - Enhanced discoverability -- **Filtering capabilities** - Metadata-based queries -- **Hierarchical organization** - Team/project/capability grouping - -### Performance -- **Optimized network operations** - Minimal redundant calls -- **Optional caching** - Local cache for remote registries -- **Shared helper functions** - Eliminated code duplication -- **Registry-aware operations** - Optimized for local vs remote storage - -## Error Handling - -The system provides comprehensive error handling with structured errors and best-effort operations: - -### Structured CID Errors -```go -// CID utility errors with detailed context -&Error{ - Type: ErrorTypeInvalidCID, - Message: "failed to decode CID", - Details: map[string]interface{}{"cid": cidString, "error": err.Error()}, -} -``` - -### gRPC Status Codes -```go -// Common error scenarios -status.Error(codes.InvalidArgument, "record reference cannot be nil") -status.Error(codes.NotFound, "record not found: ") -status.Error(codes.Internal, "failed to push record bytes: ") -``` - -### Best-Effort Operations -```go -// Delete operations continue despite partial failures -var errors []string -if err := deleteManifest(); err != nil { - errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) - // Continue with cleanup -} -``` - -## Testing - -The package includes comprehensive tests covering: -- **CID utility functions** - Round-trip conversion, error cases -- **Shared helper functions** - Manifest parsing, validation -- **Annotation extraction** - All OASF versions -- **Tag generation** - All tag strategies -- **Tag normalization** - OCI compliance -- **Workflow operations** - Push/Pull/Lookup/Delete -- **Registry-aware deletion** - Local vs remote behavior -- **Error scenarios** - Validation and edge cases - -Run tests: -```bash -go test ./server/store/oci/... -go test ./utils/cid/... 
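-# a single test can be targeted with -run, for example:
-go test -run TestExtractManifestAnnotations ./server/store/oci/...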
-``` - -## Dependencies - -### Core Dependencies -- **`oras.land/oras-go/v2`** - OCI registry operations and native digest calculation -- **`github.com/opencontainers/image-spec`** - OCI specifications -- **`github.com/agntcy/dir/utils/cid`** - Centralized CID utilities -- **`github.com/ipfs/go-cid`** - Content addressing (via CID utils) -- **`github.com/multiformats/go-multihash`** - Hash format support (via CID utils) - -### Registry Support -- **OCI Distribution Spec** - Standard OCI registries -- **Docker Registry V2** - Docker Hub, Harbor, etc. -- **Local OCI Layout** - Local filesystem storage -- **Cloud Registries** - AWS ECR, GCP GCR, Azure ACR - -## Best Practices - -### Record Design -1. **Use descriptive names** - Enhance discoverability -2. **Include rich metadata** - Skills, extensions, locators -3. **Add custom annotations** - Team, project, environment -4. **Sign records** - Enable integrity verification - -### Tag Strategy -1. **Enable all tag types** - Maximize discoverability -2. **Use consistent naming** - Follow organizational conventions -3. **Limit custom tags** - Prevent tag explosion -4. **Consider tag namespacing** - Use prefixes for organization - -### Storage Configuration -1. **Use caching** - Improve performance for remote registries -2. **Configure authentication** - Secure access control -3. **Choose appropriate deletion strategy** - Understand local vs remote behavior -4. **Monitor storage usage** - Track registry size and costs -5. **Backup strategies** - Ensure data resilience - -### Development Guidelines -1. **Use shared helpers** - Leverage `fetchAndParseManifest`, `validateRecordRef` -2. **Follow error handling patterns** - Use structured errors with context -3. **Leverage CID utilities** - Use `utils/cid/` package for all CID operations +# OCI Storage + +The OCI (Open Container Initiative) storage implementation provides a robust, scalable storage backend for OASF (Open Agent Specification Format) records using OCI-compliant registries. + +## Overview + +The OCI storage system enables: +- **Storage of OASF objects** in OCI-compliant registries (local or remote) +- **Rich metadata annotations** for discovery and filtering +- **Multiple discovery tags** for enhanced browsability +- **Content-addressable storage** using CIDs calculated from ORAS digest operations +- **Version-agnostic record handling** across OASF v0.3.1, v0.4.0, and v0.5.0 +- **Registry-aware operations** optimized for local vs remote storage + +## Architecture + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ OASF Object │───▶│ OCI Manifest │───▶│ OCI Registry │ +│ (JSON) │ │ + Annotations │ │ (Storage) │ +└─────────────────┘ └─────────────────┘ └─────────────────┘ + │ │ │ + │ ┌─────────────────┐ │ + └─────────────▶│ Discovery Tags │◀─────────────┘ + │ (Multiple Tags) │ + └─────────────────┘ + │ + ┌─────────────────┐ + │ CID Utils │ + │ (utils/cid/) │ + └─────────────────┘ +``` + +## Core Workflow Processes + +### 1. Push Operation + +The push operation stores agent records with rich metadata and discovery tags using ORAS-native operations: + +```go +// Push record to OCI registry +func (s *store) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) +``` + +**Workflow (6-step process):** +1. **Marshal record** - Convert to canonical OASF JSON +2. **Push blob with ORAS** - Use `oras.PushBytes` to get layer descriptor +3. **Calculate CID from digest** - Use `cidutil.ConvertDigestToCID` on ORAS digest +4. 
**Construct manifest annotations** - Rich metadata including calculated CID +5. **Pack manifest** - Create OCI manifest with `oras.PackManifest` +6. **Tag manifest** - Apply multiple discovery tags for browsability + +### 2. Pull Operation + +Retrieves complete agent records with validation: + +```go +// Pull record from OCI registry +func (s *store) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) +``` + +**Workflow:** +1. **Validate input** - Comprehensive reference validation +2. **Fetch and parse manifest** - Shared helper eliminates code duplication +3. **Validate layer structure** - Check for proper blob descriptors +4. **Fetch blob data** - Download actual record content +5. **Validate blob integrity** - Size and format verification +6. **Unmarshal record** - Convert back to OASF Record + +### 3. Lookup Operation + +Fast metadata retrieval optimized for performance: + +```go +// Lookup record metadata +func (s *store) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) +``` + +**Workflow:** +1. **Validate input** - Fast-fail for invalid references +2. **Resolve manifest directly** - Skip redundant existence check +3. **Parse manifest annotations** - Extract rich metadata +4. **Return metadata only** - No blob download required + +### 4. Delete Operation + +Registry-aware deletion following OCI best practices: + +```go +// Delete record and cleanup tags +func (s *store) Delete(ctx context.Context, ref *corev1.RecordRef) error +``` + +**Registry-Aware Workflow:** + +#### Local OCI Store: +1. **Clean up discovery tags** - Remove all associated tags +2. **Delete manifest** - Remove manifest descriptor +3. **Delete blob explicitly** - Full cleanup (we have filesystem control) + +#### Remote Registry: +1. **Best-effort tag cleanup** - Many registries don't support tag deletion +2. **Delete manifest** - Usually supported via OCI API +3. 
**Skip blob deletion** - Let registry garbage collection handle cleanup + +## Shared Helper Functions + +The implementation uses shared helper functions to eliminate code duplication: + +### Internal Helpers (`internal.go`) + +```go +// Shared manifest operations (used by Lookup and Pull) +func (s *store) fetchAndParseManifest(ctx context.Context, cid string) (*ocispec.Manifest, *ocispec.Descriptor, error) + +// Shared input validation (used by Lookup, Pull, Delete) +func validateRecordRef(ref *corev1.RecordRef) error + +// Local blob deletion (used by Delete for local stores) +func (s *store) deleteBlobForLocalStore(ctx context.Context, cid string, store *oci.Store) error +``` + +**Benefits:** +- **DRY principle** - Eliminates code duplication +- **Consistent behavior** - Same validation and error handling patterns +- **Easier maintenance** - Single place to modify shared logic + +## CID Utility Package (`utils/cid/`) + +Centralized CID operations with structured error handling: + +```go +// Convert OCI digest to CID (used in Push) +func ConvertDigestToCID(digest ocidigest.Digest) (string, error) + +// Convert CID to OCI digest (used in Delete) +func ConvertCIDToDigest(cidString string) (ocidigest.Digest, error) + +// Calculate digest from bytes (fallback utility) +func CalculateDigest(data []byte) (ocidigest.Digest, error) +``` + +**Features:** +- **Structured errors** - Custom error types with detailed context +- **Comprehensive validation** - Algorithm and format checking +- **Round-trip consistency** - Guaranteed CID ↔ Digest conversion +- **Performance optimized** - Efficient hash operations + +## Annotations System + +The system uses a streamlined annotation approach focused on manifest annotations: + +### Manifest Annotations + +Rich metadata stored in OCI manifest for discovery and filtering: + +```go +// Example manifest annotations +annotations := map[string]string{ + "org.agntcy.dir/type": "record", + "org.agntcy.dir/cid": "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + "org.agntcy.dir/name": "aws-ec2-agent", + "org.agntcy.dir/version": "1.2.0", + "org.agntcy.dir/description": "AWS EC2 management agent", + "org.agntcy.dir/oasf-version": "v0.5.0", + "org.agntcy.dir/schema-version": "v0.5.0", + "org.agntcy.dir/created-at": "2024-01-15T10:30:00Z", + "org.agntcy.dir/authors": "dev-team,ops-team", + "org.agntcy.dir/skills": "ec2-management,auto-scaling", + "org.agntcy.dir/locator-types": "docker,helm", + "org.agntcy.dir/extension-names": "monitoring,security", + "org.agntcy.dir/signed": "true", + "org.agntcy.dir/signature-algorithm": "cosign", + "org.agntcy.dir/signed-at": "2024-01-15T10:35:00Z", + "org.agntcy.dir/custom.team": "platform", + "org.agntcy.dir/custom.project": "cloud-automation", +} +``` + +### Annotation Categories + +| Category | Purpose | Examples | +|----------|---------|----------| +| **Core Identity** | Basic record information | `name`, `version`, `description`, `cid` | +| **Lifecycle** | Versioning and timestamps | `schema-version`, `created-at`, `authors` | +| **Capability Discovery** | Functional metadata | `skills`, `locator-types`, `extension-names` | +| **Security** | Integrity and verification | `signed`, `signature-algorithm`, `signed-at` | +| **Custom** | User-defined metadata | `custom.team`, `custom.project`, `custom.environment` | + +## Tag Generation System + +The tag generation system creates multiple discovery tags for enhanced browsability and filtering: + +### Tag Strategy Configuration + +```go +type TagStrategy struct { + 
EnableNameTags bool // Name-based tags (true) + EnableCapabilityTags bool // Skill/extension tags (true) + EnableInfrastructureTags bool // Deployment tags (true) + EnableTeamTags bool // Organization tags (true) + EnableContentAddressable bool // CID tag (true) + MaxTagsPerRecord int // Tag limit (20) +} +``` + +### Tag Categories and Examples + +#### 1. Content-Addressable Tags +Primary identifier for exact record lookup (calculated from ORAS digest): +``` +bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi +``` + +#### 2. Name-Based Tags +For human-friendly browsing: +``` +aws-ec2-agent +aws-ec2-agent:1.2.0 +aws-ec2-agent:latest +``` + +#### 3. Capability-Based Tags +For functional discovery: +``` +skill.ec2-management +skill.auto-scaling +ext.monitoring +ext.security +``` + +#### 4. Infrastructure Tags +For deployment discovery: +``` +deploy.docker +deploy.helm +deploy.kubernetes +``` + +#### 5. Team-Based Tags +For organizational filtering: +``` +team.platform +org.acme-corp +project.cloud-automation +``` + +### Tag Normalization + +All tags are normalized for OCI compliance: + +```go +// Input: "My Agent/v1.0@Company" +// Output: "my-agent.v1.0_company" + +// Rules: +// - Lowercase conversion +// - Spaces → hyphens (-) +// - Path separators (/) → dots (.) +// - Invalid chars → underscores (_) +// - Must start with [a-zA-Z0-9_] +// - Max 128 characters +// - No trailing separators +``` + +### Example Tag Generation + +For an AWS EC2 management agent with ORAS-calculated CID: + +```go +record := corev1.New(&typesv1alpha0.Record{ + Name: "aws-ec2-agent", + Version: "1.2.0", + Skills: []*typesv1alpha0.Skill{ + {Name: "ec2-management"}, + {Name: "auto-scaling"}, + }, + Locators: []*typesv1alpha0.Locator{ + {Type: "docker"}, + {Type: "helm"}, + }, + Extensions: []*typesv1alpha0.Extension{ + {Name: "monitoring"}, + }, + Annotations: map[string]string{ + "team": "platform", + "project": "cloud-automation", + }, +}) + +// Generated tags (CID calculated from ORAS digest): +tags := []string{ + "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", // CID from ORAS + "aws-ec2-agent", // Name + "aws-ec2-agent:1.2.0", // Name + version + "aws-ec2-agent:latest", // Name + latest + "skill.ec2-management", // Capability + "skill.auto-scaling", // Capability + "ext.monitoring", // Extension + "deploy.docker", // Infrastructure + "deploy.helm", // Infrastructure + "team.platform", // Team + "project.cloud-automation", // Project +} +``` + +## OASF Version Support + +The system supports multiple OASF versions with automatic detection: + +| OASF Version | API Version | Features | +|--------------|-------------|----------| +| **v0.3.1** | `objects/v1` | Basic agents with hierarchical skills (`category/class`) | +| **v0.4.0** | `objects/v2` | Agent records with simple skill names | +| **v0.5.0** | `objects/v3` | Full records with enhanced metadata | + +### Version-Specific Examples + +#### OASF v0.3.1 (objects/v1) +```go +// Skills use hierarchical format +skills := []*typesv1alpha0.Skill{ + {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, + {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, +} +// Generates tags: skill.nlp.processing, skill.ml.inference +``` + +#### OASF v0.5.0 (objects/v3) +```go +// Skills use simple names +skills := []*typesv1alpha0.Skill{ + {Name: "natural-language-processing"}, + {Name: "machine-learning"}, +} +// Generates tags: skill.natural-language-processing, skill.machine-learning +``` + +## Configuration + +### Local 
Storage +```go +cfg := ociconfig.Config{ + LocalDir: "/var/lib/agents/oci", + CacheDir: "/var/cache/agents", // Optional +} +``` + +### Remote Registry +```go +cfg := ociconfig.Config{ + RegistryAddress: "registry.example.com", + RepositoryName: "agents", + Username: "user", + Password: "pass", + Insecure: false, + CacheDir: "/var/cache/agents", // Optional +} +``` + +### Registry Authentication +Supports multiple authentication methods: +- **Username/Password** - Basic auth +- **Access Token** - Bearer token +- **Refresh Token** - OAuth refresh +- **Registry Credentials** - Docker config + +## Storage Features + +### Content Addressability +- **ORAS-based CID calculation** - CIDs derived from ORAS digest operations +- **Integrity verification** - Automatic content validation +- **Deduplication** - Identical OASF content stored once + +### Rich Metadata +- **Manifest annotations** - Searchable metadata stored in OCI manifests +- **Version tracking** - Schema evolution support +- **Custom annotations** - User-defined metadata +- **CID in annotations** - Direct CID storage for discovery + +### Discovery & Browsability +- **Multiple tag strategies** - Enhanced discoverability +- **Filtering capabilities** - Metadata-based queries +- **Hierarchical organization** - Team/project/capability grouping + +### Performance +- **Optimized network operations** - Minimal redundant calls +- **Optional caching** - Local cache for remote registries +- **Shared helper functions** - Eliminated code duplication +- **Registry-aware operations** - Optimized for local vs remote storage + +## Error Handling + +The system provides comprehensive error handling with structured errors and best-effort operations: + +### Structured CID Errors +```go +// CID utility errors with detailed context +&Error{ + Type: ErrorTypeInvalidCID, + Message: "failed to decode CID", + Details: map[string]interface{}{"cid": cidString, "error": err.Error()}, +} +``` + +### gRPC Status Codes +```go +// Common error scenarios +status.Error(codes.InvalidArgument, "record reference cannot be nil") +status.Error(codes.NotFound, "record not found: ") +status.Error(codes.Internal, "failed to push record bytes: ") +``` + +### Best-Effort Operations +```go +// Delete operations continue despite partial failures +var errors []string +if err := deleteManifest(); err != nil { + errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) + // Continue with cleanup +} +``` + +## Testing + +The package includes comprehensive tests covering: +- **CID utility functions** - Round-trip conversion, error cases +- **Shared helper functions** - Manifest parsing, validation +- **Annotation extraction** - All OASF versions +- **Tag generation** - All tag strategies +- **Tag normalization** - OCI compliance +- **Workflow operations** - Push/Pull/Lookup/Delete +- **Registry-aware deletion** - Local vs remote behavior +- **Error scenarios** - Validation and edge cases + +Run tests: +```bash +go test ./server/store/oci/... +go test ./utils/cid/... 
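+# a single test can be targeted with -run, for example:
+go test -run TestExtractManifestAnnotations ./server/store/oci/...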
+``` + +## Dependencies + +### Core Dependencies +- **`oras.land/oras-go/v2`** - OCI registry operations and native digest calculation +- **`github.com/opencontainers/image-spec`** - OCI specifications +- **`github.com/agntcy/dir/utils/cid`** - Centralized CID utilities +- **`github.com/ipfs/go-cid`** - Content addressing (via CID utils) +- **`github.com/multiformats/go-multihash`** - Hash format support (via CID utils) + +### Registry Support +- **OCI Distribution Spec** - Standard OCI registries +- **Docker Registry V2** - Docker Hub, Harbor, etc. +- **Local OCI Layout** - Local filesystem storage +- **Cloud Registries** - AWS ECR, GCP GCR, Azure ACR + +## Best Practices + +### Record Design +1. **Use descriptive names** - Enhance discoverability +2. **Include rich metadata** - Skills, extensions, locators +3. **Add custom annotations** - Team, project, environment +4. **Sign records** - Enable integrity verification + +### Tag Strategy +1. **Enable all tag types** - Maximize discoverability +2. **Use consistent naming** - Follow organizational conventions +3. **Limit custom tags** - Prevent tag explosion +4. **Consider tag namespacing** - Use prefixes for organization + +### Storage Configuration +1. **Use caching** - Improve performance for remote registries +2. **Configure authentication** - Secure access control +3. **Choose appropriate deletion strategy** - Understand local vs remote behavior +4. **Monitor storage usage** - Track registry size and costs +5. **Backup strategies** - Ensure data resilience + +### Development Guidelines +1. **Use shared helpers** - Leverage `fetchAndParseManifest`, `validateRecordRef` +2. **Follow error handling patterns** - Use structured errors with context +3. **Leverage CID utilities** - Use `utils/cid/` package for all CID operations 4. **Consider registry differences** - Design for both local and remote scenarios \ No newline at end of file diff --git a/server/store/oci/annotations.go b/server/store/oci/annotations.go index 27767881e..8ff4e94e5 100644 --- a/server/store/oci/annotations.go +++ b/server/store/oci/annotations.go @@ -1,242 +1,242 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "strconv" - "strings" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/types/adapters" -) - -// extractManifestAnnotations extracts manifest annotations from record using adapter pattern. 
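-// A nil or otherwise unreadable record yields only the base object-type annotation.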
-// -//nolint:cyclop // Function handles multiple annotation types with justified complexity -func extractManifestAnnotations(record *corev1.Record) map[string]string { - annotations := make(map[string]string) - - // Always set the type - annotations[manifestDirObjectTypeKey] = "record" - - // Use adapter pattern to get version-agnostic access to record data - adapter := adapters.NewRecordAdapter(record) - - recordData, err := adapter.GetRecordData() - if err != nil { - // Return minimal annotations if no valid data - return annotations - } - - // Add version details - annotations[ManifestKeyOASFVersion] = record.GetSchemaVersion() - - // Core identity fields (version-agnostic via adapter) - if name := recordData.GetName(); name != "" { - annotations[ManifestKeyName] = name - } - - if version := recordData.GetVersion(); version != "" { - annotations[ManifestKeyVersion] = version - } - - if description := recordData.GetDescription(); description != "" { - annotations[ManifestKeyDescription] = description - } - - // Lifecycle metadata - if schemaVersion := recordData.GetSchemaVersion(); schemaVersion != "" { - annotations[ManifestKeySchemaVersion] = schemaVersion - } - - if createdAt := recordData.GetCreatedAt(); createdAt != "" { - annotations[ManifestKeyCreatedAt] = createdAt - } - - if authors := recordData.GetAuthors(); len(authors) > 0 { - annotations[ManifestKeyAuthors] = strings.Join(authors, ",") - } - - // Capability discovery - extract skill names - if skills := recordData.GetSkills(); len(skills) > 0 { - skillNames := make([]string, len(skills)) - for i, skill := range skills { - skillNames[i] = skill.GetName() - } - - annotations[ManifestKeySkills] = strings.Join(skillNames, ",") - } - - // Extract locator types - if locators := recordData.GetLocators(); len(locators) > 0 { - locatorTypes := make([]string, len(locators)) - for i, locator := range locators { - locatorTypes[i] = locator.GetType() - } - - annotations[ManifestKeyLocatorTypes] = strings.Join(locatorTypes, ",") - } - - // Extract module names - if modules := recordData.GetModules(); len(modules) > 0 { - moduleNames := make([]string, len(modules)) - for i, module := range modules { - moduleNames[i] = module.GetName() - } - - annotations[ManifestKeyModuleNames] = strings.Join(moduleNames, ",") - } - - // Security metadata - if signature := recordData.GetSignature(); signature != nil { - annotations[ManifestKeySigned] = "true" - if algorithm := signature.GetAlgorithm(); algorithm != "" { - annotations[ManifestKeySignatureAlgo] = algorithm - } - - if signedAt := signature.GetSignedAt(); signedAt != "" { - annotations[ManifestKeySignedAt] = signedAt - } - } else { - annotations[ManifestKeySigned] = "false" - } - - // Versioning (v1 specific) - if previousCid := recordData.GetPreviousRecordCid(); previousCid != "" { - annotations[ManifestKeyPreviousCid] = previousCid - } - - // Custom annotations from record data -> manifest custom annotations - if customAnnotations := recordData.GetAnnotations(); len(customAnnotations) > 0 { - for key, value := range customAnnotations { - annotations[ManifestKeyCustomPrefix+key] = value - } - } - - return annotations -} - -// parseManifestAnnotations extracts structured metadata from manifest annotations. 
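-// Missing keys are tolerated; the schema version falls back to FallbackSchemaVersion when absent.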
-// -//nolint:cyclop // Function handles multiple metadata extraction paths with justified complexity -func parseManifestAnnotations(annotations map[string]string) *corev1.RecordMeta { - recordMeta := &corev1.RecordMeta{ - Annotations: make(map[string]string), - } - - // Set fallback schema version first for error recovery scenarios - recordMeta.SchemaVersion = FallbackSchemaVersion - - if annotations == nil { - return recordMeta - } - - // Extract schema version from stored data (override fallback if present) - if schemaVersion := annotations[ManifestKeySchemaVersion]; schemaVersion != "" { - recordMeta.SchemaVersion = schemaVersion - } - - // Extract created time from stored data (no more empty strings!) - if createdAt := annotations[ManifestKeyCreatedAt]; createdAt != "" { - recordMeta.CreatedAt = createdAt - } - - // Copy structured metadata into annotations for easy access - // Core identity - these will be easily accessible to consumers - if name := annotations[ManifestKeyName]; name != "" { - recordMeta.Annotations[MetadataKeyName] = name - } - - if version := annotations[ManifestKeyVersion]; version != "" { - recordMeta.Annotations[MetadataKeyVersion] = version - } - - if description := annotations[ManifestKeyDescription]; description != "" { - recordMeta.Annotations[MetadataKeyDescription] = description - } - - if oasfVersion := annotations[ManifestKeyOASFVersion]; oasfVersion != "" { - recordMeta.Annotations[MetadataKeyOASFVersion] = oasfVersion - } - - // Structured lists (easily parseable by consumers) - if authors := annotations[ManifestKeyAuthors]; authors != "" { - recordMeta.Annotations[MetadataKeyAuthors] = authors // comma-separated - // Also provide parsed count for quick stats - authorList := parseCommaSeparated(authors) - recordMeta.Annotations[MetadataKeyAuthorsCount] = strconv.Itoa(len(authorList)) - } - - if skills := annotations[ManifestKeySkills]; skills != "" { - recordMeta.Annotations[MetadataKeySkills] = skills // comma-separated - skillList := parseCommaSeparated(skills) - recordMeta.Annotations[MetadataKeySkillsCount] = strconv.Itoa(len(skillList)) - } - - if locatorTypes := annotations[ManifestKeyLocatorTypes]; locatorTypes != "" { - recordMeta.Annotations[MetadataKeyLocatorTypes] = locatorTypes // comma-separated - locatorList := parseCommaSeparated(locatorTypes) - recordMeta.Annotations[MetadataKeyLocatorTypesCount] = strconv.Itoa(len(locatorList)) - } - - if moduleNames := annotations[ManifestKeyModuleNames]; moduleNames != "" { - recordMeta.Annotations[MetadataKeyModuleNames] = moduleNames // comma-separated - moduleList := parseCommaSeparated(moduleNames) - recordMeta.Annotations[MetadataKeyModuleCount] = strconv.Itoa(len(moduleList)) - } - - // Security information (structured and easily accessible) - //nolint:nestif // Nested structure needed for conditional signature metadata extraction - if signedStr := annotations[ManifestKeySigned]; signedStr != "" { - recordMeta.Annotations[MetadataKeySigned] = signedStr - - if signedStr == "true" { - if algorithm := annotations[ManifestKeySignatureAlgo]; algorithm != "" { - recordMeta.Annotations[MetadataKeySignatureAlgo] = algorithm - } - - if signedAt := annotations[ManifestKeySignedAt]; signedAt != "" { - recordMeta.Annotations[MetadataKeySignedAt] = signedAt - } - } - } - - // Versioning information - if previousCid := annotations[ManifestKeyPreviousCid]; previousCid != "" { - recordMeta.Annotations[MetadataKeyPreviousCid] = previousCid - } - - // Custom annotations (those with our custom prefix) - clean 
namespace - for key, value := range annotations { - if strings.HasPrefix(key, ManifestKeyCustomPrefix) { - customKey := strings.TrimPrefix(key, ManifestKeyCustomPrefix) - recordMeta.Annotations[customKey] = value - } - } - - return recordMeta -} - -// parseCommaSeparated splits comma-separated values and trims whitespace. -func parseCommaSeparated(value string) []string { - if value == "" { - return nil - } - - parts := strings.Split(value, ",") - result := make([]string, 0, len(parts)) - - for _, part := range parts { - if trimmed := strings.TrimSpace(part); trimmed != "" { - result = append(result, trimmed) - } - } - - // Return nil if result is empty after filtering - if len(result) == 0 { - return nil - } - - return result -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "strconv" + "strings" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/types/adapters" +) + +// extractManifestAnnotations extracts manifest annotations from record using adapter pattern. +// +//nolint:cyclop // Function handles multiple annotation types with justified complexity +func extractManifestAnnotations(record *corev1.Record) map[string]string { + annotations := make(map[string]string) + + // Always set the type + annotations[manifestDirObjectTypeKey] = "record" + + // Use adapter pattern to get version-agnostic access to record data + adapter := adapters.NewRecordAdapter(record) + + recordData, err := adapter.GetRecordData() + if err != nil { + // Return minimal annotations if no valid data + return annotations + } + + // Add version details + annotations[ManifestKeyOASFVersion] = record.GetSchemaVersion() + + // Core identity fields (version-agnostic via adapter) + if name := recordData.GetName(); name != "" { + annotations[ManifestKeyName] = name + } + + if version := recordData.GetVersion(); version != "" { + annotations[ManifestKeyVersion] = version + } + + if description := recordData.GetDescription(); description != "" { + annotations[ManifestKeyDescription] = description + } + + // Lifecycle metadata + if schemaVersion := recordData.GetSchemaVersion(); schemaVersion != "" { + annotations[ManifestKeySchemaVersion] = schemaVersion + } + + if createdAt := recordData.GetCreatedAt(); createdAt != "" { + annotations[ManifestKeyCreatedAt] = createdAt + } + + if authors := recordData.GetAuthors(); len(authors) > 0 { + annotations[ManifestKeyAuthors] = strings.Join(authors, ",") + } + + // Capability discovery - extract skill names + if skills := recordData.GetSkills(); len(skills) > 0 { + skillNames := make([]string, len(skills)) + for i, skill := range skills { + skillNames[i] = skill.GetName() + } + + annotations[ManifestKeySkills] = strings.Join(skillNames, ",") + } + + // Extract locator types + if locators := recordData.GetLocators(); len(locators) > 0 { + locatorTypes := make([]string, len(locators)) + for i, locator := range locators { + locatorTypes[i] = locator.GetType() + } + + annotations[ManifestKeyLocatorTypes] = strings.Join(locatorTypes, ",") + } + + // Extract module names + if modules := recordData.GetModules(); len(modules) > 0 { + moduleNames := make([]string, len(modules)) + for i, module := range modules { + moduleNames[i] = module.GetName() + } + + annotations[ManifestKeyModuleNames] = strings.Join(moduleNames, ",") + } + + // Security metadata + if signature := recordData.GetSignature(); signature != nil { + annotations[ManifestKeySigned] = "true" + if algorithm := 
signature.GetAlgorithm(); algorithm != "" { + annotations[ManifestKeySignatureAlgo] = algorithm + } + + if signedAt := signature.GetSignedAt(); signedAt != "" { + annotations[ManifestKeySignedAt] = signedAt + } + } else { + annotations[ManifestKeySigned] = "false" + } + + // Versioning (v1 specific) + if previousCid := recordData.GetPreviousRecordCid(); previousCid != "" { + annotations[ManifestKeyPreviousCid] = previousCid + } + + // Custom annotations from record data -> manifest custom annotations + if customAnnotations := recordData.GetAnnotations(); len(customAnnotations) > 0 { + for key, value := range customAnnotations { + annotations[ManifestKeyCustomPrefix+key] = value + } + } + + return annotations +} + +// parseManifestAnnotations extracts structured metadata from manifest annotations. +// +//nolint:cyclop // Function handles multiple metadata extraction paths with justified complexity +func parseManifestAnnotations(annotations map[string]string) *corev1.RecordMeta { + recordMeta := &corev1.RecordMeta{ + Annotations: make(map[string]string), + } + + // Set fallback schema version first for error recovery scenarios + recordMeta.SchemaVersion = FallbackSchemaVersion + + if annotations == nil { + return recordMeta + } + + // Extract schema version from stored data (override fallback if present) + if schemaVersion := annotations[ManifestKeySchemaVersion]; schemaVersion != "" { + recordMeta.SchemaVersion = schemaVersion + } + + // Extract created time from stored data (no more empty strings!) + if createdAt := annotations[ManifestKeyCreatedAt]; createdAt != "" { + recordMeta.CreatedAt = createdAt + } + + // Copy structured metadata into annotations for easy access + // Core identity - these will be easily accessible to consumers + if name := annotations[ManifestKeyName]; name != "" { + recordMeta.Annotations[MetadataKeyName] = name + } + + if version := annotations[ManifestKeyVersion]; version != "" { + recordMeta.Annotations[MetadataKeyVersion] = version + } + + if description := annotations[ManifestKeyDescription]; description != "" { + recordMeta.Annotations[MetadataKeyDescription] = description + } + + if oasfVersion := annotations[ManifestKeyOASFVersion]; oasfVersion != "" { + recordMeta.Annotations[MetadataKeyOASFVersion] = oasfVersion + } + + // Structured lists (easily parseable by consumers) + if authors := annotations[ManifestKeyAuthors]; authors != "" { + recordMeta.Annotations[MetadataKeyAuthors] = authors // comma-separated + // Also provide parsed count for quick stats + authorList := parseCommaSeparated(authors) + recordMeta.Annotations[MetadataKeyAuthorsCount] = strconv.Itoa(len(authorList)) + } + + if skills := annotations[ManifestKeySkills]; skills != "" { + recordMeta.Annotations[MetadataKeySkills] = skills // comma-separated + skillList := parseCommaSeparated(skills) + recordMeta.Annotations[MetadataKeySkillsCount] = strconv.Itoa(len(skillList)) + } + + if locatorTypes := annotations[ManifestKeyLocatorTypes]; locatorTypes != "" { + recordMeta.Annotations[MetadataKeyLocatorTypes] = locatorTypes // comma-separated + locatorList := parseCommaSeparated(locatorTypes) + recordMeta.Annotations[MetadataKeyLocatorTypesCount] = strconv.Itoa(len(locatorList)) + } + + if moduleNames := annotations[ManifestKeyModuleNames]; moduleNames != "" { + recordMeta.Annotations[MetadataKeyModuleNames] = moduleNames // comma-separated + moduleList := parseCommaSeparated(moduleNames) + recordMeta.Annotations[MetadataKeyModuleCount] = strconv.Itoa(len(moduleList)) + } + + // Security 
information (structured and easily accessible) + //nolint:nestif // Nested structure needed for conditional signature metadata extraction + if signedStr := annotations[ManifestKeySigned]; signedStr != "" { + recordMeta.Annotations[MetadataKeySigned] = signedStr + + if signedStr == "true" { + if algorithm := annotations[ManifestKeySignatureAlgo]; algorithm != "" { + recordMeta.Annotations[MetadataKeySignatureAlgo] = algorithm + } + + if signedAt := annotations[ManifestKeySignedAt]; signedAt != "" { + recordMeta.Annotations[MetadataKeySignedAt] = signedAt + } + } + } + + // Versioning information + if previousCid := annotations[ManifestKeyPreviousCid]; previousCid != "" { + recordMeta.Annotations[MetadataKeyPreviousCid] = previousCid + } + + // Custom annotations (those with our custom prefix) - clean namespace + for key, value := range annotations { + if strings.HasPrefix(key, ManifestKeyCustomPrefix) { + customKey := strings.TrimPrefix(key, ManifestKeyCustomPrefix) + recordMeta.Annotations[customKey] = value + } + } + + return recordMeta +} + +// parseCommaSeparated splits comma-separated values and trims whitespace. +func parseCommaSeparated(value string) []string { + if value == "" { + return nil + } + + parts := strings.Split(value, ",") + result := make([]string, 0, len(parts)) + + for _, part := range parts { + if trimmed := strings.TrimSpace(part); trimmed != "" { + result = append(result, trimmed) + } + } + + // Return nil if result is empty after filtering + if len(result) == 0 { + return nil + } + + return result +} diff --git a/server/store/oci/annotations_test.go b/server/store/oci/annotations_test.go index b52772547..d7d471b14 100644 --- a/server/store/oci/annotations_test.go +++ b/server/store/oci/annotations_test.go @@ -1,409 +1,409 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "testing" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/stretchr/testify/assert" -) - -func TestParseCommaSeparated(t *testing.T) { - tests := []struct { - name string - input string - expected []string - }{ - { - name: "Empty string", - input: "", - expected: nil, - }, - { - name: "Single value", - input: "value1", - expected: []string{"value1"}, - }, - { - name: "Multiple values", - input: "value1,value2,value3", - expected: []string{"value1", "value2", "value3"}, - }, - { - name: "Values with spaces", - input: "value1, value2 , value3", - expected: []string{"value1", "value2", "value3"}, - }, - { - name: "Empty values filtered out", - input: "value1,,value2, ,value3", - expected: []string{"value1", "value2", "value3"}, - }, - { - name: "Only commas and spaces", - input: ", , ,", - expected: nil, - }, - { - name: "Trailing and leading commas", - input: ",value1,value2,", - expected: []string{"value1", "value2"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseCommaSeparated(tt.input) - assert.Equal(t, tt.expected, result) - }) - } -} - -func TestExtractManifestAnnotations(t *testing.T) { - // NOTE: This test covers different OASF versions with varying skill name formats: - // - V1 (objects.v1): Skills use "categoryName/className" hierarchical format - // - V2 (objects.v2): Skills use simple name strings - // - V3 (objects.v3): Skills use simple name strings - tests := 
[]struct { - name string - record *corev1.Record - expected map[string]string - contains map[string]string // Keys that should be present - }{ - { - name: "Nil record", - record: nil, - expected: map[string]string{ - manifestDirObjectTypeKey: "record", - }, - }, - { - name: "V1 basic record", - record: corev1.New(&typesv1alpha0.Record{ - Name: "test-agent", - Version: "1.0.0", - Description: "Test agent description", - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - Authors: []string{"author1", "author2"}, - }), - contains: map[string]string{ - manifestDirObjectTypeKey: "record", - ManifestKeyOASFVersion: "v0.3.1", - ManifestKeyName: "test-agent", - ManifestKeyVersion: "1.0.0", - ManifestKeyDescription: "Test agent description", - ManifestKeySchemaVersion: "v0.3.1", - ManifestKeyCreatedAt: "2023-01-01T00:00:00Z", - ManifestKeyAuthors: "author1,author2", - ManifestKeySigned: "false", - }, - }, - { - name: "V1 with skills and extensions", - record: corev1.New(&typesv1alpha0.Record{ - Name: "skill-agent", - Version: "2.0.0", - SchemaVersion: "v0.3.1", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, - {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, - }, - Locators: []*typesv1alpha0.Locator{ - {Type: "docker"}, - {Type: "helm"}, - }, - Extensions: []*typesv1alpha0.Extension{ - {Name: "security"}, - {Name: "monitoring"}, - }, - Annotations: map[string]string{ - "custom1": "value1", - "custom2": "value2", - }, - }), - contains: map[string]string{ - ManifestKeyName: "skill-agent", - ManifestKeyVersion: "2.0.0", - // NOTE: V1 skills use "categoryName/className" format, unlike V2/V3 which use simple names - ManifestKeySkills: "nlp/processing,ml/inference", - ManifestKeyLocatorTypes: "docker,helm", - ManifestKeyModuleNames: "security,monitoring", - ManifestKeyCustomPrefix + "custom1": "value1", - ManifestKeyCustomPrefix + "custom2": "value2", - }, - }, - { - name: "V1 basic record", - record: corev1.New(&typesv1alpha1.Record{ - Name: "test-record-v2", - Version: "2.0.0", - SchemaVersion: "0.7.0", - Description: "Test record v2 description", - Skills: []*typesv1alpha1.Skill{ - {Name: "nlp-skill"}, - }, - PreviousRecordCid: stringPtr("QmPreviousCID123"), - }), - contains: map[string]string{ - ManifestKeyOASFVersion: "0.7.0", - ManifestKeyName: "test-record-v2", - ManifestKeyVersion: "2.0.0", - ManifestKeyDescription: "Test record v2 description", - // NOTE: V3 skills use simple names, unlike V1 which uses "categoryName/className" - ManifestKeySkills: "nlp-skill", - ManifestKeyPreviousCid: "QmPreviousCID123", - ManifestKeySigned: "false", - }, - }, - { - name: "Record with signature", - record: corev1.New(&typesv1alpha0.Record{ - Name: "signed-agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - Signature: &typesv1alpha0.Signature{ - Algorithm: "ed25519", - SignedAt: "2023-01-01T12:00:00Z", - Signature: "signature-bytes", - }, - }), - contains: map[string]string{ - ManifestKeyName: "signed-agent", - ManifestKeySigned: "true", - ManifestKeySignatureAlgo: "ed25519", - ManifestKeySignedAt: "2023-01-01T12:00:00Z", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := extractManifestAnnotations(tt.record) - - // Check that all expected keys are present with correct values - for key, expectedValue := range tt.contains { - assert.Equal(t, expectedValue, result[key], "Key %s should have correct value", key) - } - - // Always should have the type key - assert.Equal(t, 
"record", result[manifestDirObjectTypeKey]) - }) - } -} - -func TestParseManifestAnnotations(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - expected *corev1.RecordMeta - }{ - { - name: "Nil annotations", - annotations: nil, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: make(map[string]string), - }, - }, - { - name: "Empty annotations", - annotations: map[string]string{}, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: make(map[string]string), - }, - }, - { - name: "Basic record metadata", - annotations: map[string]string{ - ManifestKeySchemaVersion: "v1", - ManifestKeyCreatedAt: "2023-01-01T00:00:00Z", - ManifestKeyName: "test-agent", - ManifestKeyVersion: "1.0.0", - ManifestKeyDescription: "Test description", - ManifestKeyOASFVersion: "v1", - }, - expected: &corev1.RecordMeta{ - SchemaVersion: "v1", - CreatedAt: "2023-01-01T00:00:00Z", - Annotations: map[string]string{ - MetadataKeyName: "test-agent", - MetadataKeyVersion: "1.0.0", - MetadataKeyDescription: "Test description", - MetadataKeyOASFVersion: "v1", - }, - }, - }, - { - name: "Record with skills and counts", - annotations: map[string]string{ - ManifestKeyName: "skill-agent", - ManifestKeySkills: "nlp,ml,vision", - ManifestKeyAuthors: "author1,author2", - }, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: map[string]string{ - MetadataKeyName: "skill-agent", - MetadataKeySkills: "nlp,ml,vision", - MetadataKeySkillsCount: "3", - MetadataKeyAuthors: "author1,author2", - MetadataKeyAuthorsCount: "2", - }, - }, - }, - { - name: "Record with security information", - annotations: map[string]string{ - ManifestKeyName: "secure-agent", - ManifestKeySigned: "true", - ManifestKeySignatureAlgo: "ed25519", - ManifestKeySignedAt: "2023-01-01T12:00:00Z", - }, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: map[string]string{ - MetadataKeyName: "secure-agent", - MetadataKeySigned: "true", - MetadataKeySignatureAlgo: "ed25519", - MetadataKeySignedAt: "2023-01-01T12:00:00Z", - }, - }, - }, - { - name: "Record with custom annotations", - annotations: map[string]string{ - ManifestKeyName: "custom-agent", - ManifestKeyCustomPrefix + "custom1": "value1", - ManifestKeyCustomPrefix + "custom2": "value2", - }, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: map[string]string{ - MetadataKeyName: "custom-agent", - "custom1": "value1", - "custom2": "value2", - }, - }, - }, - { - name: "Record with all metadata types", - annotations: map[string]string{ - ManifestKeyName: "full-agent", - ManifestKeySkills: "nlp,ml", - ManifestKeyLocatorTypes: "docker,helm,k8s", - ManifestKeyModuleNames: "security,monitoring", - ManifestKeyPreviousCid: "QmPrevious123", - }, - expected: &corev1.RecordMeta{ - SchemaVersion: FallbackSchemaVersion, - Annotations: map[string]string{ - MetadataKeyName: "full-agent", - MetadataKeySkills: "nlp,ml", - MetadataKeySkillsCount: "2", - MetadataKeyLocatorTypes: "docker,helm,k8s", - MetadataKeyLocatorTypesCount: "3", - MetadataKeyModuleNames: "security,monitoring", - MetadataKeyModuleCount: "2", - MetadataKeyPreviousCid: "QmPrevious123", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := parseManifestAnnotations(tt.annotations) - - assert.Equal(t, tt.expected.GetSchemaVersion(), result.GetSchemaVersion()) - assert.Equal(t, tt.expected.GetCreatedAt(), 
result.GetCreatedAt()) - - // Check all expected annotations - for key, expectedValue := range tt.expected.GetAnnotations() { - assert.Equal(t, expectedValue, result.GetAnnotations()[key], "Annotation key %s should have correct value", key) - } - - // Ensure no unexpected annotations (allow for additional count fields) - for key := range result.GetAnnotations() { - if _, expected := tt.expected.GetAnnotations()[key]; !expected { - // Allow count fields that are auto-generated - assert.True(t, - key == MetadataKeySkillsCount || - key == MetadataKeyAuthorsCount || - key == MetadataKeyLocatorTypesCount || - key == MetadataKeyModuleCount, - "Unexpected annotation key: %s", key) - } - } - }) - } -} - -func TestExtractManifestAnnotations_EdgeCases(t *testing.T) { - t.Run("Record with empty data", func(t *testing.T) { - record := corev1.New(&typesv1alpha0.Record{ - SchemaVersion: "v0.3.1", - }) - - result := extractManifestAnnotations(record) - - // Should still have basic annotations - assert.Equal(t, "record", result[manifestDirObjectTypeKey]) - assert.Equal(t, "v0.3.1", result[ManifestKeyOASFVersion]) - assert.Equal(t, "false", result[ManifestKeySigned]) - }) - - t.Run("Record with nil adapter data", func(t *testing.T) { - record := &corev1.Record{} // No data field set - - result := extractManifestAnnotations(record) - - // Should return minimal annotations - assert.Equal(t, "record", result[manifestDirObjectTypeKey]) - assert.Len(t, result, 1) // Only the type key - }) -} - -func TestRoundTripConversion(t *testing.T) { - // Test that we can extract manifest annotations and parse them back correctly - // NOTE: This test uses V1 format where skills have "categoryName/className" structure - originalRecord := corev1.New(&typesv1alpha0.Record{ - Name: "roundtrip-agent", - Version: "1.0.0", - Description: "Test roundtrip conversion", - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - Authors: []string{"author1", "author2"}, - Skills: []*typesv1alpha0.Skill{ - {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, - }, - Annotations: map[string]string{ - "custom": "value", - }, - }) - - // Extract annotations - manifestAnnotations := extractManifestAnnotations(originalRecord) - - // Parse them back - recordMeta := parseManifestAnnotations(manifestAnnotations) - - // Verify round-trip conversion - assert.Equal(t, "v0.3.1", recordMeta.GetSchemaVersion()) - assert.Equal(t, "2023-01-01T00:00:00Z", recordMeta.GetCreatedAt()) - assert.Equal(t, "roundtrip-agent", recordMeta.GetAnnotations()[MetadataKeyName]) - assert.Equal(t, "1.0.0", recordMeta.GetAnnotations()[MetadataKeyVersion]) - assert.Equal(t, "Test roundtrip conversion", recordMeta.GetAnnotations()[MetadataKeyDescription]) - assert.Equal(t, "v0.3.1", recordMeta.GetAnnotations()[MetadataKeyOASFVersion]) - assert.Equal(t, "author1,author2", recordMeta.GetAnnotations()[MetadataKeyAuthors]) - assert.Equal(t, "2", recordMeta.GetAnnotations()[MetadataKeyAuthorsCount]) - // NOTE: V1 skills return "categoryName/className" format, not just className - assert.Equal(t, "nlp/processing", recordMeta.GetAnnotations()[MetadataKeySkills]) - assert.Equal(t, "1", recordMeta.GetAnnotations()[MetadataKeySkillsCount]) - assert.Equal(t, "value", recordMeta.GetAnnotations()["custom"]) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "testing" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + typesv1alpha1 
"buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/stretchr/testify/assert" +) + +func TestParseCommaSeparated(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "Empty string", + input: "", + expected: nil, + }, + { + name: "Single value", + input: "value1", + expected: []string{"value1"}, + }, + { + name: "Multiple values", + input: "value1,value2,value3", + expected: []string{"value1", "value2", "value3"}, + }, + { + name: "Values with spaces", + input: "value1, value2 , value3", + expected: []string{"value1", "value2", "value3"}, + }, + { + name: "Empty values filtered out", + input: "value1,,value2, ,value3", + expected: []string{"value1", "value2", "value3"}, + }, + { + name: "Only commas and spaces", + input: ", , ,", + expected: nil, + }, + { + name: "Trailing and leading commas", + input: ",value1,value2,", + expected: []string{"value1", "value2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseCommaSeparated(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestExtractManifestAnnotations(t *testing.T) { + // NOTE: This test covers different OASF versions with varying skill name formats: + // - V1 (objects.v1): Skills use "categoryName/className" hierarchical format + // - V2 (objects.v2): Skills use simple name strings + // - V3 (objects.v3): Skills use simple name strings + tests := []struct { + name string + record *corev1.Record + expected map[string]string + contains map[string]string // Keys that should be present + }{ + { + name: "Nil record", + record: nil, + expected: map[string]string{ + manifestDirObjectTypeKey: "record", + }, + }, + { + name: "V1 basic record", + record: corev1.New(&typesv1alpha0.Record{ + Name: "test-agent", + Version: "1.0.0", + Description: "Test agent description", + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + Authors: []string{"author1", "author2"}, + }), + contains: map[string]string{ + manifestDirObjectTypeKey: "record", + ManifestKeyOASFVersion: "v0.3.1", + ManifestKeyName: "test-agent", + ManifestKeyVersion: "1.0.0", + ManifestKeyDescription: "Test agent description", + ManifestKeySchemaVersion: "v0.3.1", + ManifestKeyCreatedAt: "2023-01-01T00:00:00Z", + ManifestKeyAuthors: "author1,author2", + ManifestKeySigned: "false", + }, + }, + { + name: "V1 with skills and extensions", + record: corev1.New(&typesv1alpha0.Record{ + Name: "skill-agent", + Version: "2.0.0", + SchemaVersion: "v0.3.1", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, + {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, + }, + Locators: []*typesv1alpha0.Locator{ + {Type: "docker"}, + {Type: "helm"}, + }, + Extensions: []*typesv1alpha0.Extension{ + {Name: "security"}, + {Name: "monitoring"}, + }, + Annotations: map[string]string{ + "custom1": "value1", + "custom2": "value2", + }, + }), + contains: map[string]string{ + ManifestKeyName: "skill-agent", + ManifestKeyVersion: "2.0.0", + // NOTE: V1 skills use "categoryName/className" format, unlike V2/V3 which use simple names + ManifestKeySkills: "nlp/processing,ml/inference", + ManifestKeyLocatorTypes: "docker,helm", + ManifestKeyModuleNames: "security,monitoring", + ManifestKeyCustomPrefix + "custom1": "value1", + ManifestKeyCustomPrefix + "custom2": "value2", + }, + }, + { + name: "V1 basic record", + record: 
corev1.New(&typesv1alpha1.Record{ + Name: "test-record-v2", + Version: "2.0.0", + SchemaVersion: "0.7.0", + Description: "Test record v2 description", + Skills: []*typesv1alpha1.Skill{ + {Name: "nlp-skill"}, + }, + PreviousRecordCid: stringPtr("QmPreviousCID123"), + }), + contains: map[string]string{ + ManifestKeyOASFVersion: "0.7.0", + ManifestKeyName: "test-record-v2", + ManifestKeyVersion: "2.0.0", + ManifestKeyDescription: "Test record v2 description", + // NOTE: V3 skills use simple names, unlike V1 which uses "categoryName/className" + ManifestKeySkills: "nlp-skill", + ManifestKeyPreviousCid: "QmPreviousCID123", + ManifestKeySigned: "false", + }, + }, + { + name: "Record with signature", + record: corev1.New(&typesv1alpha0.Record{ + Name: "signed-agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + Signature: &typesv1alpha0.Signature{ + Algorithm: "ed25519", + SignedAt: "2023-01-01T12:00:00Z", + Signature: "signature-bytes", + }, + }), + contains: map[string]string{ + ManifestKeyName: "signed-agent", + ManifestKeySigned: "true", + ManifestKeySignatureAlgo: "ed25519", + ManifestKeySignedAt: "2023-01-01T12:00:00Z", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := extractManifestAnnotations(tt.record) + + // Check that all expected keys are present with correct values + for key, expectedValue := range tt.contains { + assert.Equal(t, expectedValue, result[key], "Key %s should have correct value", key) + } + + // Always should have the type key + assert.Equal(t, "record", result[manifestDirObjectTypeKey]) + }) + } +} + +func TestParseManifestAnnotations(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + expected *corev1.RecordMeta + }{ + { + name: "Nil annotations", + annotations: nil, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: make(map[string]string), + }, + }, + { + name: "Empty annotations", + annotations: map[string]string{}, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: make(map[string]string), + }, + }, + { + name: "Basic record metadata", + annotations: map[string]string{ + ManifestKeySchemaVersion: "v1", + ManifestKeyCreatedAt: "2023-01-01T00:00:00Z", + ManifestKeyName: "test-agent", + ManifestKeyVersion: "1.0.0", + ManifestKeyDescription: "Test description", + ManifestKeyOASFVersion: "v1", + }, + expected: &corev1.RecordMeta{ + SchemaVersion: "v1", + CreatedAt: "2023-01-01T00:00:00Z", + Annotations: map[string]string{ + MetadataKeyName: "test-agent", + MetadataKeyVersion: "1.0.0", + MetadataKeyDescription: "Test description", + MetadataKeyOASFVersion: "v1", + }, + }, + }, + { + name: "Record with skills and counts", + annotations: map[string]string{ + ManifestKeyName: "skill-agent", + ManifestKeySkills: "nlp,ml,vision", + ManifestKeyAuthors: "author1,author2", + }, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: map[string]string{ + MetadataKeyName: "skill-agent", + MetadataKeySkills: "nlp,ml,vision", + MetadataKeySkillsCount: "3", + MetadataKeyAuthors: "author1,author2", + MetadataKeyAuthorsCount: "2", + }, + }, + }, + { + name: "Record with security information", + annotations: map[string]string{ + ManifestKeyName: "secure-agent", + ManifestKeySigned: "true", + ManifestKeySignatureAlgo: "ed25519", + ManifestKeySignedAt: "2023-01-01T12:00:00Z", + }, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: map[string]string{ + MetadataKeyName: 
"secure-agent", + MetadataKeySigned: "true", + MetadataKeySignatureAlgo: "ed25519", + MetadataKeySignedAt: "2023-01-01T12:00:00Z", + }, + }, + }, + { + name: "Record with custom annotations", + annotations: map[string]string{ + ManifestKeyName: "custom-agent", + ManifestKeyCustomPrefix + "custom1": "value1", + ManifestKeyCustomPrefix + "custom2": "value2", + }, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: map[string]string{ + MetadataKeyName: "custom-agent", + "custom1": "value1", + "custom2": "value2", + }, + }, + }, + { + name: "Record with all metadata types", + annotations: map[string]string{ + ManifestKeyName: "full-agent", + ManifestKeySkills: "nlp,ml", + ManifestKeyLocatorTypes: "docker,helm,k8s", + ManifestKeyModuleNames: "security,monitoring", + ManifestKeyPreviousCid: "QmPrevious123", + }, + expected: &corev1.RecordMeta{ + SchemaVersion: FallbackSchemaVersion, + Annotations: map[string]string{ + MetadataKeyName: "full-agent", + MetadataKeySkills: "nlp,ml", + MetadataKeySkillsCount: "2", + MetadataKeyLocatorTypes: "docker,helm,k8s", + MetadataKeyLocatorTypesCount: "3", + MetadataKeyModuleNames: "security,monitoring", + MetadataKeyModuleCount: "2", + MetadataKeyPreviousCid: "QmPrevious123", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseManifestAnnotations(tt.annotations) + + assert.Equal(t, tt.expected.GetSchemaVersion(), result.GetSchemaVersion()) + assert.Equal(t, tt.expected.GetCreatedAt(), result.GetCreatedAt()) + + // Check all expected annotations + for key, expectedValue := range tt.expected.GetAnnotations() { + assert.Equal(t, expectedValue, result.GetAnnotations()[key], "Annotation key %s should have correct value", key) + } + + // Ensure no unexpected annotations (allow for additional count fields) + for key := range result.GetAnnotations() { + if _, expected := tt.expected.GetAnnotations()[key]; !expected { + // Allow count fields that are auto-generated + assert.True(t, + key == MetadataKeySkillsCount || + key == MetadataKeyAuthorsCount || + key == MetadataKeyLocatorTypesCount || + key == MetadataKeyModuleCount, + "Unexpected annotation key: %s", key) + } + } + }) + } +} + +func TestExtractManifestAnnotations_EdgeCases(t *testing.T) { + t.Run("Record with empty data", func(t *testing.T) { + record := corev1.New(&typesv1alpha0.Record{ + SchemaVersion: "v0.3.1", + }) + + result := extractManifestAnnotations(record) + + // Should still have basic annotations + assert.Equal(t, "record", result[manifestDirObjectTypeKey]) + assert.Equal(t, "v0.3.1", result[ManifestKeyOASFVersion]) + assert.Equal(t, "false", result[ManifestKeySigned]) + }) + + t.Run("Record with nil adapter data", func(t *testing.T) { + record := &corev1.Record{} // No data field set + + result := extractManifestAnnotations(record) + + // Should return minimal annotations + assert.Equal(t, "record", result[manifestDirObjectTypeKey]) + assert.Len(t, result, 1) // Only the type key + }) +} + +func TestRoundTripConversion(t *testing.T) { + // Test that we can extract manifest annotations and parse them back correctly + // NOTE: This test uses V1 format where skills have "categoryName/className" structure + originalRecord := corev1.New(&typesv1alpha0.Record{ + Name: "roundtrip-agent", + Version: "1.0.0", + Description: "Test roundtrip conversion", + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + Authors: []string{"author1", "author2"}, + Skills: []*typesv1alpha0.Skill{ + {CategoryName: 
stringPtr("nlp"), ClassName: stringPtr("processing")}, + }, + Annotations: map[string]string{ + "custom": "value", + }, + }) + + // Extract annotations + manifestAnnotations := extractManifestAnnotations(originalRecord) + + // Parse them back + recordMeta := parseManifestAnnotations(manifestAnnotations) + + // Verify round-trip conversion + assert.Equal(t, "v0.3.1", recordMeta.GetSchemaVersion()) + assert.Equal(t, "2023-01-01T00:00:00Z", recordMeta.GetCreatedAt()) + assert.Equal(t, "roundtrip-agent", recordMeta.GetAnnotations()[MetadataKeyName]) + assert.Equal(t, "1.0.0", recordMeta.GetAnnotations()[MetadataKeyVersion]) + assert.Equal(t, "Test roundtrip conversion", recordMeta.GetAnnotations()[MetadataKeyDescription]) + assert.Equal(t, "v0.3.1", recordMeta.GetAnnotations()[MetadataKeyOASFVersion]) + assert.Equal(t, "author1,author2", recordMeta.GetAnnotations()[MetadataKeyAuthors]) + assert.Equal(t, "2", recordMeta.GetAnnotations()[MetadataKeyAuthorsCount]) + // NOTE: V1 skills return "categoryName/className" format, not just className + assert.Equal(t, "nlp/processing", recordMeta.GetAnnotations()[MetadataKeySkills]) + assert.Equal(t, "1", recordMeta.GetAnnotations()[MetadataKeySkillsCount]) + assert.Equal(t, "value", recordMeta.GetAnnotations()["custom"]) +} diff --git a/server/store/oci/config/config.go b/server/store/oci/config/config.go index 85000b37a..47c791bef 100644 --- a/server/store/oci/config/config.go +++ b/server/store/oci/config/config.go @@ -1,42 +1,42 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -const ( - DefaultAuthConfigInsecure = true - DefaultRegistryAddress = "127.0.0.1:5000" - DefaultRepositoryName = "dir" -) - -type Config struct { - // Path to a local directory that will be to hold data instead of remote. - // If this is set to non-empty value, only local store will be used. - LocalDir string `json:"local_dir,omitempty" mapstructure:"local_dir"` - - // Path to a local directory that will be used to cache metadata. - // If empty, caching will not be used. - CacheDir string `json:"cache_dir,omitempty" mapstructure:"cache_dir"` - - // Registry address to connect to - RegistryAddress string `json:"registry_address,omitempty" mapstructure:"registry_address"` - - // Repository name to connect to - RepositoryName string `json:"repository_name,omitempty" mapstructure:"repository_name"` - - // Authentication configuration - AuthConfig `json:"auth_config,omitempty" mapstructure:"auth_config"` -} - -// AuthConfig represents the configuration for authentication. -type AuthConfig struct { - Insecure bool `json:"insecure" mapstructure:"insecure"` - - Username string `json:"username,omitempty" mapstructure:"username"` - - Password string `json:"password,omitempty" mapstructure:"password"` - - RefreshToken string `json:"refresh_token,omitempty" mapstructure:"refresh_token"` - - AccessToken string `json:"access_token,omitempty" mapstructure:"access_token"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +const ( + DefaultAuthConfigInsecure = true + DefaultRegistryAddress = "127.0.0.1:5000" + DefaultRepositoryName = "dir" +) + +type Config struct { + // Path to a local directory that will be used to hold data instead of a remote registry. + // If this is set to a non-empty value, only the local store will be used. 
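// A minimal sketch of the two modes this struct configures, using only the
// defaults declared above; the directory path is an illustrative value, not
// one the project prescribes:
//
//	localOnly := Config{LocalDir: "/var/lib/dir/oci"} // registry settings are ignored
//
//	remote := Config{
//	    RegistryAddress: DefaultRegistryAddress, // "127.0.0.1:5000"
//	    RepositoryName:  DefaultRepositoryName,  // "dir"
//	    AuthConfig:      AuthConfig{Insecure: DefaultAuthConfigInsecure},
//	}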
+ LocalDir string `json:"local_dir,omitempty" mapstructure:"local_dir"` + + // Path to a local directory that will be used to cache metadata. + // If empty, caching will not be used. + CacheDir string `json:"cache_dir,omitempty" mapstructure:"cache_dir"` + + // Registry address to connect to + RegistryAddress string `json:"registry_address,omitempty" mapstructure:"registry_address"` + + // Repository name to connect to + RepositoryName string `json:"repository_name,omitempty" mapstructure:"repository_name"` + + // Authentication configuration + AuthConfig `json:"auth_config,omitempty" mapstructure:"auth_config"` +} + +// AuthConfig represents the configuration for authentication. +type AuthConfig struct { + Insecure bool `json:"insecure" mapstructure:"insecure"` + + Username string `json:"username,omitempty" mapstructure:"username"` + + Password string `json:"password,omitempty" mapstructure:"password"` + + RefreshToken string `json:"refresh_token,omitempty" mapstructure:"refresh_token"` + + AccessToken string `json:"access_token,omitempty" mapstructure:"access_token"` +} diff --git a/server/store/oci/constants.go b/server/store/oci/constants.go index da53d674d..54e9cf27a 100644 --- a/server/store/oci/constants.go +++ b/server/store/oci/constants.go @@ -1,86 +1,86 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -// This file defines the complete metadata schema for OCI annotations. -// It serves as the single source of truth for all annotation keys used -// in manifest and descriptor annotations for record storage. - -const ( - // Used for dir-specific annotations. - manifestDirObjectKeyPrefix = "org.agntcy.dir" - manifestDirObjectTypeKey = manifestDirObjectKeyPrefix + "/type" - - // THESE ARE THE SOURCE OF TRUTH for field names. - - // Core Identity (simple keys). - MetadataKeyName = "name" - MetadataKeyVersion = "version" - MetadataKeyDescription = "description" - MetadataKeyOASFVersion = "oasf-version" - MetadataKeyCid = "cid" - - // Lifecycle (simple keys). - MetadataKeySchemaVersion = "schema-version" - MetadataKeyCreatedAt = "created-at" - MetadataKeyAuthors = "authors" - - // Capability Discovery (simple keys). - MetadataKeySkills = "skills" - MetadataKeyLocatorTypes = "locator-types" - MetadataKeyModuleNames = "module-names" - - // Security (simple keys). - MetadataKeySigned = "signed" - MetadataKeySignatureAlgo = "signature-algorithm" - MetadataKeySignedAt = "signed-at" - - // Versioning (simple keys). - MetadataKeyPreviousCid = "previous-cid" - - // Team-based (simple keys). - MetadataKeyTeam = "team" - MetadataKeyOrganization = "organization" - MetadataKeyProject = "project" - - // Count metadata (simple keys). - MetadataKeyAuthorsCount = "authors-count" - MetadataKeySkillsCount = "skills-count" - MetadataKeyLocatorTypesCount = "locator-types-count" - MetadataKeyModuleCount = "module-names-count" - - // Derived from MetadataKey constants to ensure consistency. - - // Core Identity (derived from MetadataKey constants). - ManifestKeyName = manifestDirObjectKeyPrefix + "/" + MetadataKeyName - ManifestKeyVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeyVersion - ManifestKeyDescription = manifestDirObjectKeyPrefix + "/" + MetadataKeyDescription - ManifestKeyOASFVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeyOASFVersion - ManifestKeyCid = manifestDirObjectKeyPrefix + "/" + MetadataKeyCid - - // Lifecycle Metadata (mixed: some derived, some standalone). 
- ManifestKeySchemaVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeySchemaVersion - ManifestKeyCreatedAt = manifestDirObjectKeyPrefix + "/" + MetadataKeyCreatedAt - ManifestKeyAuthors = manifestDirObjectKeyPrefix + "/" + MetadataKeyAuthors - - // Capability Discovery (derived from MetadataKey constants). - ManifestKeySkills = manifestDirObjectKeyPrefix + "/" + MetadataKeySkills - ManifestKeyLocatorTypes = manifestDirObjectKeyPrefix + "/" + MetadataKeyLocatorTypes - ManifestKeyModuleNames = manifestDirObjectKeyPrefix + "/" + MetadataKeyModuleNames - - // Security & Integrity (mixed: some derived, some standalone). - ManifestKeySigned = manifestDirObjectKeyPrefix + "/" + MetadataKeySigned - ManifestKeySignatureAlgo = manifestDirObjectKeyPrefix + "/" + MetadataKeySignatureAlgo - ManifestKeySignedAt = manifestDirObjectKeyPrefix + "/" + MetadataKeySignedAt - - // Versioning & Linking (standalone - no simple key equivalents). - ManifestKeyPreviousCid = manifestDirObjectKeyPrefix + "/" + MetadataKeyPreviousCid - - // Custom annotations prefix. - ManifestKeyCustomPrefix = manifestDirObjectKeyPrefix + "/custom." - - // Fallback values for error recovery scenarios. - // Used when parsing corrupted storage, legacy records, or external modifications. - FallbackSchemaVersion = "v0.3.1" -) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +// This file defines the complete metadata schema for OCI annotations. +// It serves as the single source of truth for all annotation keys used +// in manifest and descriptor annotations for record storage. + +const ( + // Used for dir-specific annotations. + manifestDirObjectKeyPrefix = "org.agntcy.dir" + manifestDirObjectTypeKey = manifestDirObjectKeyPrefix + "/type" + + // THESE ARE THE SOURCE OF TRUTH for field names. + + // Core Identity (simple keys). + MetadataKeyName = "name" + MetadataKeyVersion = "version" + MetadataKeyDescription = "description" + MetadataKeyOASFVersion = "oasf-version" + MetadataKeyCid = "cid" + + // Lifecycle (simple keys). + MetadataKeySchemaVersion = "schema-version" + MetadataKeyCreatedAt = "created-at" + MetadataKeyAuthors = "authors" + + // Capability Discovery (simple keys). + MetadataKeySkills = "skills" + MetadataKeyLocatorTypes = "locator-types" + MetadataKeyModuleNames = "module-names" + + // Security (simple keys). + MetadataKeySigned = "signed" + MetadataKeySignatureAlgo = "signature-algorithm" + MetadataKeySignedAt = "signed-at" + + // Versioning (simple keys). + MetadataKeyPreviousCid = "previous-cid" + + // Team-based (simple keys). + MetadataKeyTeam = "team" + MetadataKeyOrganization = "organization" + MetadataKeyProject = "project" + + // Count metadata (simple keys). + MetadataKeyAuthorsCount = "authors-count" + MetadataKeySkillsCount = "skills-count" + MetadataKeyLocatorTypesCount = "locator-types-count" + MetadataKeyModuleCount = "module-names-count" + + // Derived from MetadataKey constants to ensure consistency. + + // Core Identity (derived from MetadataKey constants). + ManifestKeyName = manifestDirObjectKeyPrefix + "/" + MetadataKeyName + ManifestKeyVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeyVersion + ManifestKeyDescription = manifestDirObjectKeyPrefix + "/" + MetadataKeyDescription + ManifestKeyOASFVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeyOASFVersion + ManifestKeyCid = manifestDirObjectKeyPrefix + "/" + MetadataKeyCid + + // Lifecycle Metadata (mixed: some derived, some standalone). 
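// To make the derivation pattern in this block concrete, the constants expand
// as follows (values follow from the prefix and simple keys declared above;
// "team" is an illustrative custom key):
//
//	ManifestKeyName                  // "org.agntcy.dir/name"
//	ManifestKeySchemaVersion         // "org.agntcy.dir/schema-version"
//	ManifestKeyCustomPrefix + "team" // "org.agntcy.dir/custom.team"
//
// parseManifestAnnotations recovers the short custom key again with
// strings.TrimPrefix(key, ManifestKeyCustomPrefix).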
+ ManifestKeySchemaVersion = manifestDirObjectKeyPrefix + "/" + MetadataKeySchemaVersion + ManifestKeyCreatedAt = manifestDirObjectKeyPrefix + "/" + MetadataKeyCreatedAt + ManifestKeyAuthors = manifestDirObjectKeyPrefix + "/" + MetadataKeyAuthors + + // Capability Discovery (derived from MetadataKey constants). + ManifestKeySkills = manifestDirObjectKeyPrefix + "/" + MetadataKeySkills + ManifestKeyLocatorTypes = manifestDirObjectKeyPrefix + "/" + MetadataKeyLocatorTypes + ManifestKeyModuleNames = manifestDirObjectKeyPrefix + "/" + MetadataKeyModuleNames + + // Security & Integrity (mixed: some derived, some standalone). + ManifestKeySigned = manifestDirObjectKeyPrefix + "/" + MetadataKeySigned + ManifestKeySignatureAlgo = manifestDirObjectKeyPrefix + "/" + MetadataKeySignatureAlgo + ManifestKeySignedAt = manifestDirObjectKeyPrefix + "/" + MetadataKeySignedAt + + // Versioning & Linking (standalone - no simple key equivalents). + ManifestKeyPreviousCid = manifestDirObjectKeyPrefix + "/" + MetadataKeyPreviousCid + + // Custom annotations prefix. + ManifestKeyCustomPrefix = manifestDirObjectKeyPrefix + "/custom." + + // Fallback values for error recovery scenarios. + // Used when parsing corrupted storage, legacy records, or external modifications. + FallbackSchemaVersion = "v0.3.1" +) diff --git a/server/store/oci/integration_test.go b/server/store/oci/integration_test.go index 93c8ca931..411e73552 100644 --- a/server/store/oci/integration_test.go +++ b/server/store/oci/integration_test.go @@ -1,321 +1,321 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "testing" - "time" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - corev1 "github.com/agntcy/dir/api/core/v1" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - // Integration test configuration. - integrationRegistryAddress = "localhost:5555" - integrationRepositoryName = "integration-test" - integrationTimeout = 30 * time.Minute -) - -// Integration test configuration. -var integrationConfig = ociconfig.Config{ - RegistryAddress: integrationRegistryAddress, - RepositoryName: integrationRepositoryName, - AuthConfig: ociconfig.AuthConfig{ - Insecure: true, // Required for local zot registry - }, -} - -// createTestRecord creates a comprehensive test record for integration testing. -func createTestRecord() *corev1.Record { - return corev1.New(&typesv1alpha0.Record{ - Name: "integration-test-agent", - Version: "v1.0.0", - Description: "Integration test agent for OCI storage", - SchemaVersion: "v0.3.1", - CreatedAt: "2023-01-01T00:00:00Z", - Authors: []string{"integration-test@example.com"}, - Skills: []*typesv1alpha0.Skill{ - {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, - {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, - }, - Locators: []*typesv1alpha0.Locator{ - {Type: "docker"}, - {Type: "helm"}, - }, - Extensions: []*typesv1alpha0.Extension{ - {Name: "security"}, - {Name: "monitoring"}, - }, - Annotations: map[string]string{ - "team": "integration-test", - "environment": "test", - "project": "oci-storage", - }, - }) -} - -// setupIntegrationStore creates a store connected to the local zot registry. 
-func setupIntegrationStore(t *testing.T) types.StoreAPI { - t.Helper() - - // Check if zot registry is available - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - client := &http.Client{Timeout: 2 * time.Second} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+integrationRegistryAddress+"/v2/", nil) - require.NoError(t, err, "Failed to create registry health check request") - - resp, err := client.Do(req) - if err != nil { - t.Skip("Zot registry not available at localhost:5555. Start with manual docker command or task server:store:start") - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - t.Skipf("Zot registry health check failed with status: %d", resp.StatusCode) - } - - // Create store - store, err := New(integrationConfig) - require.NoError(t, err, "Failed to create integration store") - - return store -} - -// getRegistryTags fetches all tags for the repository from zot registry. -func getRegistryTags(ctx context.Context, t *testing.T) []string { - t.Helper() - - client := &http.Client{Timeout: 5 * time.Second} - url := fmt.Sprintf("http://%s/v2/%s/tags/list", integrationRegistryAddress, integrationRepositoryName) - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - require.NoError(t, err, "Failed to create tags request") - - resp, err := client.Do(req) - require.NoError(t, err, "Failed to fetch tags from registry") - - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return []string{} // Repository doesn't exist yet - } - - require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status when fetching tags") - - var response struct { - Name string `json:"name"` - Tags []string `json:"tags"` - } - - err = json.NewDecoder(resp.Body).Decode(&response) - require.NoError(t, err, "Failed to decode tags response") - - return response.Tags -} - -// getManifest fetches manifest for a specific tag from zot registry. 
-func getManifest(ctx context.Context, t *testing.T, tag string) map[string]interface{} { - t.Helper() - - client := &http.Client{Timeout: 5 * time.Second} - url := fmt.Sprintf("http://%s/v2/%s/manifests/%s", integrationRegistryAddress, integrationRepositoryName, tag) - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - require.NoError(t, err, "Failed to create manifest request") - req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json") - - resp, err := client.Do(req) - require.NoError(t, err, "Failed to fetch manifest from registry") - - defer resp.Body.Close() - - require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status when fetching manifest") - - var manifest map[string]interface{} - - err = json.NewDecoder(resp.Body).Decode(&manifest) - require.NoError(t, err, "Failed to decode manifest response") - - return manifest -} - -//nolint:maintidx // Function handles multiple test cases with justified complexity -func TestIntegrationOCIStoreWorkflow(t *testing.T) { - // NOTE: This integration test uses V1 records where skills have "categoryName/className" format - // This differs from V2/V3 which use simple skill names - ctx, cancel := context.WithTimeout(t.Context(), integrationTimeout) - defer cancel() - - store := setupIntegrationStore(t) - record := createTestRecord() - - t.Run("Push Record", func(t *testing.T) { - // Push the record - recordRef, err := store.Push(ctx, record) - require.NoError(t, err, "Failed to push record") - require.NotNil(t, recordRef, "Record reference should not be nil") - require.NotEmpty(t, recordRef.GetCid(), "Record CID should not be empty") - - t.Logf("Pushed record with CID: %s", recordRef.GetCid()) - }) - - t.Run("Verify CID Tag Generated", func(t *testing.T) { - // Give registry a moment to process - time.Sleep(1 * time.Second) - - tags := getRegistryTags(ctx, t) - require.NotEmpty(t, tags, "Registry should contain tags after push") - - t.Logf("Found %d tags in registry: %v", len(tags), tags) - - // With CID-only tagging, we should have exactly one tag: the CID - expectedCID := record.GetCid() - require.NotEmpty(t, expectedCID, "Record should have a valid CID") - - // Verify the CID tag exists in registry - var hasCIDTag bool - - for _, tag := range tags { - if tag == expectedCID { - hasCIDTag = true - - break - } - } - - assert.True(t, hasCIDTag, "Registry should contain the CID tag: %s", expectedCID) - assert.Len(t, tags, 1, "Should have exactly one CID tag, found: %v", tags) - }) - - t.Run("Verify Manifest Annotations", func(t *testing.T) { - // Test with CID tag (the only tag we create now) - manifest := getManifest(ctx, t, record.GetCid()) - - // Check manifest structure - require.Contains(t, manifest, "annotations", "Manifest should contain annotations") - annotations, ok := manifest["annotations"].(map[string]interface{}) - require.True(t, ok, "Annotations should be a map") - - t.Logf("Found %d manifest annotations", len(annotations)) - - // Verify core annotations - expectedAnnotations := map[string]string{ - "org.agntcy.dir/type": "record", - "org.agntcy.dir/name": "integration-test-agent", - "org.agntcy.dir/version": "v1.0.0", - "org.agntcy.dir/description": "Integration test agent for OCI storage", - "org.agntcy.dir/oasf-version": "v0.3.1", - "org.agntcy.dir/schema-version": "v0.3.1", - "org.agntcy.dir/created-at": "2023-01-01T00:00:00Z", - "org.agntcy.dir/authors": "integration-test@example.com", - // NOTE: V1 skills use "categoryName/className" hierarchical format - "org.agntcy.dir/skills": 
"nlp/processing,ml/inference", - "org.agntcy.dir/locator-types": "docker,helm", - "org.agntcy.dir/module-names": "security,monitoring", - "org.agntcy.dir/signed": "false", - } - - for key, expectedValue := range expectedAnnotations { - actualValue, exists := annotations[key] - assert.True(t, exists, "Annotation %s should exist", key) - assert.Equal(t, expectedValue, actualValue, "Annotation %s should have correct value", key) - } - - // Verify custom annotations - customAnnotations := map[string]string{ - "org.agntcy.dir/custom.team": "integration-test", - "org.agntcy.dir/custom.environment": "test", - "org.agntcy.dir/custom.project": "oci-storage", - } - - for key, expectedValue := range customAnnotations { - actualValue, exists := annotations[key] - assert.True(t, exists, "Custom annotation %s should exist", key) - assert.Equal(t, expectedValue, actualValue, "Custom annotation %s should have correct value", key) - } - }) - - // Note: Descriptor annotations removed during CID-only refactoring - // Layer descriptors now only contain basic fields: mediaType, digest, size - - t.Run("Lookup Record", func(t *testing.T) { - recordRef := &corev1.RecordRef{Cid: record.GetCid()} - - meta, err := store.Lookup(ctx, recordRef) - require.NoError(t, err, "Failed to lookup record") - require.NotNil(t, meta, "Lookup should return metadata") - - t.Logf("Lookup returned metadata with %d annotations", len(meta.GetAnnotations())) - - // Verify metadata contains expected fields - assert.Equal(t, "integration-test-agent", meta.GetAnnotations()["name"]) - assert.Equal(t, "v1.0.0", meta.GetAnnotations()["version"]) - // NOTE: V1 skills use "categoryName/className" hierarchical format - assert.Equal(t, "nlp/processing,ml/inference", meta.GetAnnotations()["skills"]) - assert.Equal(t, "v0.3.1", meta.GetSchemaVersion()) - assert.Equal(t, "2023-01-01T00:00:00Z", meta.GetCreatedAt()) - }) - - t.Run("Pull Record", func(t *testing.T) { - recordRef := &corev1.RecordRef{Cid: record.GetCid()} - - pulledRecord, err := store.Pull(ctx, recordRef) - require.NoError(t, err, "Failed to pull record") - require.NotNil(t, pulledRecord, "Pull should return record") - - // Verify pulled record matches original - decodedOriginalAgent, _ := record.Decode() - originalAgent := decodedOriginalAgent.GetV1Alpha0() - decodedPulledAgent, _ := pulledRecord.Decode() - pulledAgent := decodedPulledAgent.GetV1Alpha0() - - assert.Equal(t, originalAgent.GetName(), pulledAgent.GetName()) - assert.Equal(t, originalAgent.GetVersion(), pulledAgent.GetVersion()) - assert.Equal(t, originalAgent.GetDescription(), pulledAgent.GetDescription()) - assert.Len(t, pulledAgent.GetSkills(), len(originalAgent.GetSkills())) - assert.Len(t, pulledAgent.GetLocators(), len(originalAgent.GetLocators())) - assert.Len(t, pulledAgent.GetExtensions(), len(originalAgent.GetExtensions())) - - t.Logf("Successfully pulled and verified record integrity") - }) - - t.Run("CID Tag Reconstruction", func(t *testing.T) { - // Get CID for reconstruction - expectedCID := record.GetCid() - require.NotEmpty(t, expectedCID, "Record should have a valid CID") - - // In CID-only approach, reconstruction just returns the CID - reconstructedTags := []string{expectedCID} - require.NotEmpty(t, reconstructedTags, "Should reconstruct CID tag") - - t.Logf("Reconstructed CID tag: %v", reconstructedTags) - - // Verify only CID tag is reconstructed - assert.Len(t, reconstructedTags, 1, "Should reconstruct exactly one CID tag") - assert.Equal(t, expectedCID, reconstructedTags[0], "Reconstructed tag 
should be the CID") - }) - - t.Run("Duplicate Push Handling", func(t *testing.T) { - // Push the same record again - recordRef, err := store.Push(ctx, record) - require.NoError(t, err, "Duplicate push should not fail") - require.NotNil(t, recordRef, "Duplicate push should return reference") - assert.Equal(t, record.GetCid(), recordRef.GetCid(), "CID should remain the same") - - t.Logf("Duplicate push handled correctly for CID: %s", recordRef.GetCid()) - }) -} - -// TestIntegrationTagStrategy removed - no longer needed with CID-only tagging +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "testing" + "time" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + corev1 "github.com/agntcy/dir/api/core/v1" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + // Integration test configuration. + integrationRegistryAddress = "localhost:5555" + integrationRepositoryName = "integration-test" + integrationTimeout = 30 * time.Minute +) + +// Integration test configuration. +var integrationConfig = ociconfig.Config{ + RegistryAddress: integrationRegistryAddress, + RepositoryName: integrationRepositoryName, + AuthConfig: ociconfig.AuthConfig{ + Insecure: true, // Required for local zot registry + }, +} + +// createTestRecord creates a comprehensive test record for integration testing. +func createTestRecord() *corev1.Record { + return corev1.New(&typesv1alpha0.Record{ + Name: "integration-test-agent", + Version: "v1.0.0", + Description: "Integration test agent for OCI storage", + SchemaVersion: "v0.3.1", + CreatedAt: "2023-01-01T00:00:00Z", + Authors: []string{"integration-test@example.com"}, + Skills: []*typesv1alpha0.Skill{ + {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, + {CategoryName: stringPtr("ml"), ClassName: stringPtr("inference")}, + }, + Locators: []*typesv1alpha0.Locator{ + {Type: "docker"}, + {Type: "helm"}, + }, + Extensions: []*typesv1alpha0.Extension{ + {Name: "security"}, + {Name: "monitoring"}, + }, + Annotations: map[string]string{ + "team": "integration-test", + "environment": "test", + "project": "oci-storage", + }, + }) +} + +// setupIntegrationStore creates a store connected to the local zot registry. +func setupIntegrationStore(t *testing.T) types.StoreAPI { + t.Helper() + + // Check if zot registry is available + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + client := &http.Client{Timeout: 2 * time.Second} + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://"+integrationRegistryAddress+"/v2/", nil) + require.NoError(t, err, "Failed to create registry health check request") + + resp, err := client.Do(req) + if err != nil { + t.Skip("Zot registry not available at localhost:5555. Start with manual docker command or task server:store:start") + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Skipf("Zot registry health check failed with status: %d", resp.StatusCode) + } + + // Create store + store, err := New(integrationConfig) + require.NoError(t, err, "Failed to create integration store") + + return store +} + +// getRegistryTags fetches all tags for the repository from zot registry. 
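// The URL built below follows the OCI Distribution Spec tags endpoint. With
// this file's configuration the exchange looks roughly like this (the tag
// value stands in for a real record CID):
//
//	GET http://localhost:5555/v2/integration-test/tags/list
//	200 {"name":"integration-test","tags":["<record-cid>"]}
//
// A 404 simply means the repository has not been created yet, which the
// function below treats as an empty tag list.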
+func getRegistryTags(ctx context.Context, t *testing.T) []string { + t.Helper() + + client := &http.Client{Timeout: 5 * time.Second} + url := fmt.Sprintf("http://%s/v2/%s/tags/list", integrationRegistryAddress, integrationRepositoryName) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + require.NoError(t, err, "Failed to create tags request") + + resp, err := client.Do(req) + require.NoError(t, err, "Failed to fetch tags from registry") + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return []string{} // Repository doesn't exist yet + } + + require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status when fetching tags") + + var response struct { + Name string `json:"name"` + Tags []string `json:"tags"` + } + + err = json.NewDecoder(resp.Body).Decode(&response) + require.NoError(t, err, "Failed to decode tags response") + + return response.Tags +} + +// getManifest fetches manifest for a specific tag from zot registry. +func getManifest(ctx context.Context, t *testing.T, tag string) map[string]interface{} { + t.Helper() + + client := &http.Client{Timeout: 5 * time.Second} + url := fmt.Sprintf("http://%s/v2/%s/manifests/%s", integrationRegistryAddress, integrationRepositoryName, tag) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + require.NoError(t, err, "Failed to create manifest request") + req.Header.Set("Accept", "application/vnd.oci.image.manifest.v1+json") + + resp, err := client.Do(req) + require.NoError(t, err, "Failed to fetch manifest from registry") + + defer resp.Body.Close() + + require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status when fetching manifest") + + var manifest map[string]interface{} + + err = json.NewDecoder(resp.Body).Decode(&manifest) + require.NoError(t, err, "Failed to decode manifest response") + + return manifest +} + +//nolint:maintidx // Function handles multiple test cases with justified complexity +func TestIntegrationOCIStoreWorkflow(t *testing.T) { + // NOTE: This integration test uses V1 records where skills have "categoryName/className" format + // This differs from V2/V3 which use simple skill names + ctx, cancel := context.WithTimeout(t.Context(), integrationTimeout) + defer cancel() + + store := setupIntegrationStore(t) + record := createTestRecord() + + t.Run("Push Record", func(t *testing.T) { + // Push the record + recordRef, err := store.Push(ctx, record) + require.NoError(t, err, "Failed to push record") + require.NotNil(t, recordRef, "Record reference should not be nil") + require.NotEmpty(t, recordRef.GetCid(), "Record CID should not be empty") + + t.Logf("Pushed record with CID: %s", recordRef.GetCid()) + }) + + t.Run("Verify CID Tag Generated", func(t *testing.T) { + // Give registry a moment to process + time.Sleep(1 * time.Second) + + tags := getRegistryTags(ctx, t) + require.NotEmpty(t, tags, "Registry should contain tags after push") + + t.Logf("Found %d tags in registry: %v", len(tags), tags) + + // With CID-only tagging, we should have exactly one tag: the CID + expectedCID := record.GetCid() + require.NotEmpty(t, expectedCID, "Record should have a valid CID") + + // Verify the CID tag exists in registry + var hasCIDTag bool + + for _, tag := range tags { + if tag == expectedCID { + hasCIDTag = true + + break + } + } + + assert.True(t, hasCIDTag, "Registry should contain the CID tag: %s", expectedCID) + assert.Len(t, tags, 1, "Should have exactly one CID tag, found: %v", tags) + }) + + t.Run("Verify Manifest Annotations", func(t 
*testing.T) { + // Test with CID tag (the only tag we create now) + manifest := getManifest(ctx, t, record.GetCid()) + + // Check manifest structure + require.Contains(t, manifest, "annotations", "Manifest should contain annotations") + annotations, ok := manifest["annotations"].(map[string]interface{}) + require.True(t, ok, "Annotations should be a map") + + t.Logf("Found %d manifest annotations", len(annotations)) + + // Verify core annotations + expectedAnnotations := map[string]string{ + "org.agntcy.dir/type": "record", + "org.agntcy.dir/name": "integration-test-agent", + "org.agntcy.dir/version": "v1.0.0", + "org.agntcy.dir/description": "Integration test agent for OCI storage", + "org.agntcy.dir/oasf-version": "v0.3.1", + "org.agntcy.dir/schema-version": "v0.3.1", + "org.agntcy.dir/created-at": "2023-01-01T00:00:00Z", + "org.agntcy.dir/authors": "integration-test@example.com", + // NOTE: V1 skills use "categoryName/className" hierarchical format + "org.agntcy.dir/skills": "nlp/processing,ml/inference", + "org.agntcy.dir/locator-types": "docker,helm", + "org.agntcy.dir/module-names": "security,monitoring", + "org.agntcy.dir/signed": "false", + } + + for key, expectedValue := range expectedAnnotations { + actualValue, exists := annotations[key] + assert.True(t, exists, "Annotation %s should exist", key) + assert.Equal(t, expectedValue, actualValue, "Annotation %s should have correct value", key) + } + + // Verify custom annotations + customAnnotations := map[string]string{ + "org.agntcy.dir/custom.team": "integration-test", + "org.agntcy.dir/custom.environment": "test", + "org.agntcy.dir/custom.project": "oci-storage", + } + + for key, expectedValue := range customAnnotations { + actualValue, exists := annotations[key] + assert.True(t, exists, "Custom annotation %s should exist", key) + assert.Equal(t, expectedValue, actualValue, "Custom annotation %s should have correct value", key) + } + }) + + // Note: Descriptor annotations removed during CID-only refactoring + // Layer descriptors now only contain basic fields: mediaType, digest, size + + t.Run("Lookup Record", func(t *testing.T) { + recordRef := &corev1.RecordRef{Cid: record.GetCid()} + + meta, err := store.Lookup(ctx, recordRef) + require.NoError(t, err, "Failed to lookup record") + require.NotNil(t, meta, "Lookup should return metadata") + + t.Logf("Lookup returned metadata with %d annotations", len(meta.GetAnnotations())) + + // Verify metadata contains expected fields + assert.Equal(t, "integration-test-agent", meta.GetAnnotations()["name"]) + assert.Equal(t, "v1.0.0", meta.GetAnnotations()["version"]) + // NOTE: V1 skills use "categoryName/className" hierarchical format + assert.Equal(t, "nlp/processing,ml/inference", meta.GetAnnotations()["skills"]) + assert.Equal(t, "v0.3.1", meta.GetSchemaVersion()) + assert.Equal(t, "2023-01-01T00:00:00Z", meta.GetCreatedAt()) + }) + + t.Run("Pull Record", func(t *testing.T) { + recordRef := &corev1.RecordRef{Cid: record.GetCid()} + + pulledRecord, err := store.Pull(ctx, recordRef) + require.NoError(t, err, "Failed to pull record") + require.NotNil(t, pulledRecord, "Pull should return record") + + // Verify pulled record matches original + decodedOriginalAgent, _ := record.Decode() + originalAgent := decodedOriginalAgent.GetV1Alpha0() + decodedPulledAgent, _ := pulledRecord.Decode() + pulledAgent := decodedPulledAgent.GetV1Alpha0() + + assert.Equal(t, originalAgent.GetName(), pulledAgent.GetName()) + assert.Equal(t, originalAgent.GetVersion(), pulledAgent.GetVersion()) + assert.Equal(t, 
originalAgent.GetDescription(), pulledAgent.GetDescription()) + assert.Len(t, pulledAgent.GetSkills(), len(originalAgent.GetSkills())) + assert.Len(t, pulledAgent.GetLocators(), len(originalAgent.GetLocators())) + assert.Len(t, pulledAgent.GetExtensions(), len(originalAgent.GetExtensions())) + + t.Logf("Successfully pulled and verified record integrity") + }) + + t.Run("CID Tag Reconstruction", func(t *testing.T) { + // Get CID for reconstruction + expectedCID := record.GetCid() + require.NotEmpty(t, expectedCID, "Record should have a valid CID") + + // In CID-only approach, reconstruction just returns the CID + reconstructedTags := []string{expectedCID} + require.NotEmpty(t, reconstructedTags, "Should reconstruct CID tag") + + t.Logf("Reconstructed CID tag: %v", reconstructedTags) + + // Verify only CID tag is reconstructed + assert.Len(t, reconstructedTags, 1, "Should reconstruct exactly one CID tag") + assert.Equal(t, expectedCID, reconstructedTags[0], "Reconstructed tag should be the CID") + }) + + t.Run("Duplicate Push Handling", func(t *testing.T) { + // Push the same record again + recordRef, err := store.Push(ctx, record) + require.NoError(t, err, "Duplicate push should not fail") + require.NotNil(t, recordRef, "Duplicate push should return reference") + assert.Equal(t, record.GetCid(), recordRef.GetCid(), "CID should remain the same") + + t.Logf("Duplicate push handled correctly for CID: %s", recordRef.GetCid()) + }) +} + +// TestIntegrationTagStrategy removed - no longer needed with CID-only tagging diff --git a/server/store/oci/internal.go b/server/store/oci/internal.go index 31c9b3c51..65d8171b4 100644 --- a/server/store/oci/internal.go +++ b/server/store/oci/internal.go @@ -1,214 +1,214 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "context" - "encoding/json" - "fmt" - "io" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/utils/logging" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "oras.land/oras-go/v2/content/oci" - "oras.land/oras-go/v2/registry/remote" -) - -var internalLogger = logging.Logger("store/oci/internal") - -// validateRecordRef performs common input validation for record reference operations. -// This eliminates duplication across Lookup, Pull, and Delete methods. -func validateRecordRef(ref *corev1.RecordRef) error { - if ref == nil { - return status.Error(codes.InvalidArgument, "record reference cannot be nil") //nolint:wrapcheck - } - - if ref.GetCid() == "" { - return status.Error(codes.InvalidArgument, "record CID cannot be empty") //nolint:wrapcheck - } - - return nil -} - -// fetchAndParseManifest is a shared helper function that fetches and parses manifests -// for both Lookup and Pull operations, eliminating code duplication. 
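// For orientation, the document this helper resolves and unmarshals is an
// OCI image manifest. An abridged sketch, with annotation values matching
// what extractManifestAnnotations writes (digest and size are placeholders):
//
//	{
//	  "schemaVersion": 2,
//	  "mediaType": "application/vnd.oci.image.manifest.v1+json",
//	  "layers": [{"mediaType": "...", "digest": "sha256:...", "size": 0}],
//	  "annotations": {
//	    "org.agntcy.dir/type": "record",
//	    "org.agntcy.dir/name": "integration-test-agent",
//	    "org.agntcy.dir/signed": "false"
//	  }
//	}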
-func (s *store) fetchAndParseManifest(ctx context.Context, cid string) (*ocispec.Manifest, *ocispec.Descriptor, error) { - // Resolve manifest from remote tag (this also checks existence and validates CID format) - manifestDesc, err := s.repo.Resolve(ctx, cid) - if err != nil { - internalLogger.Debug("Failed to resolve manifest", "cid", cid, "error", err) - - return nil, nil, status.Errorf(codes.NotFound, "record not found: %s", cid) - } - - internalLogger.Debug("Manifest resolved successfully", "cid", cid, "digest", manifestDesc.Digest.String()) - - manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, manifestDesc) - if err != nil { - return nil, nil, err - } - - return manifest, &manifestDesc, nil -} - -// fetchAndParseManifestFromDescriptor fetches and parses a manifest when you already have the descriptor. -func (s *store) fetchAndParseManifestFromDescriptor(ctx context.Context, manifestDesc ocispec.Descriptor) (*ocispec.Manifest, error) { - // Validate manifest size if available - if manifestDesc.Size > 0 { - internalLogger.Debug("Manifest size from descriptor", "cid", manifestDesc.Digest.String(), "size", manifestDesc.Size) - } - - // Fetch manifest from remote - manifestRd, err := s.repo.Fetch(ctx, manifestDesc) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to fetch manifest for %s: %v", manifestDesc.Digest.String(), err) - } - defer manifestRd.Close() - - // Read manifest data - manifestData, err := io.ReadAll(manifestRd) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read manifest data for %s: %v", manifestDesc.Digest.String(), err) - } - - // Validate manifest size matches descriptor - if manifestDesc.Size > 0 && int64(len(manifestData)) != manifestDesc.Size { - internalLogger.Warn("Manifest size mismatch", - "cid", manifestDesc.Digest.String(), - "expected", manifestDesc.Size, - "actual", len(manifestData)) - } - - // Parse manifest - var manifest ocispec.Manifest - if err := json.Unmarshal(manifestData, &manifest); err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal manifest for %s: %v", manifestDesc.Digest.String(), err) - } - - return &manifest, nil -} - -// Tag cleanup functions removed - OCI registry garbage collection handles dangling tags after manifest deletion - -// deleteFromOCIStore handles deletion of records from an OCI store. 
-func (s *store) deleteFromOCIStore(ctx context.Context, ref *corev1.RecordRef) error { - cid := ref.GetCid() - - store, ok := s.repo.(*oci.Store) - if !ok { - return status.Errorf(codes.Internal, "expected *oci.Store, got %T", s.repo) - } - - internalLogger.Debug("Starting OCI store deletion", "cid", cid) - - var errors []string - - // Phase 1: Delete manifest (tags will be cleaned up by OCI GC) - internalLogger.Debug("Phase 1: Deleting manifest", "cid", cid) - - manifestDesc, err := s.repo.Resolve(ctx, cid) - if err != nil { - // Manifest might already be gone - this is not necessarily an error - internalLogger.Debug("Failed to resolve manifest during delete (may already be deleted)", "cid", cid, "error", err) - errors = append(errors, fmt.Sprintf("manifest resolve: %v", err)) - } else { - if err := store.Delete(ctx, manifestDesc); err != nil { - internalLogger.Warn("Failed to delete manifest", "cid", cid, "error", err) - errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) - } else { - internalLogger.Debug("Manifest deleted successfully", "cid", cid, "digest", manifestDesc.Digest.String()) - } - } - - // Phase 2: Remove blob data (local store - we have full control) - internalLogger.Debug("Phase 2: Deleting blob data", "cid", cid) - - if err := s.deleteBlobForLocalStore(ctx, cid, store); err != nil { - internalLogger.Warn("Failed to delete blob", "cid", cid, "error", err) - errors = append(errors, fmt.Sprintf("blob delete: %v", err)) - } - - // Log summary - if len(errors) > 0 { - // For local store, we might want to return an error if critical operations failed - // But continue with best-effort approach for now - internalLogger.Warn("Partial delete completed with some errors", "cid", cid, "errors", errors) - } else { - internalLogger.Info("Record deleted successfully from OCI store", "cid", cid) - } - - return nil // Best effort - don't fail on partial cleanup -} - -// deleteBlobForLocalStore safely deletes blob data from local OCI store using new CID utility. -func (s *store) deleteBlobForLocalStore(ctx context.Context, cid string, store *oci.Store) error { - // Convert CID to digest using our new utility function - ociDigest, err := corev1.ConvertCIDToDigest(cid) - if err != nil { - return fmt.Errorf("failed to convert CID to digest: %w", err) - } - - blobDesc := ocispec.Descriptor{ - Digest: ociDigest, - } - - if err := store.Delete(ctx, blobDesc); err != nil { - return fmt.Errorf("failed to delete blob: %w", err) - } - - internalLogger.Debug("Blob deleted successfully", "cid", cid, "digest", ociDigest.String()) - - return nil -} - -// deleteFromRemoteRepository handles deletion of records from a remote repository. 
-func (s *store) deleteFromRemoteRepository(ctx context.Context, ref *corev1.RecordRef) error { - cid := ref.GetCid() - - repo, ok := s.repo.(*remote.Repository) - if !ok { - return status.Errorf(codes.Internal, "expected *remote.Repository, got %T", s.repo) - } - - internalLogger.Debug("Starting remote repository deletion", "cid", cid) - - var errors []string - - // Phase 1: Delete manifest (tags will be cleaned up by OCI GC) - internalLogger.Debug("Phase 1: Deleting manifest", "cid", cid) - - manifestDesc, err := s.repo.Resolve(ctx, cid) - if err != nil { - internalLogger.Debug("Failed to resolve manifest during delete (may already be deleted)", "cid", cid, "error", err) - errors = append(errors, fmt.Sprintf("manifest resolve: %v", err)) - } else { - if err := repo.Manifests().Delete(ctx, manifestDesc); err != nil { - internalLogger.Warn("Failed to delete manifest", "cid", cid, "error", err) - errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) - } else { - internalLogger.Debug("Manifest deleted successfully", "cid", cid, "digest", manifestDesc.Digest.String()) - } - } - - // Phase 2: Skip blob deletion for remote registries (best practice) - // Most remote registries handle blob cleanup via garbage collection - internalLogger.Debug("Phase 2: Skipping blob deletion (handled by registry GC)", "cid", cid) - internalLogger.Info("Blob cleanup skipped for remote registry - will be handled by garbage collection", - "cid", cid, - "note", "This is the recommended approach for remote registries") - - // Log summary - if len(errors) > 0 { - // For remote registries, partial failure is common and expected - // Many operations may not be supported, but this is normal - internalLogger.Warn("Partial delete completed with some errors", "cid", cid, "errors", errors) - } else { - internalLogger.Info("Record deletion completed successfully", "cid", cid) - } - - return nil // Best effort - remote registries have limited delete capabilities -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "context" + "encoding/json" + "fmt" + "io" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/utils/logging" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "oras.land/oras-go/v2/content/oci" + "oras.land/oras-go/v2/registry/remote" +) + +var internalLogger = logging.Logger("store/oci/internal") + +// validateRecordRef performs common input validation for record reference operations. +// This eliminates duplication across Lookup, Pull, and Delete methods. +func validateRecordRef(ref *corev1.RecordRef) error { + if ref == nil { + return status.Error(codes.InvalidArgument, "record reference cannot be nil") //nolint:wrapcheck + } + + if ref.GetCid() == "" { + return status.Error(codes.InvalidArgument, "record CID cannot be empty") //nolint:wrapcheck + } + + return nil +} + +// fetchAndParseManifest is a shared helper function that fetches and parses manifests +// for both Lookup and Pull operations, eliminating code duplication. 
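+//
+// A minimal caller sketch (hypothetical, for illustration only); both Lookup
+// and Pull reduce to this shape:
+//
+//	manifest, desc, err := s.fetchAndParseManifest(ctx, ref.GetCid())
+//	if err != nil {
+//		return nil, err // already a gRPC status error (e.g. codes.NotFound)
+//	}
+//	_ = manifest.Annotations // record metadata lives in manifest annotations
+//	_ = desc.Digest          // manifest digest, handy for logging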
+func (s *store) fetchAndParseManifest(ctx context.Context, cid string) (*ocispec.Manifest, *ocispec.Descriptor, error) { + // Resolve manifest from remote tag (this also checks existence and validates CID format) + manifestDesc, err := s.repo.Resolve(ctx, cid) + if err != nil { + internalLogger.Debug("Failed to resolve manifest", "cid", cid, "error", err) + + return nil, nil, status.Errorf(codes.NotFound, "record not found: %s", cid) + } + + internalLogger.Debug("Manifest resolved successfully", "cid", cid, "digest", manifestDesc.Digest.String()) + + manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, manifestDesc) + if err != nil { + return nil, nil, err + } + + return manifest, &manifestDesc, nil +} + +// fetchAndParseManifestFromDescriptor fetches and parses a manifest when you already have the descriptor. +func (s *store) fetchAndParseManifestFromDescriptor(ctx context.Context, manifestDesc ocispec.Descriptor) (*ocispec.Manifest, error) { + // Validate manifest size if available + if manifestDesc.Size > 0 { + internalLogger.Debug("Manifest size from descriptor", "cid", manifestDesc.Digest.String(), "size", manifestDesc.Size) + } + + // Fetch manifest from remote + manifestRd, err := s.repo.Fetch(ctx, manifestDesc) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to fetch manifest for %s: %v", manifestDesc.Digest.String(), err) + } + defer manifestRd.Close() + + // Read manifest data + manifestData, err := io.ReadAll(manifestRd) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to read manifest data for %s: %v", manifestDesc.Digest.String(), err) + } + + // Validate manifest size matches descriptor + if manifestDesc.Size > 0 && int64(len(manifestData)) != manifestDesc.Size { + internalLogger.Warn("Manifest size mismatch", + "cid", manifestDesc.Digest.String(), + "expected", manifestDesc.Size, + "actual", len(manifestData)) + } + + // Parse manifest + var manifest ocispec.Manifest + if err := json.Unmarshal(manifestData, &manifest); err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmarshal manifest for %s: %v", manifestDesc.Digest.String(), err) + } + + return &manifest, nil +} + +// Tag cleanup functions removed - OCI registry garbage collection handles dangling tags after manifest deletion + +// deleteFromOCIStore handles deletion of records from an OCI store. 
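+// Deletion is best-effort and runs in two phases: the manifest is deleted
+// first (dangling tags are left to the registry's garbage collection), then
+// the blob is removed directly, which is safe here because the local store
+// gives us full control over the blob lifecycle. Partial failures are logged
+// and collected rather than returned, so callers never fail on partial cleanup.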
+func (s *store) deleteFromOCIStore(ctx context.Context, ref *corev1.RecordRef) error { + cid := ref.GetCid() + + store, ok := s.repo.(*oci.Store) + if !ok { + return status.Errorf(codes.Internal, "expected *oci.Store, got %T", s.repo) + } + + internalLogger.Debug("Starting OCI store deletion", "cid", cid) + + var errors []string + + // Phase 1: Delete manifest (tags will be cleaned up by OCI GC) + internalLogger.Debug("Phase 1: Deleting manifest", "cid", cid) + + manifestDesc, err := s.repo.Resolve(ctx, cid) + if err != nil { + // Manifest might already be gone - this is not necessarily an error + internalLogger.Debug("Failed to resolve manifest during delete (may already be deleted)", "cid", cid, "error", err) + errors = append(errors, fmt.Sprintf("manifest resolve: %v", err)) + } else { + if err := store.Delete(ctx, manifestDesc); err != nil { + internalLogger.Warn("Failed to delete manifest", "cid", cid, "error", err) + errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) + } else { + internalLogger.Debug("Manifest deleted successfully", "cid", cid, "digest", manifestDesc.Digest.String()) + } + } + + // Phase 2: Remove blob data (local store - we have full control) + internalLogger.Debug("Phase 2: Deleting blob data", "cid", cid) + + if err := s.deleteBlobForLocalStore(ctx, cid, store); err != nil { + internalLogger.Warn("Failed to delete blob", "cid", cid, "error", err) + errors = append(errors, fmt.Sprintf("blob delete: %v", err)) + } + + // Log summary + if len(errors) > 0 { + // For local store, we might want to return an error if critical operations failed + // But continue with best-effort approach for now + internalLogger.Warn("Partial delete completed with some errors", "cid", cid, "errors", errors) + } else { + internalLogger.Info("Record deleted successfully from OCI store", "cid", cid) + } + + return nil // Best effort - don't fail on partial cleanup +} + +// deleteBlobForLocalStore safely deletes blob data from local OCI store using new CID utility. +func (s *store) deleteBlobForLocalStore(ctx context.Context, cid string, store *oci.Store) error { + // Convert CID to digest using our new utility function + ociDigest, err := corev1.ConvertCIDToDigest(cid) + if err != nil { + return fmt.Errorf("failed to convert CID to digest: %w", err) + } + + blobDesc := ocispec.Descriptor{ + Digest: ociDigest, + } + + if err := store.Delete(ctx, blobDesc); err != nil { + return fmt.Errorf("failed to delete blob: %w", err) + } + + internalLogger.Debug("Blob deleted successfully", "cid", cid, "digest", ociDigest.String()) + + return nil +} + +// deleteFromRemoteRepository handles deletion of records from a remote repository. 
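+// Unlike the local-store path, only the manifest is deleted here; blob removal
+// is intentionally skipped, since most remote registries either disallow
+// direct blob deletion or reclaim unreferenced blobs asynchronously through
+// their own garbage collection.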
+func (s *store) deleteFromRemoteRepository(ctx context.Context, ref *corev1.RecordRef) error { + cid := ref.GetCid() + + repo, ok := s.repo.(*remote.Repository) + if !ok { + return status.Errorf(codes.Internal, "expected *remote.Repository, got %T", s.repo) + } + + internalLogger.Debug("Starting remote repository deletion", "cid", cid) + + var errors []string + + // Phase 1: Delete manifest (tags will be cleaned up by OCI GC) + internalLogger.Debug("Phase 1: Deleting manifest", "cid", cid) + + manifestDesc, err := s.repo.Resolve(ctx, cid) + if err != nil { + internalLogger.Debug("Failed to resolve manifest during delete (may already be deleted)", "cid", cid, "error", err) + errors = append(errors, fmt.Sprintf("manifest resolve: %v", err)) + } else { + if err := repo.Manifests().Delete(ctx, manifestDesc); err != nil { + internalLogger.Warn("Failed to delete manifest", "cid", cid, "error", err) + errors = append(errors, fmt.Sprintf("manifest delete: %v", err)) + } else { + internalLogger.Debug("Manifest deleted successfully", "cid", cid, "digest", manifestDesc.Digest.String()) + } + } + + // Phase 2: Skip blob deletion for remote registries (best practice) + // Most remote registries handle blob cleanup via garbage collection + internalLogger.Debug("Phase 2: Skipping blob deletion (handled by registry GC)", "cid", cid) + internalLogger.Info("Blob cleanup skipped for remote registry - will be handled by garbage collection", + "cid", cid, + "note", "This is the recommended approach for remote registries") + + // Log summary + if len(errors) > 0 { + // For remote registries, partial failure is common and expected + // Many operations may not be supported, but this is normal + internalLogger.Warn("Partial delete completed with some errors", "cid", cid, "errors", errors) + } else { + internalLogger.Info("Record deletion completed successfully", "cid", cid) + } + + return nil // Best effort - remote registries have limited delete capabilities +} diff --git a/server/store/oci/oci.go b/server/store/oci/oci.go index aaddbd3cd..456edafda 100644 --- a/server/store/oci/oci.go +++ b/server/store/oci/oci.go @@ -1,428 +1,428 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "context" - "fmt" - "io" - "strings" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/datastore" - "github.com/agntcy/dir/server/store/cache" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" - "github.com/agntcy/dir/utils/zot" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "oras.land/oras-go/v2" - "oras.land/oras-go/v2/content/oci" - "oras.land/oras-go/v2/registry/remote" -) - -var logger = logging.Logger("store/oci") - -const ( - // maxTagRetries is the maximum number of retry attempts for Tag operations. - maxTagRetries = 3 - // initialRetryDelay is the initial delay before the first retry. - initialRetryDelay = 50 * time.Millisecond - // maxRetryDelay is the maximum delay between retries. - maxRetryDelay = 500 * time.Millisecond -) - -type store struct { - repo oras.GraphTarget - config ociconfig.Config -} - -// Compile-time interface checks to ensure store implements all capability interfaces. 
-var ( - _ types.StoreAPI = (*store)(nil) - _ types.ReferrerStoreAPI = (*store)(nil) - _ types.VerifierStore = (*store)(nil) - _ types.FullStore = (*store)(nil) -) - -func New(cfg ociconfig.Config) (types.StoreAPI, error) { - logger.Debug("Creating OCI store with config", "config", cfg) - - // if local dir used, return client for that local path. - // allows mounting of data via volumes - // allows S3 usage for backup store - if repoPath := cfg.LocalDir; repoPath != "" { - repo, err := oci.New(repoPath) - if err != nil { - return nil, fmt.Errorf("failed to create local repo: %w", err) - } - - return &store{ - repo: repo, - config: cfg, - }, nil - } - - repo, err := NewORASRepository(cfg) - if err != nil { - return nil, fmt.Errorf("failed to create remote repo: %w", err) - } - - // Create store API - store := &store{ - repo: repo, - config: cfg, - } - - // If no cache requested, return. - // Do not use in memory cache as it can get large. - if cfg.CacheDir == "" { - return store, nil - } - - // Create cache datastore - cacheDS, err := datastore.New(datastore.WithFsProvider(cfg.CacheDir)) - if err != nil { - return nil, fmt.Errorf("failed to create cache store: %w", err) - } - - // Return cached store - return cache.Wrap(store, cacheDS), nil -} - -// isNotFoundError checks if an error is a "not found" error from the registry. -func isNotFoundError(err error) bool { - if err == nil { - return false - } - - errMsg := err.Error() - - return strings.Contains(errMsg, "not found") || strings.Contains(errMsg, "NOT_FOUND") -} - -// tagWithRetry attempts to tag a manifest with exponential backoff retry logic. -// This is necessary because under concurrent load, oras.PackManifest may push the manifest -// to the registry, but it might not be immediately available when oras.Tag is called. 
-func (s *store) tagWithRetry(ctx context.Context, manifestDigest, tag string) error { - var lastErr error - - delay := initialRetryDelay - - for attempt := 0; attempt <= maxTagRetries; attempt++ { - if attempt > 0 { - logger.Debug("Retrying Tag operation", - "attempt", attempt, - "max_retries", maxTagRetries, - "delay", delay, - "manifest_digest", manifestDigest, - "tag", tag) - - // Wait before retrying - select { - case <-ctx.Done(): - return fmt.Errorf("context cancelled during tag retry: %w", ctx.Err()) - case <-time.After(delay): - } - - // Exponential backoff with cap - delay *= 2 - if delay > maxRetryDelay { - delay = maxRetryDelay - } - } - - // Attempt to tag the manifest - _, err := oras.Tag(ctx, s.repo, manifestDigest, tag) - if err == nil { - if attempt > 0 { - logger.Info("Tag operation succeeded after retry", - "attempt", attempt, - "manifest_digest", manifestDigest, - "tag", tag) - } - - return nil - } - - lastErr = err - - // Only retry on "not found" errors (transient race condition) - // For other errors, fail immediately - if !isNotFoundError(err) { - logger.Debug("Tag operation failed with non-retryable error", - "error", err, - "manifest_digest", manifestDigest, - "tag", tag) - - return fmt.Errorf("failed to tag manifest: %w", err) - } - - // Log the retryable error - logger.Debug("Tag operation failed with retryable error", - "attempt", attempt, - "error", err, - "manifest_digest", manifestDigest, - "tag", tag) - } - - // All retries exhausted - logger.Warn("Tag operation failed after all retries", - "max_retries", maxTagRetries, - "last_error", lastErr, - "manifest_digest", manifestDigest, - "tag", tag) - - return lastErr -} - -// Push record to the OCI registry -// -// This creates a blob, a manifest that points to that blob, and a tagged release for that manifest. -// The tag for the manifest is: . -// The tag for the blob is needed to link the actual record with its associated metadata. -// Note that metadata can be stored in a different store and only wrap this store. 
-// -// Ref: https://github.com/oras-project/oras-go/blob/main/docs/Modeling-Artifacts.md -func (s *store) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - logger.Debug("Pushing record to OCI store", "record", record) - - // Marshal the record using canonical JSON marshaling first - // This ensures consistent bytes for both CID calculation and storage - recordBytes, err := record.Marshal() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to marshal record: %v", err) - } - - // Step 1: Use oras.PushBytes to push the record data and get Layer Descriptor - layerDesc, err := oras.PushBytes(ctx, s.repo, "application/json", recordBytes) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to push record bytes: %v", err) - } - - // Step 2: Calculate CID from Layer Descriptor's digest using our new utility function - recordCID, err := corev1.ConvertDigestToCID(layerDesc.Digest) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to convert digest to CID: %v", err) - } - - // Validate consistency: CID from ORAS digest should match CID from record - expectedCID := record.GetCid() - if recordCID != expectedCID { - return nil, status.Errorf(codes.Internal, - "CID mismatch: OCI digest CID (%s) != Record CID (%s)", - recordCID, expectedCID) - } - - logger.Debug("CID validation successful", - "cid", recordCID, - "digest", layerDesc.Digest.String(), - "validation", "ORAS digest CID matches Record CID") - - logger.Debug("Calculated CID from ORAS digest", "cid", recordCID, "digest", layerDesc.Digest.String()) - - // Create record reference - recordRef := &corev1.RecordRef{Cid: recordCID} - - // Check if record already exists - if _, err := s.Lookup(ctx, recordRef); err == nil { - logger.Info("Record already exists in OCI store", "cid", recordCID) - - return recordRef, nil - } - - // Step 3: Construct manifest annotations and add CID to annotations - manifestAnnotations := extractManifestAnnotations(record) - // Add the calculated CID to manifest annotations for discovery - manifestAnnotations[ManifestKeyCid] = recordCID - - // Step 4: Pack manifest (in-memory only) - manifestDesc, err := oras.PackManifest(ctx, s.repo, oras.PackManifestVersion1_1, ocispec.MediaTypeImageManifest, - oras.PackManifestOptions{ - ManifestAnnotations: manifestAnnotations, - Layers: []ocispec.Descriptor{ - layerDesc, - }, - }, - ) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to pack manifest: %v", err) - } - - // Step 5: Create CID tag for content-addressable storage - cidTag := recordCID - logger.Debug("Generated CID tag", "cid", recordCID, "tag", cidTag) - - // Step 6: Tag the manifest with CID tag (with retry logic for race conditions) - // => resolve manifest to record which can be looked up (lookup) - // => allows pulling record directly (pull) - if err := s.tagWithRetry(ctx, manifestDesc.Digest.String(), cidTag); err != nil { - return nil, status.Errorf(codes.Internal, "failed to create CID tag: %v", err) - } - - logger.Info("Record pushed to OCI store successfully", "cid", recordCID, "tag", cidTag) - - // Return record reference - return recordRef, nil -} - -// Lookup checks if the ref exists as a tagged record. 
-func (s *store) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { - // Input validation using shared helper - if err := validateRecordRef(ref); err != nil { - return nil, err - } - - logger.Debug("Starting record lookup", "cid", ref.GetCid()) - - // Use shared helper to fetch and parse manifest (eliminates code duplication) - manifest, _, err := s.fetchAndParseManifest(ctx, ref.GetCid()) - if err != nil { - return nil, err // Error already has proper context from helper - } - - // Extract and validate record type from manifest metadata - recordType, ok := manifest.Annotations[manifestDirObjectTypeKey] - if !ok { - return nil, status.Errorf(codes.Internal, "record type not found in manifest annotations for CID %s: missing key %s", - ref.GetCid(), manifestDirObjectTypeKey) - } - - // Extract comprehensive metadata from manifest annotations using our enhanced parser - recordMeta := parseManifestAnnotations(manifest.Annotations) - - // Set the CID from the request (this is the primary identifier) - recordMeta.Cid = ref.GetCid() - - logger.Debug("Record metadata retrieved successfully", - "cid", ref.GetCid(), - "type", recordType, - "annotationCount", len(manifest.Annotations)) - - return recordMeta, nil -} - -func (s *store) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { - // Input validation using shared helper - if err := validateRecordRef(ref); err != nil { - return nil, err - } - - logger.Debug("Starting record pull", "cid", ref.GetCid()) - - // Use shared helper to fetch and parse manifest (eliminates code duplication) - manifest, manifestDesc, err := s.fetchAndParseManifest(ctx, ref.GetCid()) - if err != nil { - return nil, err // Error already has proper context from helper - } - - // Validate manifest has layers - if len(manifest.Layers) == 0 { - return nil, status.Errorf(codes.Internal, "manifest has no layers for CID %s", ref.GetCid()) - } - - // Handle multiple layers with warning - if len(manifest.Layers) > 1 { - logger.Warn("Manifest has multiple layers, using first layer", - "cid", ref.GetCid(), - "layerCount", len(manifest.Layers)) - } - - // Get the blob descriptor from the first layer - blobDesc := manifest.Layers[0] - - // Validate layer media type - if blobDesc.MediaType != "application/json" { - logger.Warn("Unexpected blob media type", - "cid", ref.GetCid(), - "expected", "application/json", - "actual", blobDesc.MediaType) - } - - logger.Debug("Fetching record blob", - "cid", ref.GetCid(), - "blobDigest", blobDesc.Digest.String(), - "blobSize", blobDesc.Size, - "mediaType", blobDesc.MediaType) - - // Fetch the record data using the correct blob descriptor from the manifest - reader, err := s.repo.Fetch(ctx, blobDesc) - if err != nil { - return nil, status.Errorf(codes.NotFound, "record blob not found for CID %s: %v", ref.GetCid(), err) - } - defer reader.Close() - - // Read all data from the reader - recordData, err := io.ReadAll(reader) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read record data for CID %s: %v", ref.GetCid(), err) - } - - // Validate blob size matches descriptor - if blobDesc.Size > 0 && int64(len(recordData)) != blobDesc.Size { - logger.Warn("Blob size mismatch", - "cid", ref.GetCid(), - "expected", blobDesc.Size, - "actual", len(recordData)) - } - - // Unmarshal canonical JSON data back to Record - record, err := corev1.UnmarshalRecord(recordData) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal record for CID %s: %v", 
ref.GetCid(), err) - } - - logger.Debug("Record pulled successfully", - "cid", ref.GetCid(), - "blobSize", len(recordData), - "blobDigest", blobDesc.Digest.String(), - "manifestDigest", manifestDesc.Digest.String()) - - return record, nil -} - -func (s *store) Delete(ctx context.Context, ref *corev1.RecordRef) error { - logger.Debug("Deleting record from OCI store", "ref", ref) - - // Input validation using shared helper - if err := validateRecordRef(ref); err != nil { - return err - } - - switch s.repo.(type) { - case *oci.Store: - return s.deleteFromOCIStore(ctx, ref) - case *remote.Repository: - return s.deleteFromRemoteRepository(ctx, ref) - default: - return status.Errorf(codes.FailedPrecondition, "unsupported repo type: %T", s.repo) - } -} - -// IsReady checks if the storage backend is ready to serve traffic. -// For local stores, always returns true. -// For remote OCI registries, checks Zot's /readyz endpoint to verify it's ready. -func (s *store) IsReady(ctx context.Context) bool { - // Local directory stores are always ready - if s.config.LocalDir != "" { - logger.Debug("Store ready: using local directory", "path", s.config.LocalDir) - - return true - } - - // For remote registries, check connectivity - _, ok := s.repo.(*remote.Repository) - if !ok { - // Not a remote repository (could be wrapped), assume ready - logger.Debug("Store ready: not a remote repository") - - return true - } - - // Use the zot utility package to check Zot's readiness - return zot.CheckReadiness(ctx, s.config.RegistryAddress, s.config.Insecure) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/datastore" + "github.com/agntcy/dir/server/store/cache" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" + "github.com/agntcy/dir/utils/zot" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "oras.land/oras-go/v2" + "oras.land/oras-go/v2/content/oci" + "oras.land/oras-go/v2/registry/remote" +) + +var logger = logging.Logger("store/oci") + +const ( + // maxTagRetries is the maximum number of retry attempts for Tag operations. + maxTagRetries = 3 + // initialRetryDelay is the initial delay before the first retry. + initialRetryDelay = 50 * time.Millisecond + // maxRetryDelay is the maximum delay between retries. + maxRetryDelay = 500 * time.Millisecond +) + +type store struct { + repo oras.GraphTarget + config ociconfig.Config +} + +// Compile-time interface checks to ensure store implements all capability interfaces. +var ( + _ types.StoreAPI = (*store)(nil) + _ types.ReferrerStoreAPI = (*store)(nil) + _ types.VerifierStore = (*store)(nil) + _ types.FullStore = (*store)(nil) +) + +func New(cfg ociconfig.Config) (types.StoreAPI, error) { + logger.Debug("Creating OCI store with config", "config", cfg) + + // if local dir used, return client for that local path. 
+ // allows mounting of data via volumes + // allows S3 usage for backup store + if repoPath := cfg.LocalDir; repoPath != "" { + repo, err := oci.New(repoPath) + if err != nil { + return nil, fmt.Errorf("failed to create local repo: %w", err) + } + + return &store{ + repo: repo, + config: cfg, + }, nil + } + + repo, err := NewORASRepository(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create remote repo: %w", err) + } + + // Create store API + store := &store{ + repo: repo, + config: cfg, + } + + // If no cache requested, return. + // Do not use in memory cache as it can get large. + if cfg.CacheDir == "" { + return store, nil + } + + // Create cache datastore + cacheDS, err := datastore.New(datastore.WithFsProvider(cfg.CacheDir)) + if err != nil { + return nil, fmt.Errorf("failed to create cache store: %w", err) + } + + // Return cached store + return cache.Wrap(store, cacheDS), nil +} + +// isNotFoundError checks if an error is a "not found" error from the registry. +func isNotFoundError(err error) bool { + if err == nil { + return false + } + + errMsg := err.Error() + + return strings.Contains(errMsg, "not found") || strings.Contains(errMsg, "NOT_FOUND") +} + +// tagWithRetry attempts to tag a manifest with exponential backoff retry logic. +// This is necessary because under concurrent load, oras.PackManifest may push the manifest +// to the registry, but it might not be immediately available when oras.Tag is called. +func (s *store) tagWithRetry(ctx context.Context, manifestDigest, tag string) error { + var lastErr error + + delay := initialRetryDelay + + for attempt := 0; attempt <= maxTagRetries; attempt++ { + if attempt > 0 { + logger.Debug("Retrying Tag operation", + "attempt", attempt, + "max_retries", maxTagRetries, + "delay", delay, + "manifest_digest", manifestDigest, + "tag", tag) + + // Wait before retrying + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled during tag retry: %w", ctx.Err()) + case <-time.After(delay): + } + + // Exponential backoff with cap + delay *= 2 + if delay > maxRetryDelay { + delay = maxRetryDelay + } + } + + // Attempt to tag the manifest + _, err := oras.Tag(ctx, s.repo, manifestDigest, tag) + if err == nil { + if attempt > 0 { + logger.Info("Tag operation succeeded after retry", + "attempt", attempt, + "manifest_digest", manifestDigest, + "tag", tag) + } + + return nil + } + + lastErr = err + + // Only retry on "not found" errors (transient race condition) + // For other errors, fail immediately + if !isNotFoundError(err) { + logger.Debug("Tag operation failed with non-retryable error", + "error", err, + "manifest_digest", manifestDigest, + "tag", tag) + + return fmt.Errorf("failed to tag manifest: %w", err) + } + + // Log the retryable error + logger.Debug("Tag operation failed with retryable error", + "attempt", attempt, + "error", err, + "manifest_digest", manifestDigest, + "tag", tag) + } + + // All retries exhausted + logger.Warn("Tag operation failed after all retries", + "max_retries", maxTagRetries, + "last_error", lastErr, + "manifest_digest", manifestDigest, + "tag", tag) + + return lastErr +} + +// Push record to the OCI registry +// +// This creates a blob, a manifest that points to that blob, and a tagged release for that manifest. +// The tag for the manifest is the record's CID. +// The tag for the blob is needed to link the actual record with its associated metadata. +// Note that metadata can be stored in a different store and only wrap this store.
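+//
+// Sketch of the resulting layout (illustrative only; digests abbreviated,
+// annotation keys follow the org.agntcy.dir/* convention used in this package):
+//
+//	tag "<CID>" -> manifest (annotations: org.agntcy.dir/type, /name, ...)
+//	                 layers[0]: application/json blob (canonical record bytes)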
+// +// Ref: https://github.com/oras-project/oras-go/blob/main/docs/Modeling-Artifacts.md +func (s *store) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + logger.Debug("Pushing record to OCI store", "record", record) + + // Marshal the record using canonical JSON marshaling first + // This ensures consistent bytes for both CID calculation and storage + recordBytes, err := record.Marshal() + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to marshal record: %v", err) + } + + // Step 1: Use oras.PushBytes to push the record data and get Layer Descriptor + layerDesc, err := oras.PushBytes(ctx, s.repo, "application/json", recordBytes) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to push record bytes: %v", err) + } + + // Step 2: Calculate CID from Layer Descriptor's digest using our new utility function + recordCID, err := corev1.ConvertDigestToCID(layerDesc.Digest) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert digest to CID: %v", err) + } + + // Validate consistency: CID from ORAS digest should match CID from record + expectedCID := record.GetCid() + if recordCID != expectedCID { + return nil, status.Errorf(codes.Internal, + "CID mismatch: OCI digest CID (%s) != Record CID (%s)", + recordCID, expectedCID) + } + + logger.Debug("CID validation successful", + "cid", recordCID, + "digest", layerDesc.Digest.String(), + "validation", "ORAS digest CID matches Record CID") + + logger.Debug("Calculated CID from ORAS digest", "cid", recordCID, "digest", layerDesc.Digest.String()) + + // Create record reference + recordRef := &corev1.RecordRef{Cid: recordCID} + + // Check if record already exists + if _, err := s.Lookup(ctx, recordRef); err == nil { + logger.Info("Record already exists in OCI store", "cid", recordCID) + + return recordRef, nil + } + + // Step 3: Construct manifest annotations and add CID to annotations + manifestAnnotations := extractManifestAnnotations(record) + // Add the calculated CID to manifest annotations for discovery + manifestAnnotations[ManifestKeyCid] = recordCID + + // Step 4: Pack manifest (in-memory only) + manifestDesc, err := oras.PackManifest(ctx, s.repo, oras.PackManifestVersion1_1, ocispec.MediaTypeImageManifest, + oras.PackManifestOptions{ + ManifestAnnotations: manifestAnnotations, + Layers: []ocispec.Descriptor{ + layerDesc, + }, + }, + ) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to pack manifest: %v", err) + } + + // Step 5: Create CID tag for content-addressable storage + cidTag := recordCID + logger.Debug("Generated CID tag", "cid", recordCID, "tag", cidTag) + + // Step 6: Tag the manifest with CID tag (with retry logic for race conditions) + // => resolve manifest to record which can be looked up (lookup) + // => allows pulling record directly (pull) + if err := s.tagWithRetry(ctx, manifestDesc.Digest.String(), cidTag); err != nil { + return nil, status.Errorf(codes.Internal, "failed to create CID tag: %v", err) + } + + logger.Info("Record pushed to OCI store successfully", "cid", recordCID, "tag", cidTag) + + // Return record reference + return recordRef, nil +} + +// Lookup checks if the ref exists as a tagged record. 
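+// It returns record metadata only; callers that need the full record follow up
+// with Pull. A hypothetical usage sketch:
+//
+//	meta, err := store.Lookup(ctx, &corev1.RecordRef{Cid: cid})
+//	if err != nil {
+//		return err // codes.NotFound when no manifest is tagged with this CID
+//	}
+//	record, err := store.Pull(ctx, &corev1.RecordRef{Cid: meta.GetCid()})
+//	_ = record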
+func (s *store) Lookup(ctx context.Context, ref *corev1.RecordRef) (*corev1.RecordMeta, error) { + // Input validation using shared helper + if err := validateRecordRef(ref); err != nil { + return nil, err + } + + logger.Debug("Starting record lookup", "cid", ref.GetCid()) + + // Use shared helper to fetch and parse manifest (eliminates code duplication) + manifest, _, err := s.fetchAndParseManifest(ctx, ref.GetCid()) + if err != nil { + return nil, err // Error already has proper context from helper + } + + // Extract and validate record type from manifest metadata + recordType, ok := manifest.Annotations[manifestDirObjectTypeKey] + if !ok { + return nil, status.Errorf(codes.Internal, "record type not found in manifest annotations for CID %s: missing key %s", + ref.GetCid(), manifestDirObjectTypeKey) + } + + // Extract comprehensive metadata from manifest annotations using our enhanced parser + recordMeta := parseManifestAnnotations(manifest.Annotations) + + // Set the CID from the request (this is the primary identifier) + recordMeta.Cid = ref.GetCid() + + logger.Debug("Record metadata retrieved successfully", + "cid", ref.GetCid(), + "type", recordType, + "annotationCount", len(manifest.Annotations)) + + return recordMeta, nil +} + +func (s *store) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) { + // Input validation using shared helper + if err := validateRecordRef(ref); err != nil { + return nil, err + } + + logger.Debug("Starting record pull", "cid", ref.GetCid()) + + // Use shared helper to fetch and parse manifest (eliminates code duplication) + manifest, manifestDesc, err := s.fetchAndParseManifest(ctx, ref.GetCid()) + if err != nil { + return nil, err // Error already has proper context from helper + } + + // Validate manifest has layers + if len(manifest.Layers) == 0 { + return nil, status.Errorf(codes.Internal, "manifest has no layers for CID %s", ref.GetCid()) + } + + // Handle multiple layers with warning + if len(manifest.Layers) > 1 { + logger.Warn("Manifest has multiple layers, using first layer", + "cid", ref.GetCid(), + "layerCount", len(manifest.Layers)) + } + + // Get the blob descriptor from the first layer + blobDesc := manifest.Layers[0] + + // Validate layer media type + if blobDesc.MediaType != "application/json" { + logger.Warn("Unexpected blob media type", + "cid", ref.GetCid(), + "expected", "application/json", + "actual", blobDesc.MediaType) + } + + logger.Debug("Fetching record blob", + "cid", ref.GetCid(), + "blobDigest", blobDesc.Digest.String(), + "blobSize", blobDesc.Size, + "mediaType", blobDesc.MediaType) + + // Fetch the record data using the correct blob descriptor from the manifest + reader, err := s.repo.Fetch(ctx, blobDesc) + if err != nil { + return nil, status.Errorf(codes.NotFound, "record blob not found for CID %s: %v", ref.GetCid(), err) + } + defer reader.Close() + + // Read all data from the reader + recordData, err := io.ReadAll(reader) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to read record data for CID %s: %v", ref.GetCid(), err) + } + + // Validate blob size matches descriptor + if blobDesc.Size > 0 && int64(len(recordData)) != blobDesc.Size { + logger.Warn("Blob size mismatch", + "cid", ref.GetCid(), + "expected", blobDesc.Size, + "actual", len(recordData)) + } + + // Unmarshal canonical JSON data back to Record + record, err := corev1.UnmarshalRecord(recordData) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmarshal record for CID %s: %v", 
ref.GetCid(), err) + } + + logger.Debug("Record pulled successfully", + "cid", ref.GetCid(), + "blobSize", len(recordData), + "blobDigest", blobDesc.Digest.String(), + "manifestDigest", manifestDesc.Digest.String()) + + return record, nil +} + +func (s *store) Delete(ctx context.Context, ref *corev1.RecordRef) error { + logger.Debug("Deleting record from OCI store", "ref", ref) + + // Input validation using shared helper + if err := validateRecordRef(ref); err != nil { + return err + } + + switch s.repo.(type) { + case *oci.Store: + return s.deleteFromOCIStore(ctx, ref) + case *remote.Repository: + return s.deleteFromRemoteRepository(ctx, ref) + default: + return status.Errorf(codes.FailedPrecondition, "unsupported repo type: %T", s.repo) + } +} + +// IsReady checks if the storage backend is ready to serve traffic. +// For local stores, always returns true. +// For remote OCI registries, checks Zot's /readyz endpoint to verify it's ready. +func (s *store) IsReady(ctx context.Context) bool { + // Local directory stores are always ready + if s.config.LocalDir != "" { + logger.Debug("Store ready: using local directory", "path", s.config.LocalDir) + + return true + } + + // For remote registries, check connectivity + _, ok := s.repo.(*remote.Repository) + if !ok { + // Not a remote repository (could be wrapped), assume ready + logger.Debug("Store ready: not a remote repository") + + return true + } + + // Use the zot utility package to check Zot's readiness + return zot.CheckReadiness(ctx, s.config.RegistryAddress, s.config.Insecure) +} diff --git a/server/store/oci/oci_test.go b/server/store/oci/oci_test.go index d8df90501..48ece2e31 100644 --- a/server/store/oci/oci_test.go +++ b/server/store/oci/oci_test.go @@ -1,368 +1,368 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -//nolint:testifylint -package oci - -import ( - "context" - "os" - "testing" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - corev1 "github.com/agntcy/dir/api/core/v1" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TODO: this should be configurable to unified Storage API test flow. -var ( - // test config. - testConfig = ociconfig.Config{ - LocalDir: os.TempDir(), // used for local test/bench - RegistryAddress: "localhost:5000", // used for remote test/bench - RepositoryName: "test-store", // used for remote test/bench - AuthConfig: ociconfig.AuthConfig{Insecure: true}, // used for remote test/bench - } - runLocal = true - // TODO: this may blow quickly when doing rapid benchmarking if not tested against fresh OCI instance. - runRemote = false - - // common test. 
- testCtx = context.Background() -) - -func TestStorePushLookupPullDelete(t *testing.T) { - store := loadLocalStore(t) - - agent := &typesv1alpha0.Record{ - Name: "test-agent", - SchemaVersion: "v0.3.1", - Description: "A test agent", - } - - record := corev1.New(agent) - - // Calculate CID for the record - recordCID := record.GetCid() - assert.NotEmpty(t, recordCID, "failed to calculate CID") - - // Push operation - recordRef, err := store.Push(testCtx, record) - assert.NoErrorf(t, err, "push failed") - assert.Equal(t, recordCID, recordRef.GetCid()) - - // Lookup operation - recordMeta, err := store.Lookup(testCtx, recordRef) - assert.NoErrorf(t, err, "lookup failed") - assert.Equal(t, recordCID, recordMeta.GetCid()) - - // Pull operation - pulledRecord, err := store.Pull(testCtx, recordRef) - assert.NoErrorf(t, err, "pull failed") - - pulledCID := pulledRecord.GetCid() - assert.NotEmpty(t, pulledCID, "failed to get pulled record CID") - assert.Equal(t, recordCID, pulledCID) - - // Verify the pulled agent data - decoded, _ := record.Decode() - pulledAgent := decoded.GetV1Alpha0() - assert.NotNil(t, pulledAgent, "pulled agent should not be nil") - assert.Equal(t, agent.GetName(), pulledAgent.GetName()) - assert.Equal(t, agent.GetSchemaVersion(), pulledAgent.GetSchemaVersion()) - assert.Equal(t, agent.GetDescription(), pulledAgent.GetDescription()) - - // Delete operation - err = store.Delete(testCtx, recordRef) - assert.NoErrorf(t, err, "delete failed") - - // Lookup should fail after delete - _, err = store.Lookup(testCtx, recordRef) - assert.Error(t, err, "lookup should fail after delete") - assert.ErrorContains(t, err, "not found") - - // Pull should also fail after delete - _, err = store.Pull(testCtx, recordRef) - assert.Error(t, err, "pull should fail after delete") - assert.ErrorContains(t, err, "not found") -} - -func BenchmarkLocalStore(b *testing.B) { - if !runLocal { - b.Skip() - } - - store := loadLocalStore(&testing.T{}) - for range b.N { - benchmarkStep(store) - } -} - -func BenchmarkRemoteStore(b *testing.B) { - if !runRemote { - b.Skip() - } - - store := loadRemoteStore(&testing.T{}) - for range b.N { - benchmarkStep(store) - } -} - -func benchmarkStep(store types.StoreAPI) { - // Create test record - agent := &typesv1alpha0.Record{ - Name: "bench-agent", - SchemaVersion: "v0.3.1", - Description: "A benchmark agent", - } - - record := corev1.New(agent) - - // Record is ready for push operation - - // Push operation - pushedRef, err := store.Push(testCtx, record) - if err != nil { - panic(err) - } - - // Lookup operation - fetchedMeta, err := store.Lookup(testCtx, pushedRef) - if err != nil { - panic(err) - } - - // Assert equal - if pushedRef.GetCid() != fetchedMeta.GetCid() { - panic("not equal lookup") - } -} - -func loadLocalStore(t *testing.T) types.StoreAPI { - t.Helper() - - // create tmp storage for test artifacts - tmpDir, err := os.MkdirTemp(testConfig.LocalDir, "test-oci-store-*") //nolint:usetesting - assert.NoErrorf(t, err, "failed to create test dir") - t.Cleanup(func() { - err := os.RemoveAll(tmpDir) - if err != nil { - t.Fatalf("failed to cleanup: %v", err) - } - }) - - // create local - store, err := New(ociconfig.Config{LocalDir: tmpDir}) - assert.NoErrorf(t, err, "failed to create local store") - - return store -} - -func loadRemoteStore(t *testing.T) types.StoreAPI { - t.Helper() - - // create remote - store, err := New( - ociconfig.Config{ - RegistryAddress: testConfig.RegistryAddress, - RepositoryName: testConfig.RepositoryName, - AuthConfig: 
testConfig.AuthConfig, - }) - assert.NoErrorf(t, err, "failed to create remote store") - - return store -} - -// TestAllVersionsSkillsAndLocatorsPreservation comprehensively tests skills and locators -// preservation across all OASF versions (v1, v2, v3) through OCI push/pull cycles. -// This addresses the reported issue where v3 record skills become empty after push/pull. -func TestAllVersionsSkillsAndLocatorsPreservation(t *testing.T) { - store := loadLocalStore(t) - - testCases := []struct { - name string - record *corev1.Record - expectedSkillCount int - expectedLocatorCount int - skillVerifier func(t *testing.T, record *corev1.Record) - locatorVerifier func(t *testing.T, record *corev1.Record) - }{ - { - name: "V1_Agent_CategoryClass_Skills", - record: corev1.New(&typesv1alpha0.Record{ - Name: "test-v1-agent", - Version: "1.0.0", - SchemaVersion: "v0.3.1", - Description: "Test v1 agent with hierarchical skills", - Skills: []*typesv1alpha0.Skill{ - { - CategoryName: stringPtr("Natural Language Processing"), - CategoryUid: 1, - ClassName: stringPtr("Text Completion"), - ClassUid: 10201, - }, - { - CategoryName: stringPtr("Machine Learning"), - CategoryUid: 2, - ClassName: stringPtr("Classification"), - ClassUid: 20301, - }, - }, - Locators: []*typesv1alpha0.Locator{ - { - Type: "docker-image", - Url: "ghcr.io/agntcy/test-v1-agent", - }, - { - Type: "helm-chart", - Url: "oci://registry.example.com/charts/test-agent", - }, - }, - }), - expectedSkillCount: 2, - expectedLocatorCount: 2, - skillVerifier: func(t *testing.T, record *corev1.Record) { - t.Helper() - - decoded, _ := record.Decode() - v1Agent := decoded.GetV1Alpha0() - require.NotNil(t, v1Agent, "should be v1 agent") - skills := v1Agent.GetSkills() - require.Len(t, skills, 2, "v1 should have 2 skills") - - // V1 uses category/class format - assert.Equal(t, "Natural Language Processing", skills[0].GetCategoryName()) - assert.Equal(t, "Text Completion", skills[0].GetClassName()) - assert.Equal(t, uint64(10201), skills[0].GetClassUid()) - - assert.Equal(t, "Machine Learning", skills[1].GetCategoryName()) - assert.Equal(t, "Classification", skills[1].GetClassName()) - assert.Equal(t, uint64(20301), skills[1].GetClassUid()) - }, - locatorVerifier: func(t *testing.T, record *corev1.Record) { - t.Helper() - - decoded, _ := record.Decode() - v1Agent := decoded.GetV1Alpha0() - locators := v1Agent.GetLocators() - require.Len(t, locators, 2, "v1 should have 2 locators") - - assert.Equal(t, "docker-image", locators[0].GetType()) - assert.Equal(t, "ghcr.io/agntcy/test-v1-agent", locators[0].GetUrl()) - - assert.Equal(t, "helm-chart", locators[1].GetType()) - assert.Equal(t, "oci://registry.example.com/charts/test-agent", locators[1].GetUrl()) - }, - }, - { - name: "V3_Record_Simple_Skills", - record: corev1.New(&typesv1alpha1.Record{ - Name: "test-v3-record", - Version: "3.0.0", - SchemaVersion: "0.7.0", - Description: "Test v3 record with simple skills", - Skills: []*typesv1alpha1.Skill{ - { - Name: "Natural Language Processing", - Id: 10201, - }, - { - Name: "Data Analysis", - Id: 20301, - }, - }, - Locators: []*typesv1alpha1.Locator{ - { - Type: "docker-image", - Url: "ghcr.io/agntcy/test-v3-record", - }, - { - Type: "oci-artifact", - Url: "oci://registry.example.com/artifacts/test-record", - }, - }, - }), - expectedSkillCount: 2, - expectedLocatorCount: 2, - skillVerifier: func(t *testing.T, record *corev1.Record) { - t.Helper() - - decoded, _ := record.Decode() - v3Record := decoded.GetV1Alpha1() - require.NotNil(t, v3Record, "should be 
v3 record") - skills := v3Record.GetSkills() - require.Len(t, skills, 2, "SKILLS ISSUE: v3 should have 2 skills but has %d", len(skills)) - - // V3 uses simple name/id format (same as v2) - assert.Equal(t, "Natural Language Processing", skills[0].GetName()) - assert.Equal(t, uint32(10201), skills[0].GetId()) - - assert.Equal(t, "Data Analysis", skills[1].GetName()) - assert.Equal(t, uint32(20301), skills[1].GetId()) - }, - locatorVerifier: func(t *testing.T, record *corev1.Record) { - t.Helper() - - decoded, _ := record.Decode() - v3Record := decoded.GetV1Alpha1() - locators := v3Record.GetLocators() - require.Len(t, locators, 2, "v3 should have 2 locators") - - assert.Equal(t, "docker-image", locators[0].GetType()) - assert.Equal(t, "ghcr.io/agntcy/test-v3-record", locators[0].GetUrl()) - - assert.Equal(t, "oci-artifact", locators[1].GetType()) - assert.Equal(t, "oci://registry.example.com/artifacts/test-record", locators[1].GetUrl()) - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Calculate CID for the original record - originalCID := tc.record.GetCid() - require.NotEmpty(t, originalCID, "failed to calculate CID for %s", tc.name) - - // Log original state - t.Logf("🔄 Testing %s:", tc.name) - t.Logf(" Original CID: %s", originalCID) - t.Logf(" Expected skills: %d, locators: %d", tc.expectedSkillCount, tc.expectedLocatorCount) - - // Verify original skills and locators using verifiers - tc.skillVerifier(t, tc.record) - tc.locatorVerifier(t, tc.record) - - // PUSH operation - recordRef, err := store.Push(testCtx, tc.record) - require.NoError(t, err, "push should succeed for %s", tc.name) - assert.Equal(t, originalCID, recordRef.GetCid(), "pushed CID should match original") - - // PULL operation - pulledRecord, err := store.Pull(testCtx, recordRef) - require.NoError(t, err, "pull should succeed for %s", tc.name) - - // Verify pulled record CID matches - pulledCID := pulledRecord.GetCid() - require.NotEmpty(t, pulledCID, "pulled record should have CID") - assert.Equal(t, originalCID, pulledCID, "pulled CID should match original") - - // CRITICAL TEST: Verify skills and locators are preserved after push/pull cycle - t.Logf(" Verifying skills preservation...") - tc.skillVerifier(t, pulledRecord) - - t.Logf(" Verifying locators preservation...") - tc.locatorVerifier(t, pulledRecord) - - t.Logf("✅ %s: Skills and locators preserved successfully", tc.name) - - // Cleanup - delete the record - err = store.Delete(testCtx, recordRef) - require.NoError(t, err, "cleanup delete should succeed for %s", tc.name) - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint:testifylint +package oci + +import ( + "context" + "os" + "testing" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + corev1 "github.com/agntcy/dir/api/core/v1" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TODO: this should be configurable to unified Storage API test flow. +var ( + // test config. 
+ testConfig = ociconfig.Config{ + LocalDir: os.TempDir(), // used for local test/bench + RegistryAddress: "localhost:5000", // used for remote test/bench + RepositoryName: "test-store", // used for remote test/bench + AuthConfig: ociconfig.AuthConfig{Insecure: true}, // used for remote test/bench + } + runLocal = true + // TODO: this may blow up quickly when doing rapid benchmarking if not tested against a fresh OCI instance. + runRemote = false + + // common test. + testCtx = context.Background() +) + +func TestStorePushLookupPullDelete(t *testing.T) { + store := loadLocalStore(t) + + agent := &typesv1alpha0.Record{ + Name: "test-agent", + SchemaVersion: "v0.3.1", + Description: "A test agent", + } + + record := corev1.New(agent) + + // Calculate CID for the record + recordCID := record.GetCid() + assert.NotEmpty(t, recordCID, "failed to calculate CID") + + // Push operation + recordRef, err := store.Push(testCtx, record) + assert.NoErrorf(t, err, "push failed") + assert.Equal(t, recordCID, recordRef.GetCid()) + + // Lookup operation + recordMeta, err := store.Lookup(testCtx, recordRef) + assert.NoErrorf(t, err, "lookup failed") + assert.Equal(t, recordCID, recordMeta.GetCid()) + + // Pull operation + pulledRecord, err := store.Pull(testCtx, recordRef) + assert.NoErrorf(t, err, "pull failed") + + pulledCID := pulledRecord.GetCid() + assert.NotEmpty(t, pulledCID, "failed to get pulled record CID") + assert.Equal(t, recordCID, pulledCID) + + // Verify the pulled agent data (decode the pulled record, not the original) + decoded, _ := pulledRecord.Decode() + pulledAgent := decoded.GetV1Alpha0() + assert.NotNil(t, pulledAgent, "pulled agent should not be nil") + assert.Equal(t, agent.GetName(), pulledAgent.GetName()) + assert.Equal(t, agent.GetSchemaVersion(), pulledAgent.GetSchemaVersion()) + assert.Equal(t, agent.GetDescription(), pulledAgent.GetDescription()) + + // Delete operation + err = store.Delete(testCtx, recordRef) + assert.NoErrorf(t, err, "delete failed") + + // Lookup should fail after delete + _, err = store.Lookup(testCtx, recordRef) + assert.Error(t, err, "lookup should fail after delete") + assert.ErrorContains(t, err, "not found") + + // Pull should also fail after delete + _, err = store.Pull(testCtx, recordRef) + assert.Error(t, err, "pull should fail after delete") + assert.ErrorContains(t, err, "not found") +} + +func BenchmarkLocalStore(b *testing.B) { + if !runLocal { + b.Skip() + } + + store := loadLocalStore(&testing.T{}) + for range b.N { + benchmarkStep(store) + } +} + +func BenchmarkRemoteStore(b *testing.B) { + if !runRemote { + b.Skip() + } + + store := loadRemoteStore(&testing.T{}) + for range b.N { + benchmarkStep(store) + } +} + +func benchmarkStep(store types.StoreAPI) { + // Create test record + agent := &typesv1alpha0.Record{ + Name: "bench-agent", + SchemaVersion: "v0.3.1", + Description: "A benchmark agent", + } + + record := corev1.New(agent) + + // Record is ready for push operation + + // Push operation + pushedRef, err := store.Push(testCtx, record) + if err != nil { + panic(err) + } + + // Lookup operation + fetchedMeta, err := store.Lookup(testCtx, pushedRef) + if err != nil { + panic(err) + } + + // Assert equal + if pushedRef.GetCid() != fetchedMeta.GetCid() { + panic("not equal lookup") + } +} + +func loadLocalStore(t *testing.T) types.StoreAPI { + t.Helper() + + // create tmp storage for test artifacts + tmpDir, err := os.MkdirTemp(testConfig.LocalDir, "test-oci-store-*") //nolint:usetesting + assert.NoErrorf(t, err, "failed to create test dir") + t.Cleanup(func() { + err := os.RemoveAll(tmpDir) +
if err != nil { + t.Fatalf("failed to cleanup: %v", err) + } + }) + + // create local + store, err := New(ociconfig.Config{LocalDir: tmpDir}) + assert.NoErrorf(t, err, "failed to create local store") + + return store +} + +func loadRemoteStore(t *testing.T) types.StoreAPI { + t.Helper() + + // create remote + store, err := New( + ociconfig.Config{ + RegistryAddress: testConfig.RegistryAddress, + RepositoryName: testConfig.RepositoryName, + AuthConfig: testConfig.AuthConfig, + }) + assert.NoErrorf(t, err, "failed to create remote store") + + return store +} + +// TestAllVersionsSkillsAndLocatorsPreservation comprehensively tests skills and locators +// preservation across all OASF versions (v1, v2, v3) through OCI push/pull cycles. +// This addresses the reported issue where v3 record skills become empty after push/pull. +func TestAllVersionsSkillsAndLocatorsPreservation(t *testing.T) { + store := loadLocalStore(t) + + testCases := []struct { + name string + record *corev1.Record + expectedSkillCount int + expectedLocatorCount int + skillVerifier func(t *testing.T, record *corev1.Record) + locatorVerifier func(t *testing.T, record *corev1.Record) + }{ + { + name: "V1_Agent_CategoryClass_Skills", + record: corev1.New(&typesv1alpha0.Record{ + Name: "test-v1-agent", + Version: "1.0.0", + SchemaVersion: "v0.3.1", + Description: "Test v1 agent with hierarchical skills", + Skills: []*typesv1alpha0.Skill{ + { + CategoryName: stringPtr("Natural Language Processing"), + CategoryUid: 1, + ClassName: stringPtr("Text Completion"), + ClassUid: 10201, + }, + { + CategoryName: stringPtr("Machine Learning"), + CategoryUid: 2, + ClassName: stringPtr("Classification"), + ClassUid: 20301, + }, + }, + Locators: []*typesv1alpha0.Locator{ + { + Type: "docker-image", + Url: "ghcr.io/agntcy/test-v1-agent", + }, + { + Type: "helm-chart", + Url: "oci://registry.example.com/charts/test-agent", + }, + }, + }), + expectedSkillCount: 2, + expectedLocatorCount: 2, + skillVerifier: func(t *testing.T, record *corev1.Record) { + t.Helper() + + decoded, _ := record.Decode() + v1Agent := decoded.GetV1Alpha0() + require.NotNil(t, v1Agent, "should be v1 agent") + skills := v1Agent.GetSkills() + require.Len(t, skills, 2, "v1 should have 2 skills") + + // V1 uses category/class format + assert.Equal(t, "Natural Language Processing", skills[0].GetCategoryName()) + assert.Equal(t, "Text Completion", skills[0].GetClassName()) + assert.Equal(t, uint64(10201), skills[0].GetClassUid()) + + assert.Equal(t, "Machine Learning", skills[1].GetCategoryName()) + assert.Equal(t, "Classification", skills[1].GetClassName()) + assert.Equal(t, uint64(20301), skills[1].GetClassUid()) + }, + locatorVerifier: func(t *testing.T, record *corev1.Record) { + t.Helper() + + decoded, _ := record.Decode() + v1Agent := decoded.GetV1Alpha0() + locators := v1Agent.GetLocators() + require.Len(t, locators, 2, "v1 should have 2 locators") + + assert.Equal(t, "docker-image", locators[0].GetType()) + assert.Equal(t, "ghcr.io/agntcy/test-v1-agent", locators[0].GetUrl()) + + assert.Equal(t, "helm-chart", locators[1].GetType()) + assert.Equal(t, "oci://registry.example.com/charts/test-agent", locators[1].GetUrl()) + }, + }, + { + name: "V3_Record_Simple_Skills", + record: corev1.New(&typesv1alpha1.Record{ + Name: "test-v3-record", + Version: "3.0.0", + SchemaVersion: "0.7.0", + Description: "Test v3 record with simple skills", + Skills: []*typesv1alpha1.Skill{ + { + Name: "Natural Language Processing", + Id: 10201, + }, + { + Name: "Data Analysis", + Id: 20301, + }, + 
}, + Locators: []*typesv1alpha1.Locator{ + { + Type: "docker-image", + Url: "ghcr.io/agntcy/test-v3-record", + }, + { + Type: "oci-artifact", + Url: "oci://registry.example.com/artifacts/test-record", + }, + }, + }), + expectedSkillCount: 2, + expectedLocatorCount: 2, + skillVerifier: func(t *testing.T, record *corev1.Record) { + t.Helper() + + decoded, _ := record.Decode() + v3Record := decoded.GetV1Alpha1() + require.NotNil(t, v3Record, "should be v3 record") + skills := v3Record.GetSkills() + require.Len(t, skills, 2, "SKILLS ISSUE: v3 should have 2 skills but has %d", len(skills)) + + // V3 uses simple name/id format (same as v2) + assert.Equal(t, "Natural Language Processing", skills[0].GetName()) + assert.Equal(t, uint32(10201), skills[0].GetId()) + + assert.Equal(t, "Data Analysis", skills[1].GetName()) + assert.Equal(t, uint32(20301), skills[1].GetId()) + }, + locatorVerifier: func(t *testing.T, record *corev1.Record) { + t.Helper() + + decoded, _ := record.Decode() + v3Record := decoded.GetV1Alpha1() + locators := v3Record.GetLocators() + require.Len(t, locators, 2, "v3 should have 2 locators") + + assert.Equal(t, "docker-image", locators[0].GetType()) + assert.Equal(t, "ghcr.io/agntcy/test-v3-record", locators[0].GetUrl()) + + assert.Equal(t, "oci-artifact", locators[1].GetType()) + assert.Equal(t, "oci://registry.example.com/artifacts/test-record", locators[1].GetUrl()) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Calculate CID for the original record + originalCID := tc.record.GetCid() + require.NotEmpty(t, originalCID, "failed to calculate CID for %s", tc.name) + + // Log original state + t.Logf("🔄 Testing %s:", tc.name) + t.Logf(" Original CID: %s", originalCID) + t.Logf(" Expected skills: %d, locators: %d", tc.expectedSkillCount, tc.expectedLocatorCount) + + // Verify original skills and locators using verifiers + tc.skillVerifier(t, tc.record) + tc.locatorVerifier(t, tc.record) + + // PUSH operation + recordRef, err := store.Push(testCtx, tc.record) + require.NoError(t, err, "push should succeed for %s", tc.name) + assert.Equal(t, originalCID, recordRef.GetCid(), "pushed CID should match original") + + // PULL operation + pulledRecord, err := store.Pull(testCtx, recordRef) + require.NoError(t, err, "pull should succeed for %s", tc.name) + + // Verify pulled record CID matches + pulledCID := pulledRecord.GetCid() + require.NotEmpty(t, pulledCID, "pulled record should have CID") + assert.Equal(t, originalCID, pulledCID, "pulled CID should match original") + + // CRITICAL TEST: Verify skills and locators are preserved after push/pull cycle + t.Logf(" Verifying skills preservation...") + tc.skillVerifier(t, pulledRecord) + + t.Logf(" Verifying locators preservation...") + tc.locatorVerifier(t, pulledRecord) + + t.Logf("✅ %s: Skills and locators preserved successfully", tc.name) + + // Cleanup - delete the record + err = store.Delete(testCtx, recordRef) + require.NoError(t, err, "cleanup delete should succeed for %s", tc.name) + }) + } +} diff --git a/server/store/oci/referrers.go b/server/store/oci/referrers.go index 968dd6d11..f0d6209aa 100644 --- a/server/store/oci/referrers.go +++ b/server/store/oci/referrers.go @@ -1,255 +1,255 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "context" - "fmt" - "io" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/utils/logging" - ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" - "oras.land/oras-go/v2" -) - -var referrersLogger = logging.Logger("store/oci/referrers") - -// ReferrerMatcher defines a function type for matching OCI referrer descriptors. -// It returns true if the descriptor matches the expected referrer type. -type ReferrerMatcher func(ctx context.Context, referrer ocispec.Descriptor) bool - -// ReferrersLister interface for repositories that support the OCI Referrers API. -type ReferrersLister interface { - Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error -} - -// PushReferrer pushes a generic RecordReferrer as an OCI artifact that references a record as its subject. -func (s *store) PushReferrer(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { - referrersLogger.Debug("Pushing generic referrer to OCI store", "recordCID", recordCID, "type", referrer.GetType()) - - if referrer == nil { - return status.Error(codes.InvalidArgument, "referrer is required") //nolint:wrapcheck - } - - if recordCID == "" { - return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck - } - - if referrer.GetType() == "" { - return status.Error(codes.InvalidArgument, "referrer type is required") //nolint:wrapcheck - } - - // Map API type to internal OCI artifact type - ociArtifactType := apiToOCIType(referrer.GetType()) - - // If the referrer is a public key, upload it to zot for signature verification - if ociArtifactType == PublicKeyArtifactMediaType { - err := s.uploadPublicKey(ctx, referrer) - if err != nil { - return status.Errorf(codes.Internal, "failed to upload public key: %v", err) - } - } - - // If the referrer is a signature, use cosign to attach the signature to the record instead of pushing it as a blob - if ociArtifactType == SignatureArtifactType { - err := s.pushSignature(ctx, recordCID, referrer) - if err != nil { - return status.Errorf(codes.Internal, "failed to push signature: %v", err) - } - - return nil - } - - // Marshal the referrer to JSON - referrerBytes, err := protojson.Marshal(referrer) - if err != nil { - return status.Errorf(codes.Internal, "failed to marshal referrer: %v", err) - } - - // Push the referrer blob using internal OCI artifact type - blobDesc, err := oras.PushBytes(ctx, s.repo, ociArtifactType, referrerBytes) - if err != nil { - return fmt.Errorf("failed to push referrer blob: %w", err) - } - - // Resolve the record manifest to get its descriptor for the subject field - recordManifestDesc, err := s.repo.Resolve(ctx, recordCID) - if err != nil { - return fmt.Errorf("failed to resolve record manifest for subject: %w", err) - } - - // Create annotations for the referrer manifest - annotations := make(map[string]string) - annotations["agntcy.dir.referrer.type"] = referrer.GetType() - - if referrer.GetCreatedAt() != "" { - annotations["agntcy.dir.referrer.created_at"] = referrer.GetCreatedAt() - } - // Add custom annotations from the referrer - for key, value := range referrer.GetAnnotations() { - annotations["agntcy.dir.referrer.annotation."+key] = value - } - - // Create the referrer manifest with proper OCI subject field - manifestDesc, err := oras.PackManifest(ctx, s.repo, oras.PackManifestVersion1_1, ocispec.MediaTypeImageManifest, - oras.PackManifestOptions{ - Subject: &recordManifestDesc, - ManifestAnnotations: annotations, - 
Layers: []ocispec.Descriptor{ - blobDesc, - }, - }, - ) - if err != nil { - return fmt.Errorf("failed to pack referrer manifest: %w", err) - } - - referrersLogger.Debug("Referrer pushed successfully", "digest", manifestDesc.Digest.String(), "type", referrer.GetType()) - - return nil -} - -// WalkReferrers walks through referrers for a given record CID, calling walkFn for each referrer. -// If referrerType is empty, all referrers are walked, otherwise only referrers of the specified type. -func (s *store) WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error { - referrersLogger.Debug("Walking referrers from OCI store", "recordCID", recordCID, "type", referrerType) - - if recordCID == "" { - return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck - } - - if walkFn == nil { - return status.Error(codes.InvalidArgument, "walkFn is required") //nolint:wrapcheck - } - - // Get the record manifest descriptor - recordManifestDesc, err := s.repo.Resolve(ctx, recordCID) - if err != nil { - return status.Errorf(codes.NotFound, "failed to resolve record manifest for CID %s: %v", recordCID, err) - } - - // Determine the matcher based on referrerType - var matcher ReferrerMatcher - - if referrerType != "" { - // Map API type to internal OCI artifact type for matching - ociArtifactType := apiToOCIType(referrerType) - - matcher = s.MediaTypeReferrerMatcher(ociArtifactType) - } - - // Use the OCI referrers API to walk through referrers efficiently - referrersLister, ok := s.repo.(ReferrersLister) - if !ok { - return status.Errorf(codes.Unimplemented, "repository does not support OCI referrers API") - } - - var walkErr error - - err = referrersLister.Referrers(ctx, recordManifestDesc, "", func(referrers []ocispec.Descriptor) error { - for _, referrerDesc := range referrers { - // Apply matcher if specified - if matcher != nil && !matcher(ctx, referrerDesc) { - continue - } - - // Extract referrer data from manifest - referrer, err := s.extractReferrerFromManifest(ctx, referrerDesc, recordCID) - if err != nil { - referrersLogger.Error("Failed to extract referrer from manifest", "digest", referrerDesc.Digest.String(), "error", err) - - continue // Skip this referrer but continue with others - } - - // Call the walk function - if err := walkFn(referrer); err != nil { - walkErr = err - - return err // Stop walking on error - } - - referrersLogger.Debug("Referrer processed successfully", "digest", referrerDesc.Digest.String(), "type", referrer.GetType()) - } - - return nil // Continue with next batch - }) - - if walkErr != nil { - return walkErr - } - - if err != nil { - return status.Errorf(codes.Internal, "failed to walk referrers for manifest %s: %v", recordManifestDesc.Digest.String(), err) - } - - referrersLogger.Debug("Successfully walked referrers", "recordCID", recordCID, "type", referrerType) - - return nil -} - -// extractReferrerFromManifest extracts the referrer data from a referrer manifest. 
-func (s *store) extractReferrerFromManifest(ctx context.Context, manifestDesc ocispec.Descriptor, recordCID string) (*corev1.RecordReferrer, error) { - manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, manifestDesc) - if err != nil { - return nil, err // Error already includes proper gRPC status - } - - if len(manifest.Layers) == 0 { - return nil, status.Errorf(codes.Internal, "referrer manifest has no layers") - } - - blobDesc := manifest.Layers[0] - - reader, err := s.repo.Fetch(ctx, blobDesc) - if err != nil { - return nil, status.Errorf(codes.NotFound, "referrer blob not found for CID %s: %v", recordCID, err) - } - defer reader.Close() - - referrerData, err := io.ReadAll(reader) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to read referrer data for CID %s: %v", recordCID, err) - } - - referrer := &corev1.RecordReferrer{} - - // If the referrer is not a signature, unmarshal the referrer from JSON - if blobDesc.MediaType != SignatureArtifactType { - // Unmarshal the referrer from JSON - if err := protojson.Unmarshal(referrerData, referrer); err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal referrer for CID %s: %v", recordCID, err) - } - } else { // If the referrer is a signature, convert the cosign signature to a referrer - referrer, err = s.convertCosignSignatureToReferrer(blobDesc, referrerData) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to convert cosign signature to referrer: %v", err) - } - } - - // Map internal OCI artifact type back to Dir API type - if referrer.GetType() != "" { - referrer.Type = ociToAPIType(referrer.GetType()) - } - - return referrer, nil -} - -// MediaTypeReferrerMatcher creates a ReferrerMatcher that checks for a specific media type. -func (s *store) MediaTypeReferrerMatcher(expectedMediaType string) ReferrerMatcher { - return func(ctx context.Context, referrer ocispec.Descriptor) bool { - manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, referrer) - if err != nil { - referrersLogger.Debug("Failed to fetch and parse referrer manifest", "digest", referrer.Digest.String(), "error", err) - - return false - } - - // Check if this manifest contains a layer with the expected media type - return len(manifest.Layers) > 0 && manifest.Layers[0].MediaType == expectedMediaType - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "context" + "fmt" + "io" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/utils/logging" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "oras.land/oras-go/v2" +) + +var referrersLogger = logging.Logger("store/oci/referrers") + +// ReferrerMatcher defines a function type for matching OCI referrer descriptors. +// It returns true if the descriptor matches the expected referrer type. +type ReferrerMatcher func(ctx context.Context, referrer ocispec.Descriptor) bool + +// ReferrersLister interface for repositories that support the OCI Referrers API. +type ReferrersLister interface { + Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error +} + +// PushReferrer pushes a generic RecordReferrer as an OCI artifact that references a record as its subject. 
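For orientation before the implementation that follows, here is a minimal caller-side sketch of attaching a generic referrer to a stored record. The helper name pushAnnotationReferrer, the example type string, and the assumption that the referrer-capable store interface (types.ReferrerStoreAPI) exposes PushReferrer are illustrative, not part of this change:

// Hypothetical caller-side sketch (illustrative only): attach a plain
// annotation referrer to a record already stored under recordCID.
// Non-signature, non-public-key types fall through to the default
// referrer media type (see types.go below).
func pushAnnotationReferrer(ctx context.Context, s types.ReferrerStoreAPI, recordCID string) error {
	referrer := &corev1.RecordReferrer{
		Type: "example/annotation", // hypothetical referrer type
		Annotations: map[string]string{
			"source": "manual-review",
		},
	}

	return s.PushReferrer(ctx, recordCID, referrer)
}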
+func (s *store) PushReferrer(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { + referrersLogger.Debug("Pushing generic referrer to OCI store", "recordCID", recordCID, "type", referrer.GetType()) + + if referrer == nil { + return status.Error(codes.InvalidArgument, "referrer is required") //nolint:wrapcheck + } + + if recordCID == "" { + return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck + } + + if referrer.GetType() == "" { + return status.Error(codes.InvalidArgument, "referrer type is required") //nolint:wrapcheck + } + + // Map API type to internal OCI artifact type + ociArtifactType := apiToOCIType(referrer.GetType()) + + // If the referrer is a public key, upload it to zot for signature verification + if ociArtifactType == PublicKeyArtifactMediaType { + err := s.uploadPublicKey(ctx, referrer) + if err != nil { + return status.Errorf(codes.Internal, "failed to upload public key: %v", err) + } + } + + // If the referrer is a signature, use cosign to attach the signature to the record instead of pushing it as a blob + if ociArtifactType == SignatureArtifactType { + err := s.pushSignature(ctx, recordCID, referrer) + if err != nil { + return status.Errorf(codes.Internal, "failed to push signature: %v", err) + } + + return nil + } + + // Marshal the referrer to JSON + referrerBytes, err := protojson.Marshal(referrer) + if err != nil { + return status.Errorf(codes.Internal, "failed to marshal referrer: %v", err) + } + + // Push the referrer blob using internal OCI artifact type + blobDesc, err := oras.PushBytes(ctx, s.repo, ociArtifactType, referrerBytes) + if err != nil { + return fmt.Errorf("failed to push referrer blob: %w", err) + } + + // Resolve the record manifest to get its descriptor for the subject field + recordManifestDesc, err := s.repo.Resolve(ctx, recordCID) + if err != nil { + return fmt.Errorf("failed to resolve record manifest for subject: %w", err) + } + + // Create annotations for the referrer manifest + annotations := make(map[string]string) + annotations["agntcy.dir.referrer.type"] = referrer.GetType() + + if referrer.GetCreatedAt() != "" { + annotations["agntcy.dir.referrer.created_at"] = referrer.GetCreatedAt() + } + // Add custom annotations from the referrer + for key, value := range referrer.GetAnnotations() { + annotations["agntcy.dir.referrer.annotation."+key] = value + } + + // Create the referrer manifest with proper OCI subject field + manifestDesc, err := oras.PackManifest(ctx, s.repo, oras.PackManifestVersion1_1, ocispec.MediaTypeImageManifest, + oras.PackManifestOptions{ + Subject: &recordManifestDesc, + ManifestAnnotations: annotations, + Layers: []ocispec.Descriptor{ + blobDesc, + }, + }, + ) + if err != nil { + return fmt.Errorf("failed to pack referrer manifest: %w", err) + } + + referrersLogger.Debug("Referrer pushed successfully", "digest", manifestDesc.Digest.String(), "type", referrer.GetType()) + + return nil +} + +// WalkReferrers walks through referrers for a given record CID, calling walkFn for each referrer. +// If referrerType is empty, all referrers are walked, otherwise only referrers of the specified type. 
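Before the walk implementation below, a small usage sketch: WalkReferrers streams results through a callback, so collecting them into a slice takes only a few lines. The helper name listReferrers is hypothetical; the WalkReferrers signature matches the one defined in this file:

// Hypothetical helper (illustrative only): collect all referrers of a
// given type for a record; an empty referrerType collects everything.
func listReferrers(ctx context.Context, s types.ReferrerStoreAPI, recordCID, referrerType string) ([]*corev1.RecordReferrer, error) {
	var out []*corev1.RecordReferrer

	err := s.WalkReferrers(ctx, recordCID, referrerType, func(ref *corev1.RecordReferrer) error {
		out = append(out, ref)

		return nil // returning a non-nil error here stops the walk early
	})

	return out, err
}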
+func (s *store) WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error { + referrersLogger.Debug("Walking referrers from OCI store", "recordCID", recordCID, "type", referrerType) + + if recordCID == "" { + return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck + } + + if walkFn == nil { + return status.Error(codes.InvalidArgument, "walkFn is required") //nolint:wrapcheck + } + + // Get the record manifest descriptor + recordManifestDesc, err := s.repo.Resolve(ctx, recordCID) + if err != nil { + return status.Errorf(codes.NotFound, "failed to resolve record manifest for CID %s: %v", recordCID, err) + } + + // Determine the matcher based on referrerType + var matcher ReferrerMatcher + + if referrerType != "" { + // Map API type to internal OCI artifact type for matching + ociArtifactType := apiToOCIType(referrerType) + + matcher = s.MediaTypeReferrerMatcher(ociArtifactType) + } + + // Use the OCI referrers API to walk through referrers efficiently + referrersLister, ok := s.repo.(ReferrersLister) + if !ok { + return status.Errorf(codes.Unimplemented, "repository does not support OCI referrers API") + } + + var walkErr error + + err = referrersLister.Referrers(ctx, recordManifestDesc, "", func(referrers []ocispec.Descriptor) error { + for _, referrerDesc := range referrers { + // Apply matcher if specified + if matcher != nil && !matcher(ctx, referrerDesc) { + continue + } + + // Extract referrer data from manifest + referrer, err := s.extractReferrerFromManifest(ctx, referrerDesc, recordCID) + if err != nil { + referrersLogger.Error("Failed to extract referrer from manifest", "digest", referrerDesc.Digest.String(), "error", err) + + continue // Skip this referrer but continue with others + } + + // Call the walk function + if err := walkFn(referrer); err != nil { + walkErr = err + + return err // Stop walking on error + } + + referrersLogger.Debug("Referrer processed successfully", "digest", referrerDesc.Digest.String(), "type", referrer.GetType()) + } + + return nil // Continue with next batch + }) + + if walkErr != nil { + return walkErr + } + + if err != nil { + return status.Errorf(codes.Internal, "failed to walk referrers for manifest %s: %v", recordManifestDesc.Digest.String(), err) + } + + referrersLogger.Debug("Successfully walked referrers", "recordCID", recordCID, "type", referrerType) + + return nil +} + +// extractReferrerFromManifest extracts the referrer data from a referrer manifest. 
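As a reference for the extraction logic that follows, this is roughly the manifest shape PushReferrer packs above: one blob layer holding the encoded referrer, a subject descriptor pointing at the record manifest, and the agntcy.dir.referrer.* annotations. All concrete values below are placeholders:

// Approximate shape of a packed referrer manifest (placeholder values,
// illustrative only).
var recordManifestDesc = ocispec.Descriptor{} // descriptor resolved from the record CID

var exampleReferrerManifest = ocispec.Manifest{
	MediaType: ocispec.MediaTypeImageManifest,
	Subject:   &recordManifestDesc,
	Annotations: map[string]string{
		"agntcy.dir.referrer.type": "example/annotation",
	},
	Layers: []ocispec.Descriptor{
		{
			MediaType: DefaultReferrerArtifactMediaType, // or the signature/public-key media type
			Digest:    "sha256:...",                     // digest of the protojson-encoded referrer blob
			Size:      123,
		},
	},
}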
+func (s *store) extractReferrerFromManifest(ctx context.Context, manifestDesc ocispec.Descriptor, recordCID string) (*corev1.RecordReferrer, error) { + manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, manifestDesc) + if err != nil { + return nil, err // Error already includes proper gRPC status + } + + if len(manifest.Layers) == 0 { + return nil, status.Errorf(codes.Internal, "referrer manifest has no layers") + } + + blobDesc := manifest.Layers[0] + + reader, err := s.repo.Fetch(ctx, blobDesc) + if err != nil { + return nil, status.Errorf(codes.NotFound, "referrer blob not found for CID %s: %v", recordCID, err) + } + defer reader.Close() + + referrerData, err := io.ReadAll(reader) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to read referrer data for CID %s: %v", recordCID, err) + } + + referrer := &corev1.RecordReferrer{} + + // If the referrer is not a signature, unmarshal the referrer from JSON + if blobDesc.MediaType != SignatureArtifactType { + // Unmarshal the referrer from JSON + if err := protojson.Unmarshal(referrerData, referrer); err != nil { + return nil, status.Errorf(codes.Internal, "failed to unmarshal referrer for CID %s: %v", recordCID, err) + } + } else { // If the referrer is a signature, convert the cosign signature to a referrer + referrer, err = s.convertCosignSignatureToReferrer(blobDesc, referrerData) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to convert cosign signature to referrer: %v", err) + } + } + + // Map internal OCI artifact type back to Dir API type + if referrer.GetType() != "" { + referrer.Type = ociToAPIType(referrer.GetType()) + } + + return referrer, nil +} + +// MediaTypeReferrerMatcher creates a ReferrerMatcher that checks for a specific media type. +func (s *store) MediaTypeReferrerMatcher(expectedMediaType string) ReferrerMatcher { + return func(ctx context.Context, referrer ocispec.Descriptor) bool { + manifest, err := s.fetchAndParseManifestFromDescriptor(ctx, referrer) + if err != nil { + referrersLogger.Debug("Failed to fetch and parse referrer manifest", "digest", referrer.Digest.String(), "error", err) + + return false + } + + // Check if this manifest contains a layer with the expected media type + return len(manifest.Layers) > 0 && manifest.Layers[0].MediaType == expectedMediaType + } +} diff --git a/server/store/oci/signatures.go b/server/store/oci/signatures.go index ea7682c62..9d881a93c 100644 --- a/server/store/oci/signatures.go +++ b/server/store/oci/signatures.go @@ -1,172 +1,172 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "context" - "fmt" - "strings" - - corev1 "github.com/agntcy/dir/api/core/v1" - signv1 "github.com/agntcy/dir/api/sign/v1" - "github.com/agntcy/dir/utils/cosign" - "github.com/agntcy/dir/utils/zot" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// pushSignature stores OCI signature artifacts for a record using cosign attach signature and uploads public key to zot for verification. 
-func (s *store) pushSignature(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { - referrersLogger.Debug("Pushing signature artifact to OCI store", "recordCID", recordCID) - - // Decode the signature from the referrer - signature := &signv1.Signature{} - if err := signature.UnmarshalReferrer(referrer); err != nil { - return status.Errorf(codes.Internal, "failed to decode signature from referrer: %v", err) - } - - if recordCID == "" { - return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck - } - - // Use cosign attach signature to attach the signature to the record - if err := s.attachSignatureWithCosign(ctx, recordCID, signature); err != nil { - return status.Errorf(codes.Internal, "failed to attach signature with cosign: %v", err) - } - - referrersLogger.Debug("Signature attached successfully using cosign", "recordCID", recordCID) - - return nil -} - -// uploadPublicKey uploads a public key to zot for signature verification. -func (s *store) uploadPublicKey(ctx context.Context, referrer *corev1.RecordReferrer) error { - referrersLogger.Debug("Uploading public key to zot for signature verification") - - // Decode the public key from the referrer - pk := &signv1.PublicKey{} - if err := pk.UnmarshalReferrer(referrer); err != nil { - return status.Errorf(codes.Internal, "failed to get public key from referrer: %v", err) - } - - publicKey := pk.GetKey() - if publicKey == "" { - return status.Error(codes.InvalidArgument, "public key is required") //nolint:wrapcheck - } - - // Upload the public key to zot for signature verification - // This enables zot to mark this signature as "trusted" in verification queries - uploadOpts := &zot.UploadPublicKeyOptions{ - Config: s.buildZotConfig(), - PublicKey: publicKey, - } - - if err := zot.UploadPublicKey(ctx, uploadOpts); err != nil { - return status.Errorf(codes.Internal, "failed to upload public key to zot for verification: %v", err) - } - - referrersLogger.Debug("Successfully uploaded public key to zot for verification") - - return nil -} - -// attachSignatureWithCosign uses cosign attach signature to attach a signature to a record in the OCI registry. -func (s *store) attachSignatureWithCosign(ctx context.Context, recordCID string, signature *signv1.Signature) error { - referrersLogger.Debug("Attaching signature using cosign attach signature", "recordCID", recordCID) - - // Construct the OCI image reference for the record - imageRef := s.constructImageReference(recordCID) - - // Prepare options for attaching signature - attachOpts := &cosign.AttachSignatureOptions{ - ImageRef: imageRef, - Signature: signature.GetSignature(), - Payload: signature.GetAnnotations()["payload"], - Username: s.config.Username, - Password: s.config.Password, - } - - // Attach signature using utility function - err := cosign.AttachSignature(ctx, attachOpts) - if err != nil { - return fmt.Errorf("failed to attach signature: %w", err) - } - - referrersLogger.Debug("Cosign attach signature completed successfully") - - return nil -} - -// constructImageReference builds the OCI image reference for a record CID. 
-func (s *store) constructImageReference(recordCID string) string { - // Get the registry and repository from the config - registry := s.config.RegistryAddress - repository := s.config.RepositoryName - - // Remove any protocol prefix from registry address for the image reference - registry = strings.TrimPrefix(registry, "http://") - registry = strings.TrimPrefix(registry, "https://") - - // Use CID as tag to match the oras.Tag operation in Push method - return fmt.Sprintf("%s/%s:%s", registry, repository, recordCID) -} - -// buildZotConfig creates a ZotConfig from the store configuration. -func (s *store) buildZotConfig() *zot.VerifyConfig { - return &zot.VerifyConfig{ - RegistryAddress: s.config.RegistryAddress, - RepositoryName: s.config.RepositoryName, - Username: s.config.Username, - Password: s.config.Password, - AccessToken: s.config.AccessToken, - Insecure: s.config.Insecure, - } -} - -// convertCosignSignatureToReferrer converts cosign signature data to a referrer. -func (s *store) convertCosignSignatureToReferrer(blobDesc ocispec.Descriptor, data []byte) (*corev1.RecordReferrer, error) { - // Extract the signature from the layer annotations - var signatureValue string - - if blobDesc.Annotations != nil { - if sig, exists := blobDesc.Annotations["dev.cosignproject.cosign/signature"]; exists { - signatureValue = sig - } - } - - if signatureValue == "" { - return nil, status.Errorf(codes.Internal, "no signature value found in annotations") - } - - signature := &signv1.Signature{ - Signature: signatureValue, - Annotations: map[string]string{ - "payload": string(data), - }, - } - - referrer, err := signature.MarshalReferrer() - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to encode signature to referrer: %v", err) - } - - return referrer, nil -} - -// VerifyWithZot queries zot's verification API to check if a signature is valid. -func (s *store) VerifyWithZot(ctx context.Context, recordCID string) (bool, error) { - verifyOpts := &zot.VerificationOptions{ - Config: s.buildZotConfig(), - RecordCID: recordCID, - } - - result, err := zot.Verify(ctx, verifyOpts) - if err != nil { - return false, fmt.Errorf("failed to verify with zot: %w", err) - } - - // Return the trusted status (which implies signed as well) - return result.IsTrusted, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "context" + "fmt" + "strings" + + corev1 "github.com/agntcy/dir/api/core/v1" + signv1 "github.com/agntcy/dir/api/sign/v1" + "github.com/agntcy/dir/utils/cosign" + "github.com/agntcy/dir/utils/zot" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// pushSignature stores OCI signature artifacts for a record using cosign attach signature and uploads public key to zot for verification. 
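The signature path below round-trips through signv1.Signature; for context, a producer-side sketch mirroring the annotation layout used by convertCosignSignatureToReferrer later in this file. The helper name signatureReferrer and its arguments are hypothetical:

// Hypothetical producer-side sketch (illustrative only): wrap a cosign
// signature and its simplesigning payload into a referrer suitable for
// PushReferrer.
func signatureReferrer(base64Sig string, payloadJSON []byte) (*corev1.RecordReferrer, error) {
	sig := &signv1.Signature{
		Signature: base64Sig,
		Annotations: map[string]string{
			"payload": string(payloadJSON),
		},
	}

	return sig.MarshalReferrer()
}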
+func (s *store) pushSignature(ctx context.Context, recordCID string, referrer *corev1.RecordReferrer) error { + referrersLogger.Debug("Pushing signature artifact to OCI store", "recordCID", recordCID) + + // Decode the signature from the referrer + signature := &signv1.Signature{} + if err := signature.UnmarshalReferrer(referrer); err != nil { + return status.Errorf(codes.Internal, "failed to decode signature from referrer: %v", err) + } + + if recordCID == "" { + return status.Error(codes.InvalidArgument, "record CID is required") //nolint:wrapcheck + } + + // Use cosign attach signature to attach the signature to the record + if err := s.attachSignatureWithCosign(ctx, recordCID, signature); err != nil { + return status.Errorf(codes.Internal, "failed to attach signature with cosign: %v", err) + } + + referrersLogger.Debug("Signature attached successfully using cosign", "recordCID", recordCID) + + return nil +} + +// uploadPublicKey uploads a public key to zot for signature verification. +func (s *store) uploadPublicKey(ctx context.Context, referrer *corev1.RecordReferrer) error { + referrersLogger.Debug("Uploading public key to zot for signature verification") + + // Decode the public key from the referrer + pk := &signv1.PublicKey{} + if err := pk.UnmarshalReferrer(referrer); err != nil { + return status.Errorf(codes.Internal, "failed to get public key from referrer: %v", err) + } + + publicKey := pk.GetKey() + if publicKey == "" { + return status.Error(codes.InvalidArgument, "public key is required") //nolint:wrapcheck + } + + // Upload the public key to zot for signature verification + // This enables zot to mark this signature as "trusted" in verification queries + uploadOpts := &zot.UploadPublicKeyOptions{ + Config: s.buildZotConfig(), + PublicKey: publicKey, + } + + if err := zot.UploadPublicKey(ctx, uploadOpts); err != nil { + return status.Errorf(codes.Internal, "failed to upload public key to zot for verification: %v", err) + } + + referrersLogger.Debug("Successfully uploaded public key to zot for verification") + + return nil +} + +// attachSignatureWithCosign uses cosign attach signature to attach a signature to a record in the OCI registry. +func (s *store) attachSignatureWithCosign(ctx context.Context, recordCID string, signature *signv1.Signature) error { + referrersLogger.Debug("Attaching signature using cosign attach signature", "recordCID", recordCID) + + // Construct the OCI image reference for the record + imageRef := s.constructImageReference(recordCID) + + // Prepare options for attaching signature + attachOpts := &cosign.AttachSignatureOptions{ + ImageRef: imageRef, + Signature: signature.GetSignature(), + Payload: signature.GetAnnotations()["payload"], + Username: s.config.Username, + Password: s.config.Password, + } + + // Attach signature using utility function + err := cosign.AttachSignature(ctx, attachOpts) + if err != nil { + return fmt.Errorf("failed to attach signature: %w", err) + } + + referrersLogger.Debug("Cosign attach signature completed successfully") + + return nil +} + +// constructImageReference builds the OCI image reference for a record CID. 
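A quick worked example of the reference format built by the function below, using the registry settings from the test configuration earlier in this diff (CID shortened for illustration):

// With RegistryAddress "localhost:5000" and RepositoryName "test-store",
// a record CID used as the tag yields:
//
//	localhost:5000/test-store:baguqeera...
//
// A scheme-prefixed address is trimmed first, so "https://registry.example.com"
// produces "registry.example.com/test-store:<cid>".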
+func (s *store) constructImageReference(recordCID string) string { + // Get the registry and repository from the config + registry := s.config.RegistryAddress + repository := s.config.RepositoryName + + // Remove any protocol prefix from registry address for the image reference + registry = strings.TrimPrefix(registry, "http://") + registry = strings.TrimPrefix(registry, "https://") + + // Use CID as tag to match the oras.Tag operation in Push method + return fmt.Sprintf("%s/%s:%s", registry, repository, recordCID) +} + +// buildZotConfig creates a ZotConfig from the store configuration. +func (s *store) buildZotConfig() *zot.VerifyConfig { + return &zot.VerifyConfig{ + RegistryAddress: s.config.RegistryAddress, + RepositoryName: s.config.RepositoryName, + Username: s.config.Username, + Password: s.config.Password, + AccessToken: s.config.AccessToken, + Insecure: s.config.Insecure, + } +} + +// convertCosignSignatureToReferrer converts cosign signature data to a referrer. +func (s *store) convertCosignSignatureToReferrer(blobDesc ocispec.Descriptor, data []byte) (*corev1.RecordReferrer, error) { + // Extract the signature from the layer annotations + var signatureValue string + + if blobDesc.Annotations != nil { + if sig, exists := blobDesc.Annotations["dev.cosignproject.cosign/signature"]; exists { + signatureValue = sig + } + } + + if signatureValue == "" { + return nil, status.Errorf(codes.Internal, "no signature value found in annotations") + } + + signature := &signv1.Signature{ + Signature: signatureValue, + Annotations: map[string]string{ + "payload": string(data), + }, + } + + referrer, err := signature.MarshalReferrer() + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to encode signature to referrer: %v", err) + } + + return referrer, nil +} + +// VerifyWithZot queries zot's verification API to check if a signature is valid. +func (s *store) VerifyWithZot(ctx context.Context, recordCID string) (bool, error) { + verifyOpts := &zot.VerificationOptions{ + Config: s.buildZotConfig(), + RecordCID: recordCID, + } + + result, err := zot.Verify(ctx, verifyOpts) + if err != nil { + return false, fmt.Errorf("failed to verify with zot: %w", err) + } + + // Return the trusted status (which implies signed as well) + return result.IsTrusted, nil +} diff --git a/server/store/oci/types.go b/server/store/oci/types.go index b54e40bda..8ab2766d9 100644 --- a/server/store/oci/types.go +++ b/server/store/oci/types.go @@ -1,45 +1,45 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - corev1 "github.com/agntcy/dir/api/core/v1" -) - -// Internal OCI artifact media types used for storage implementation. -// These are mapped from proto full names at the server boundary. -const ( - // PublicKeyArtifactMediaType defines the internal OCI media type for public key blobs. - PublicKeyArtifactMediaType = "application/vnd.agntcy.dir.publickey.v1+pem" - - // SignatureArtifactType defines the internal OCI media type for signature layers. - SignatureArtifactType = "application/vnd.dev.cosign.simplesigning.v1+json" - - // DefaultReferrerArtifactMediaType defines the default internal OCI media type for referrer blobs. - DefaultReferrerArtifactMediaType = "application/vnd.agntcy.dir.referrer.v1+json" -) - -// apiToOCIType maps Dir API types to internal OCI artifact types. 
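To make the mapping concrete before the function bodies below, the expected behavior in both directions, written as illustrative expressions rather than tests from this change:

// Illustrative mapping expectations (not part of this diff):
//
//	apiToOCIType(corev1.SignatureReferrerType) == SignatureArtifactType
//	apiToOCIType(corev1.PublicKeyReferrerType) == PublicKeyArtifactMediaType
//	apiToOCIType("anything-else")              == DefaultReferrerArtifactMediaType
//
// ociToAPIType inverts only the two known mappings and passes any
// other value through unchanged.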
-func apiToOCIType(apiType string) string { - switch apiType { - case corev1.SignatureReferrerType: - return SignatureArtifactType - case corev1.PublicKeyReferrerType: - return PublicKeyArtifactMediaType - default: - return DefaultReferrerArtifactMediaType - } -} - -// ociToAPIType maps internal OCI artifact types back to Dir API types. -func ociToAPIType(ociType string) string { - switch ociType { - case SignatureArtifactType: - return corev1.SignatureReferrerType - case PublicKeyArtifactMediaType: - return corev1.PublicKeyReferrerType - default: - return ociType // Return the original OCI type if not found - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + corev1 "github.com/agntcy/dir/api/core/v1" +) + +// Internal OCI artifact media types used for storage implementation. +// These are mapped from proto full names at the server boundary. +const ( + // PublicKeyArtifactMediaType defines the internal OCI media type for public key blobs. + PublicKeyArtifactMediaType = "application/vnd.agntcy.dir.publickey.v1+pem" + + // SignatureArtifactType defines the internal OCI media type for signature layers. + SignatureArtifactType = "application/vnd.dev.cosign.simplesigning.v1+json" + + // DefaultReferrerArtifactMediaType defines the default internal OCI media type for referrer blobs. + DefaultReferrerArtifactMediaType = "application/vnd.agntcy.dir.referrer.v1+json" +) + +// apiToOCIType maps Dir API types to internal OCI artifact types. +func apiToOCIType(apiType string) string { + switch apiType { + case corev1.SignatureReferrerType: + return SignatureArtifactType + case corev1.PublicKeyReferrerType: + return PublicKeyArtifactMediaType + default: + return DefaultReferrerArtifactMediaType + } +} + +// ociToAPIType maps internal OCI artifact types back to Dir API types. +func ociToAPIType(ociType string) string { + switch ociType { + case SignatureArtifactType: + return corev1.SignatureReferrerType + case PublicKeyArtifactMediaType: + return corev1.PublicKeyReferrerType + default: + return ociType // Return the original OCI type if not found + } +} diff --git a/server/store/oci/utils.go b/server/store/oci/utils.go index d4aa83bfd..453d0e661 100644 --- a/server/store/oci/utils.go +++ b/server/store/oci/utils.go @@ -1,47 +1,47 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package oci - -import ( - "fmt" - "net/http" - - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "oras.land/oras-go/v2/registry/remote" - "oras.land/oras-go/v2/registry/remote/auth" - "oras.land/oras-go/v2/registry/remote/retry" -) - -func stringPtr(s string) *string { - return &s -} - -// NewORASRepository creates a new ORAS repository client configured with authentication. 
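For context before the constructor below, a minimal usage sketch wiring the same values as the test configuration earlier in this diff; the helper name newLocalRepo is hypothetical:

// Hypothetical usage sketch (illustrative only): an ORAS client for a
// local, plain-HTTP registry with no credentials.
func newLocalRepo() (*remote.Repository, error) {
	cfg := ociconfig.Config{
		RegistryAddress: "localhost:5000",
		RepositoryName:  "test-store",
		AuthConfig:      ociconfig.AuthConfig{Insecure: true}, // sets repo.PlainHTTP
	}

	return NewORASRepository(cfg)
}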
-func NewORASRepository(cfg ociconfig.Config) (*remote.Repository, error) { - repo, err := remote.NewRepository(fmt.Sprintf("%s/%s", cfg.RegistryAddress, cfg.RepositoryName)) - if err != nil { - return nil, fmt.Errorf("failed to connect to remote repo: %w", err) - } - - // Configure repository - repo.PlainHTTP = cfg.Insecure - repo.Client = &auth.Client{ - Client: retry.DefaultClient, - Header: http.Header{ - "User-Agent": {"dir-client"}, - }, - Cache: auth.DefaultCache, - Credential: auth.StaticCredential( - cfg.RegistryAddress, - auth.Credential{ - Username: cfg.Username, - Password: cfg.Password, - RefreshToken: cfg.RefreshToken, - AccessToken: cfg.AccessToken, - }, - ), - } - - return repo, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package oci + +import ( + "fmt" + "net/http" + + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "oras.land/oras-go/v2/registry/remote" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/retry" +) + +func stringPtr(s string) *string { + return &s +} + +// NewORASRepository creates a new ORAS repository client configured with authentication. +func NewORASRepository(cfg ociconfig.Config) (*remote.Repository, error) { + repo, err := remote.NewRepository(fmt.Sprintf("%s/%s", cfg.RegistryAddress, cfg.RepositoryName)) + if err != nil { + return nil, fmt.Errorf("failed to connect to remote repo: %w", err) + } + + // Configure repository + repo.PlainHTTP = cfg.Insecure + repo.Client = &auth.Client{ + Client: retry.DefaultClient, + Header: http.Header{ + "User-Agent": {"dir-client"}, + }, + Cache: auth.DefaultCache, + Credential: auth.StaticCredential( + cfg.RegistryAddress, + auth.Credential{ + Username: cfg.Username, + Password: cfg.Password, + RefreshToken: cfg.RefreshToken, + AccessToken: cfg.AccessToken, + }, + ), + } + + return repo, nil +} diff --git a/server/store/store.go b/server/store/store.go index 75e1de8f5..7757a74df 100644 --- a/server/store/store.go +++ b/server/store/store.go @@ -1,37 +1,37 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package store - -import ( - "fmt" - - "github.com/agntcy/dir/server/store/eventswrap" - "github.com/agntcy/dir/server/store/oci" - "github.com/agntcy/dir/server/types" -) - -type Provider string - -const ( - OCI = Provider("oci") -) - -// TODO: add options for adding cache. -func New(opts types.APIOptions) (types.StoreAPI, error) { - switch provider := Provider(opts.Config().Store.Provider); provider { - case OCI: - store, err := oci.New(opts.Config().Store.OCI) - if err != nil { - return nil, fmt.Errorf("failed to create OCI store: %w", err) - } - - // Wrap with event emitter - store = eventswrap.Wrap(store, opts.EventBus()) - - return store, nil - - default: - return nil, fmt.Errorf("unsupported provider=%s", provider) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package store + +import ( + "fmt" + + "github.com/agntcy/dir/server/store/eventswrap" + "github.com/agntcy/dir/server/store/oci" + "github.com/agntcy/dir/server/types" +) + +type Provider string + +const ( + OCI = Provider("oci") +) + +// TODO: add options for adding cache. 
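One way the cache TODO above might eventually be realized, following the same decorator pattern eventswrap.Wrap uses below. This is a hedged sketch only: the type cachedStore, its field layout, and the choice to cache Pull results are assumptions, and a real implementation would also need invalidation on Delete:

// Hypothetical sketch for the cache TODO (not part of this change).
// Embedding types.StoreAPI satisfies the interface while overriding
// only the read path being cached.
type cachedStore struct {
	types.StoreAPI

	mu    sync.RWMutex
	pulls map[string]*corev1.Record // keyed by record CID
}

func (c *cachedStore) Pull(ctx context.Context, ref *corev1.RecordRef) (*corev1.Record, error) {
	c.mu.RLock()
	record, ok := c.pulls[ref.GetCid()]
	c.mu.RUnlock()

	if ok {
		return record, nil
	}

	record, err := c.StoreAPI.Pull(ctx, ref)
	if err != nil {
		return nil, err
	}

	c.mu.Lock()
	c.pulls[ref.GetCid()] = record
	c.mu.Unlock()

	return record, nil
}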
+func New(opts types.APIOptions) (types.StoreAPI, error) { + switch provider := Provider(opts.Config().Store.Provider); provider { + case OCI: + store, err := oci.New(opts.Config().Store.OCI) + if err != nil { + return nil, fmt.Errorf("failed to create OCI store: %w", err) + } + + // Wrap with event emitter + store = eventswrap.Wrap(store, opts.EventBus()) + + return store, nil + + default: + return nil, fmt.Errorf("unsupported provider=%s", provider) + } +} diff --git a/server/sync/config/config.go b/server/sync/config/config.go index 518e8d1a9..c024b58e5 100644 --- a/server/sync/config/config.go +++ b/server/sync/config/config.go @@ -1,41 +1,41 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "time" - - monitor "github.com/agntcy/dir/server/sync/monitor/config" -) - -const ( - DefaultSyncSchedulerInterval = 30 * time.Second - DefaultSyncWorkerCount = 1 - DefaultSyncWorkerTimeout = 10 * time.Minute -) - -type Config struct { - // Scheduler interval. - // The interval at which the scheduler will check for pending syncs. - SchedulerInterval time.Duration `json:"scheduler_interval,omitempty" mapstructure:"scheduler_interval"` - - // Worker count. - // The maximum number of workers that can be running concurrently. - WorkerCount int `json:"worker_count,omitempty" mapstructure:"worker_count"` - - // Worker timeout. - WorkerTimeout time.Duration `json:"worker_timeout,omitempty" mapstructure:"worker_timeout"` - - // Registry monitor configuration - RegistryMonitor monitor.Config `json:"registry_monitor,omitempty" mapstructure:"registry_monitor"` - - // Authentication configuration - AuthConfig `json:"auth_config,omitempty" mapstructure:"auth_config"` -} - -// AuthConfig represents the configuration for authentication. -type AuthConfig struct { - Username string `json:"username,omitempty" mapstructure:"username"` - Password string `json:"password,omitempty" mapstructure:"password"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "time" + + monitor "github.com/agntcy/dir/server/sync/monitor/config" +) + +const ( + DefaultSyncSchedulerInterval = 30 * time.Second + DefaultSyncWorkerCount = 1 + DefaultSyncWorkerTimeout = 10 * time.Minute +) + +type Config struct { + // Scheduler interval. + // The interval at which the scheduler will check for pending syncs. + SchedulerInterval time.Duration `json:"scheduler_interval,omitempty" mapstructure:"scheduler_interval"` + + // Worker count. + // The maximum number of workers that can be running concurrently. + WorkerCount int `json:"worker_count,omitempty" mapstructure:"worker_count"` + + // Worker timeout. + WorkerTimeout time.Duration `json:"worker_timeout,omitempty" mapstructure:"worker_timeout"` + + // Registry monitor configuration + RegistryMonitor monitor.Config `json:"registry_monitor,omitempty" mapstructure:"registry_monitor"` + + // Authentication configuration + AuthConfig `json:"auth_config,omitempty" mapstructure:"auth_config"` +} + +// AuthConfig represents the configuration for authentication. 
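For reference before the AuthConfig type below, a Config literal wired with the package defaults declared above; the credential values are placeholders:

// Illustrative only: a sync configuration using the defaults above.
var exampleSyncConfig = Config{
	SchedulerInterval: DefaultSyncSchedulerInterval, // 30s between pending-sync checks
	WorkerCount:       DefaultSyncWorkerCount,       // 1 concurrent worker
	WorkerTimeout:     DefaultSyncWorkerTimeout,     // 10m per sync operation
	RegistryMonitor:   monitor.Config{CheckInterval: 60 * time.Second},
	AuthConfig: AuthConfig{
		Username: "sync-user", // placeholder credentials
		Password: "sync-pass",
	},
}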
+type AuthConfig struct { + Username string `json:"username,omitempty" mapstructure:"username"` + Password string `json:"password,omitempty" mapstructure:"password"` +} diff --git a/server/sync/monitor/config/config.go b/server/sync/monitor/config/config.go index 78cebf599..a08d09feb 100644 --- a/server/sync/monitor/config/config.go +++ b/server/sync/monitor/config/config.go @@ -1,16 +1,16 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package config - -import "time" - -const ( - DefaultCheckInterval = 60 * time.Second -) - -type Config struct { - // Check interval. - // The interval at which the monitor will check for changes. - CheckInterval time.Duration `json:"check_interval,omitempty" mapstructure:"check_interval"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package config + +import "time" + +const ( + DefaultCheckInterval = 60 * time.Second +) + +type Config struct { + // Check interval. + // The interval at which the monitor will check for changes. + CheckInterval time.Duration `json:"check_interval,omitempty" mapstructure:"check_interval"` +} diff --git a/server/sync/monitor/monitor.go b/server/sync/monitor/monitor.go index 3cca9c1ab..b5b8d6e93 100644 --- a/server/sync/monitor/monitor.go +++ b/server/sync/monitor/monitor.go @@ -1,482 +1,482 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package monitor - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "strings" - "sync" - "time" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/store/oci" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - "github.com/agntcy/dir/server/sync/monitor/config" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/agntcy/dir/utils/logging" - "github.com/agntcy/dir/utils/zot" - "oras.land/oras-go/v2/registry/remote" -) - -var logger = logging.Logger("sync/monitor") - -// MonitorService manages registry monitoring based on active sync operations. -// -//nolint:revive -type MonitorService struct { - // Configuration - db types.DatabaseAPI - store types.StoreAPI - ociConfig ociconfig.Config - checkInterval time.Duration - - // Monitoring state - mu sync.RWMutex - isRunning bool - lastSnapshot *RegistrySnapshot - ticker *time.Ticker - cancelMonitor context.CancelFunc - - // Sync management - activeSyncs map[string]struct{} // Track active sync operations - - // ORAS repository client - repo *remote.Repository -} - -// NewMonitorService creates a new monitor service. -func NewMonitorService(db types.DatabaseAPI, store types.StoreAPI, ociConfig ociconfig.Config, monitorConfig config.Config) (*MonitorService, error) { - // Create ORAS repository client - repo, err := oci.NewORASRepository(ociConfig) - if err != nil { - return nil, fmt.Errorf("failed to create ORAS repository client: %w", err) - } - - return &MonitorService{ - db: db, - store: store, - ociConfig: ociConfig, - checkInterval: monitorConfig.CheckInterval, - activeSyncs: make(map[string]struct{}), - repo: repo, - }, nil -} - -// Stop gracefully shuts down the monitor service. 
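Before the lifecycle methods below, a caller-side sketch of how the service is intended to be driven: monitoring is reference-counted by active sync IDs, starting with the first sync and stopping (with a final graceful scan) after the last. The helper runSyncWithMonitoring is hypothetical:

// Hypothetical caller-side sketch (illustrative only).
func runSyncWithMonitoring(svc *MonitorService, syncID string) error {
	if err := svc.StartSyncMonitoring(syncID); err != nil {
		return err
	}
	// Stopping is best-effort here; a final indexing scan runs in the
	// background once the last active sync is removed.
	defer svc.StopSyncMonitoring(syncID) //nolint:errcheck

	// ... perform the actual sync work ...

	return nil
}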
-func (s *MonitorService) Stop() error { - s.mu.Lock() - defer s.mu.Unlock() - - logger.Info("Stopping monitor service") - - // Stop monitoring if active - if s.isRunning { - logger.Info("Stopping registry monitoring") - - // Cancel monitoring - if s.cancelMonitor != nil { - s.cancelMonitor() - } - - s.ticker.Stop() - - // Update state - s.isRunning = false - } - - // Clear active syncs - s.activeSyncs = make(map[string]struct{}) - - logger.Info("Monitor service stopped") - - return nil -} - -// StartSyncMonitoring begins monitoring when a sync operation starts. -func (s *MonitorService) StartSyncMonitoring(syncID string) error { - s.mu.Lock() - defer s.mu.Unlock() - - // Add sync to active list - s.activeSyncs[syncID] = struct{}{} - - // Start monitoring if this is the first active sync - if len(s.activeSyncs) == 1 && !s.isRunning { - s.startMonitoring(context.Background()) - - logger.Info("Started registry monitoring", "active_syncs", len(s.activeSyncs)) - } - - logger.Debug("Sync added to monitoring", "sync_id", syncID, "active_syncs", len(s.activeSyncs)) - - return nil -} - -// StopSyncMonitoring stops monitoring when a sync operation ends. -// It performs a final indexing scan before stopping to ensure no records are missed. -func (s *MonitorService) StopSyncMonitoring(syncID string) error { - s.mu.Lock() - defer s.mu.Unlock() - - // Remove sync from active list - delete(s.activeSyncs, syncID) - - // Stop monitoring if no more active syncs - if len(s.activeSyncs) == 0 && s.isRunning { - // Cancel monitoring - if s.cancelMonitor != nil { - s.cancelMonitor() - } - - // Update state - s.isRunning = false - - // Run graceful shutdown in background to not block deletion - go s.gracefulShutdown() - - s.ticker.Stop() - - logger.Info("Stopped registry monitoring") - } - - logger.Debug("Sync removed from monitoring", "sync_id", syncID, "active_syncs", len(s.activeSyncs)) - - return nil -} - -// gracefulShutdown performs final monitoring checks in the background -// to ensure all synced records are indexed before stopping monitoring. -func (s *MonitorService) gracefulShutdown() { - logger.Debug("Starting graceful shutdown with final monitoring checks") - - // Perform final indexing scan - ctx, cancel := context.WithTimeout(context.Background(), config.DefaultCheckInterval*2) //nolint:mnd - defer cancel() - - // Create a separate ticker for graceful shutdown since main ticker is stopped - shutdownTicker := time.NewTicker(config.DefaultCheckInterval) //nolint:mnd - defer shutdownTicker.Stop() - - // Perform monitoring checks until timeout is reached - for { - select { - case <-ctx.Done(): - logger.Debug("Graceful shutdown timeout reached") - - return - case <-shutdownTicker.C: - s.performMonitoringCheck(ctx) - } - } -} - -// startMonitoring begins registry monitoring. -func (s *MonitorService) startMonitoring(ctx context.Context) { - if s.isRunning { - logger.Debug("Registry monitoring already running") - - return - } - - // Initialize monitoring state - s.isRunning = true - s.ticker = time.NewTicker(s.checkInterval) - - // Create cancelable context for monitoring - monitorCtx, cancel := context.WithCancel(ctx) - s.cancelMonitor = cancel - - // Start monitoring goroutine - go s.runRegistryMonitoring(monitorCtx) -} - -// runRegistryMonitoring runs the registry monitoring loop. 
-func (s *MonitorService) runRegistryMonitoring(ctx context.Context) { - logger.Info("Registry monitoring started") - - // Create initial snapshot - snapshot, err := s.createRegistrySnapshot(ctx) - if err != nil { - logger.Error("Failed to create initial registry snapshot", "error", err) - - return - } - - s.lastSnapshot = snapshot - - for { - select { - case <-ctx.Done(): - logger.Info("Registry monitoring stopping") - - return - case <-s.ticker.C: - s.performMonitoringCheck(ctx) - } - } -} - -// performMonitoringCheck performs a single monitoring check. -func (s *MonitorService) performMonitoringCheck(ctx context.Context) { - logger.Debug("Performing registry monitoring check") - - s.mu.Lock() - defer s.mu.Unlock() - - // Get current registry snapshot - snapshot, err := s.createRegistrySnapshot(ctx) - if err != nil { - logger.Error("Failed to create registry snapshot", "error", err) - - return - } - - // Compare with last snapshot to detect changes - changes := s.detectChanges(s.lastSnapshot, snapshot) - if changes.HasChanges { - logger.Info("Registry changes detected", "new_tags", len(changes.NewTags)) - s.processChanges(ctx, changes) - } else { - logger.Debug("No registry changes detected") - } - - // Update last snapshot - s.lastSnapshot = snapshot -} - -// createRegistrySnapshot creates a snapshot of the current registry state. -func (s *MonitorService) createRegistrySnapshot(ctx context.Context) (*RegistrySnapshot, error) { - // List all tags in the repository - // No sorting needed, OCI spec requires tags in lexical order - var tags []string - - err := s.repo.Tags(ctx, "", func(tagDescriptors []string) error { - // Filter tags to only include valid CIDs - for _, tag := range tagDescriptors { - if corev1.IsValidCID(tag) { - tags = append(tags, tag) - } else { - logger.Debug("Skipping non-CID tag", "tag", tag) - } - } - - return nil - }) - if err != nil { - // Check if this is a "repository not found" error (404) - if isRepositoryNotFoundError(err) { - logger.Debug("Repository not found yet, returning empty snapshot", "error", err) - - // Return empty snapshot - this is normal when sync hasn't pulled content yet - return EmptySnapshot, nil - } - - return nil, fmt.Errorf("failed to list repository tags: %w", err) - } - - // Create content hash for quick comparison - contentHash := s.createContentHash(tags) - - return &RegistrySnapshot{ - Timestamp: time.Now(), - Tags: tags, - ContentHash: contentHash, - LastModified: time.Now(), - }, nil -} - -// createContentHash creates a hash of the tags for quick comparison. -func (s *MonitorService) createContentHash(tags []string) string { - // Create a deterministic hash by writing each tag individually - hasher := sha256.New() - - for _, tag := range tags { - hasher.Write([]byte(tag)) - hasher.Write([]byte("|")) - } - - return hex.EncodeToString(hasher.Sum(nil)) -} - -// detectChanges compares two registry snapshots and detects changes. 
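A short worked example of the comparison implemented below; tag values are placeholders:

// Illustrative behavior of detectChanges (not part of this change):
//
//	old snapshot tags: ["bafyA", "bafyB"]
//	new snapshot tags: ["bafyA", "bafyB", "bafyC"]
//	result:            NewTags = ["bafyC"], HasChanges = true
//
// Removed tags are deliberately ignored: only additions trigger
// indexing. The "|" separator in createContentHash keeps the hash
// unambiguous, e.g. ["ab"] hashes "ab|" while ["a", "b"] hashes "a|b|".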
-func (s *MonitorService) detectChanges(oldSnapshot, newSnapshot *RegistrySnapshot) *RegistryChanges { - // If content hashes match, no changes - if oldSnapshot.ContentHash == newSnapshot.ContentHash { - return &RegistryChanges{ - HasChanges: false, - DetectedAt: time.Now(), - } - } - - // Create sets for efficient comparison - oldTags := make(map[string]struct{}) - for _, tag := range oldSnapshot.Tags { - oldTags[tag] = struct{}{} - } - - // Find new tags - var addedTags []string - - for _, tag := range newSnapshot.Tags { - if _, ok := oldTags[tag]; !ok { - addedTags = append(addedTags, tag) - } - } - - changes := &RegistryChanges{ - NewTags: addedTags, - HasChanges: len(addedTags) > 0, - DetectedAt: time.Now(), - } - - return changes -} - -// processChanges processes detected registry changes by indexing new records. -func (s *MonitorService) processChanges(ctx context.Context, changes *RegistryChanges) { - for _, tag := range changes.NewTags { - // Index record - if err := s.indexRecord(ctx, tag); err != nil { - // Warn but continue processing other records even if one fails - logger.Error("Failed to index record", "tag", tag, "error", err) - } else { - logger.Debug("Successfully indexed record", "tag", tag) - } - - // Upload public key to OCI store - if err := s.uploadPublicKey(ctx, tag); err != nil { - logger.Error("Failed to upload public key", "tag", tag, "error", err) - } - } -} - -// indexRecord indexes a single record from the registry into the database. -func (s *MonitorService) indexRecord(ctx context.Context, tag string) error { - logger.Debug("Indexing record", "tag", tag) - - // Pull record from local store - recordRef := &corev1.RecordRef{Cid: tag} - - record, err := s.store.Pull(ctx, recordRef) - if err != nil { - return fmt.Errorf("failed to pull record from local store: %w", err) - } - - isValid, validationErrors, err := record.Validate(ctx) - if err != nil { - return fmt.Errorf("failed to validate record: %w", err) - } - - if !isValid { - return fmt.Errorf("record validation failed: %v", validationErrors) - } - - // Add to database - recordAdapter := adapters.NewRecordAdapter(record) - if err := s.db.AddRecord(recordAdapter); err != nil { - // Check if this is a duplicate record error - if so, it's not really an error - if s.isDuplicateRecordError(err) { - logger.Debug("Record already indexed, skipping", "cid", tag) - - return nil - } - - return fmt.Errorf("failed to add record to database: %w", err) - } - - logger.Info("Successfully indexed local record", "cid", tag) - - return nil -} - -// uploadPublicKey uploads a public key to the OCI store. 
-func (s *MonitorService) uploadPublicKey(ctx context.Context, tag string) error { - logger.Debug("Uploading public key", "tag", tag) - - // Try to use signature storage if the store supports it - referrerStore, ok := s.store.(types.ReferrerStoreAPI) - if !ok { - logger.Error("Store does not support public key upload, skipping", "tag", tag) - - return nil - } - - // Walk public key referrers from referrer store - walkFn := func(referrer *corev1.RecordReferrer) error { - publicKeyValue, ok := referrer.GetData().AsMap()["publicKey"] - if !ok { - return errors.New("publicKey field not found in referrer data") - } - - publicKey, ok := publicKeyValue.(string) - if !ok { - return errors.New("publicKey field is not a string") - } - - // Upload the public key to zot for signature verification - // This enables zot to mark this signature as "trusted" in verification queries - uploadOpts := &zot.UploadPublicKeyOptions{ - Config: &zot.VerifyConfig{ - RegistryAddress: s.ociConfig.RegistryAddress, - RepositoryName: s.ociConfig.RepositoryName, - Username: s.ociConfig.Username, - Password: s.ociConfig.Password, - AccessToken: s.ociConfig.AccessToken, - Insecure: s.ociConfig.Insecure, - }, - PublicKey: publicKey, - } - - err := zot.UploadPublicKey(ctx, uploadOpts) - if err != nil { - return fmt.Errorf("failed to upload public key to zot for verification: %w", err) - } - - return nil // Continue walking - } - - // Walk public key referrers - err := referrerStore.WalkReferrers(ctx, tag, corev1.PublicKeyReferrerType, walkFn) - if err != nil { - return fmt.Errorf("failed to walk public key referrers: %w", err) - } - - logger.Debug("Successfully uploaded public keys to zot for verification", "tag", tag) - - return nil -} - -// isDuplicateRecordError checks if the error indicates a duplicate record. -func (s *MonitorService) isDuplicateRecordError(err error) bool { - if err == nil { - return false - } - - errStr := strings.ToLower(err.Error()) - - return strings.Contains(errStr, "duplicate") || - strings.Contains(errStr, "already exists") || - strings.Contains(errStr, "unique constraint") || - strings.Contains(errStr, "primary key") -} - -// isRepositoryNotFoundError checks if the error is a "repository not found" (404) error. -func isRepositoryNotFoundError(err error) bool { - if err == nil { - return false - } - - // Check if it's an HTTP error with status code 404 - errStr := err.Error() - - return strings.Contains(errStr, "404") && - (strings.Contains(errStr, "name unknown") || - strings.Contains(errStr, "repository name not known") || - strings.Contains(errStr, "not found")) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package monitor + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "strings" + "sync" + "time" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/store/oci" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + "github.com/agntcy/dir/server/sync/monitor/config" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/agntcy/dir/utils/logging" + "github.com/agntcy/dir/utils/zot" + "oras.land/oras-go/v2/registry/remote" +) + +var logger = logging.Logger("sync/monitor") + +// MonitorService manages registry monitoring based on active sync operations. 
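+// Monitoring is reference-counted over active syncs: it starts when the
+// first sync registers via StartSyncMonitoring and stops (after a final
+// indexing pass) when the last one deregisters via StopSyncMonitoring.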
+// +//nolint:revive +type MonitorService struct { + // Configuration + db types.DatabaseAPI + store types.StoreAPI + ociConfig ociconfig.Config + checkInterval time.Duration + + // Monitoring state + mu sync.RWMutex + isRunning bool + lastSnapshot *RegistrySnapshot + ticker *time.Ticker + cancelMonitor context.CancelFunc + + // Sync management + activeSyncs map[string]struct{} // Track active sync operations + + // ORAS repository client + repo *remote.Repository +} + +// NewMonitorService creates a new monitor service. +func NewMonitorService(db types.DatabaseAPI, store types.StoreAPI, ociConfig ociconfig.Config, monitorConfig config.Config) (*MonitorService, error) { + // Create ORAS repository client + repo, err := oci.NewORASRepository(ociConfig) + if err != nil { + return nil, fmt.Errorf("failed to create ORAS repository client: %w", err) + } + + return &MonitorService{ + db: db, + store: store, + ociConfig: ociConfig, + checkInterval: monitorConfig.CheckInterval, + activeSyncs: make(map[string]struct{}), + repo: repo, + }, nil +} + +// Stop gracefully shuts down the monitor service. +func (s *MonitorService) Stop() error { + s.mu.Lock() + defer s.mu.Unlock() + + logger.Info("Stopping monitor service") + + // Stop monitoring if active + if s.isRunning { + logger.Info("Stopping registry monitoring") + + // Cancel monitoring + if s.cancelMonitor != nil { + s.cancelMonitor() + } + + s.ticker.Stop() + + // Update state + s.isRunning = false + } + + // Clear active syncs + s.activeSyncs = make(map[string]struct{}) + + logger.Info("Monitor service stopped") + + return nil +} + +// StartSyncMonitoring begins monitoring when a sync operation starts. +func (s *MonitorService) StartSyncMonitoring(syncID string) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Add sync to active list + s.activeSyncs[syncID] = struct{}{} + + // Start monitoring if this is the first active sync + if len(s.activeSyncs) == 1 && !s.isRunning { + s.startMonitoring(context.Background()) + + logger.Info("Started registry monitoring", "active_syncs", len(s.activeSyncs)) + } + + logger.Debug("Sync added to monitoring", "sync_id", syncID, "active_syncs", len(s.activeSyncs)) + + return nil +} + +// StopSyncMonitoring stops monitoring when a sync operation ends. +// It performs a final indexing scan before stopping to ensure no records are missed. +func (s *MonitorService) StopSyncMonitoring(syncID string) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Remove sync from active list + delete(s.activeSyncs, syncID) + + // Stop monitoring if no more active syncs + if len(s.activeSyncs) == 0 && s.isRunning { + // Cancel monitoring + if s.cancelMonitor != nil { + s.cancelMonitor() + } + + // Update state + s.isRunning = false + + // Run graceful shutdown in background to not block deletion + go s.gracefulShutdown() + + s.ticker.Stop() + + logger.Info("Stopped registry monitoring") + } + + logger.Debug("Sync removed from monitoring", "sync_id", syncID, "active_syncs", len(s.activeSyncs)) + + return nil +} + +// gracefulShutdown performs final monitoring checks in the background +// to ensure all synced records are indexed before stopping monitoring. 
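+// The final scan window is bounded: checks run on a fresh ticker and stop
+// once a timeout of twice the default check interval elapses, so shutdown
+// cannot block indefinitely on a slow registry.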
+func (s *MonitorService) gracefulShutdown() { + logger.Debug("Starting graceful shutdown with final monitoring checks") + + // Perform final indexing scan + ctx, cancel := context.WithTimeout(context.Background(), config.DefaultCheckInterval*2) //nolint:mnd + defer cancel() + + // Create a separate ticker for graceful shutdown since main ticker is stopped + shutdownTicker := time.NewTicker(config.DefaultCheckInterval) //nolint:mnd + defer shutdownTicker.Stop() + + // Perform monitoring checks until timeout is reached + for { + select { + case <-ctx.Done(): + logger.Debug("Graceful shutdown timeout reached") + + return + case <-shutdownTicker.C: + s.performMonitoringCheck(ctx) + } + } +} + +// startMonitoring begins registry monitoring. +func (s *MonitorService) startMonitoring(ctx context.Context) { + if s.isRunning { + logger.Debug("Registry monitoring already running") + + return + } + + // Initialize monitoring state + s.isRunning = true + s.ticker = time.NewTicker(s.checkInterval) + + // Create cancelable context for monitoring + monitorCtx, cancel := context.WithCancel(ctx) + s.cancelMonitor = cancel + + // Start monitoring goroutine + go s.runRegistryMonitoring(monitorCtx) +} + +// runRegistryMonitoring runs the registry monitoring loop. +func (s *MonitorService) runRegistryMonitoring(ctx context.Context) { + logger.Info("Registry monitoring started") + + // Create initial snapshot + snapshot, err := s.createRegistrySnapshot(ctx) + if err != nil { + logger.Error("Failed to create initial registry snapshot", "error", err) + + return + } + + s.lastSnapshot = snapshot + + for { + select { + case <-ctx.Done(): + logger.Info("Registry monitoring stopping") + + return + case <-s.ticker.C: + s.performMonitoringCheck(ctx) + } + } +} + +// performMonitoringCheck performs a single monitoring check. +func (s *MonitorService) performMonitoringCheck(ctx context.Context) { + logger.Debug("Performing registry monitoring check") + + s.mu.Lock() + defer s.mu.Unlock() + + // Get current registry snapshot + snapshot, err := s.createRegistrySnapshot(ctx) + if err != nil { + logger.Error("Failed to create registry snapshot", "error", err) + + return + } + + // Compare with last snapshot to detect changes + changes := s.detectChanges(s.lastSnapshot, snapshot) + if changes.HasChanges { + logger.Info("Registry changes detected", "new_tags", len(changes.NewTags)) + s.processChanges(ctx, changes) + } else { + logger.Debug("No registry changes detected") + } + + // Update last snapshot + s.lastSnapshot = snapshot +} + +// createRegistrySnapshot creates a snapshot of the current registry state. 
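+// Only tags that parse as valid CIDs are recorded; a 404 for the repository
+// is treated as "nothing synced yet" and yields EmptySnapshot rather than
+// an error.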
+func (s *MonitorService) createRegistrySnapshot(ctx context.Context) (*RegistrySnapshot, error) { + // List all tags in the repository + // No sorting needed, OCI spec requires tags in lexical order + var tags []string + + err := s.repo.Tags(ctx, "", func(tagDescriptors []string) error { + // Filter tags to only include valid CIDs + for _, tag := range tagDescriptors { + if corev1.IsValidCID(tag) { + tags = append(tags, tag) + } else { + logger.Debug("Skipping non-CID tag", "tag", tag) + } + } + + return nil + }) + if err != nil { + // Check if this is a "repository not found" error (404) + if isRepositoryNotFoundError(err) { + logger.Debug("Repository not found yet, returning empty snapshot", "error", err) + + // Return empty snapshot - this is normal when sync hasn't pulled content yet + return EmptySnapshot, nil + } + + return nil, fmt.Errorf("failed to list repository tags: %w", err) + } + + // Create content hash for quick comparison + contentHash := s.createContentHash(tags) + + return &RegistrySnapshot{ + Timestamp: time.Now(), + Tags: tags, + ContentHash: contentHash, + LastModified: time.Now(), + }, nil +} + +// createContentHash creates a hash of the tags for quick comparison. +func (s *MonitorService) createContentHash(tags []string) string { + // Create a deterministic hash by writing each tag individually + hasher := sha256.New() + + for _, tag := range tags { + hasher.Write([]byte(tag)) + hasher.Write([]byte("|")) + } + + return hex.EncodeToString(hasher.Sum(nil)) +} + +// detectChanges compares two registry snapshots and detects changes. +func (s *MonitorService) detectChanges(oldSnapshot, newSnapshot *RegistrySnapshot) *RegistryChanges { + // If content hashes match, no changes + if oldSnapshot.ContentHash == newSnapshot.ContentHash { + return &RegistryChanges{ + HasChanges: false, + DetectedAt: time.Now(), + } + } + + // Create sets for efficient comparison + oldTags := make(map[string]struct{}) + for _, tag := range oldSnapshot.Tags { + oldTags[tag] = struct{}{} + } + + // Find new tags + var addedTags []string + + for _, tag := range newSnapshot.Tags { + if _, ok := oldTags[tag]; !ok { + addedTags = append(addedTags, tag) + } + } + + changes := &RegistryChanges{ + NewTags: addedTags, + HasChanges: len(addedTags) > 0, + DetectedAt: time.Now(), + } + + return changes +} + +// processChanges processes detected registry changes by indexing new records. +func (s *MonitorService) processChanges(ctx context.Context, changes *RegistryChanges) { + for _, tag := range changes.NewTags { + // Index record + if err := s.indexRecord(ctx, tag); err != nil { + // Warn but continue processing other records even if one fails + logger.Error("Failed to index record", "tag", tag, "error", err) + } else { + logger.Debug("Successfully indexed record", "tag", tag) + } + + // Upload public key to OCI store + if err := s.uploadPublicKey(ctx, tag); err != nil { + logger.Error("Failed to upload public key", "tag", tag, "error", err) + } + } +} + +// indexRecord indexes a single record from the registry into the database. 
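+// The flow is pull, validate, then AddRecord; duplicate-record errors from
+// the database are treated as success so repeated scans stay idempotent.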
+func (s *MonitorService) indexRecord(ctx context.Context, tag string) error { + logger.Debug("Indexing record", "tag", tag) + + // Pull record from local store + recordRef := &corev1.RecordRef{Cid: tag} + + record, err := s.store.Pull(ctx, recordRef) + if err != nil { + return fmt.Errorf("failed to pull record from local store: %w", err) + } + + isValid, validationErrors, err := record.Validate(ctx) + if err != nil { + return fmt.Errorf("failed to validate record: %w", err) + } + + if !isValid { + return fmt.Errorf("record validation failed: %v", validationErrors) + } + + // Add to database + recordAdapter := adapters.NewRecordAdapter(record) + if err := s.db.AddRecord(recordAdapter); err != nil { + // Check if this is a duplicate record error - if so, it's not really an error + if s.isDuplicateRecordError(err) { + logger.Debug("Record already indexed, skipping", "cid", tag) + + return nil + } + + return fmt.Errorf("failed to add record to database: %w", err) + } + + logger.Info("Successfully indexed local record", "cid", tag) + + return nil +} + +// uploadPublicKey uploads a public key to the OCI store. +func (s *MonitorService) uploadPublicKey(ctx context.Context, tag string) error { + logger.Debug("Uploading public key", "tag", tag) + + // Try to use signature storage if the store supports it + referrerStore, ok := s.store.(types.ReferrerStoreAPI) + if !ok { + logger.Error("Store does not support public key upload, skipping", "tag", tag) + + return nil + } + + // Walk public key referrers from referrer store + walkFn := func(referrer *corev1.RecordReferrer) error { + publicKeyValue, ok := referrer.GetData().AsMap()["publicKey"] + if !ok { + return errors.New("publicKey field not found in referrer data") + } + + publicKey, ok := publicKeyValue.(string) + if !ok { + return errors.New("publicKey field is not a string") + } + + // Upload the public key to zot for signature verification + // This enables zot to mark this signature as "trusted" in verification queries + uploadOpts := &zot.UploadPublicKeyOptions{ + Config: &zot.VerifyConfig{ + RegistryAddress: s.ociConfig.RegistryAddress, + RepositoryName: s.ociConfig.RepositoryName, + Username: s.ociConfig.Username, + Password: s.ociConfig.Password, + AccessToken: s.ociConfig.AccessToken, + Insecure: s.ociConfig.Insecure, + }, + PublicKey: publicKey, + } + + err := zot.UploadPublicKey(ctx, uploadOpts) + if err != nil { + return fmt.Errorf("failed to upload public key to zot for verification: %w", err) + } + + return nil // Continue walking + } + + // Walk public key referrers + err := referrerStore.WalkReferrers(ctx, tag, corev1.PublicKeyReferrerType, walkFn) + if err != nil { + return fmt.Errorf("failed to walk public key referrers: %w", err) + } + + logger.Debug("Successfully uploaded public keys to zot for verification", "tag", tag) + + return nil +} + +// isDuplicateRecordError checks if the error indicates a duplicate record. +func (s *MonitorService) isDuplicateRecordError(err error) bool { + if err == nil { + return false + } + + errStr := strings.ToLower(err.Error()) + + return strings.Contains(errStr, "duplicate") || + strings.Contains(errStr, "already exists") || + strings.Contains(errStr, "unique constraint") || + strings.Contains(errStr, "primary key") +} + +// isRepositoryNotFoundError checks if the error is a "repository not found" (404) error. 
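+// Detection is a best-effort heuristic over the error text, so the
+// substring checks below may need updating if registry error messages
+// change.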
+func isRepositoryNotFoundError(err error) bool { + if err == nil { + return false + } + + // Check if it's an HTTP error with status code 404 + errStr := err.Error() + + return strings.Contains(errStr, "404") && + (strings.Contains(errStr, "name unknown") || + strings.Contains(errStr, "repository name not known") || + strings.Contains(errStr, "not found")) +} diff --git a/server/sync/monitor/types.go b/server/sync/monitor/types.go index bbda8a2ce..de1d30275 100644 --- a/server/sync/monitor/types.go +++ b/server/sync/monitor/types.go @@ -1,31 +1,31 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package monitor - -import ( - "time" -) - -// RegistrySnapshot represents the state of the local registry at a point in time. -type RegistrySnapshot struct { - Timestamp time.Time `json:"timestamp"` - Tags []string `json:"tags"` - ContentHash string `json:"content_hash"` // Hash of all tags for quick comparison - LastModified time.Time `json:"last_modified"` -} - -// EmptySnapshot is a snapshot of an empty registry. -var EmptySnapshot = &RegistrySnapshot{ - Timestamp: time.Now(), - Tags: []string{}, - ContentHash: "", - LastModified: time.Now(), -} - -// RegistryChanges represents detected changes between registry snapshots. -type RegistryChanges struct { - NewTags []string `json:"new_tags"` - HasChanges bool `json:"has_changes"` - DetectedAt time.Time `json:"detected_at"` -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package monitor + +import ( + "time" +) + +// RegistrySnapshot represents the state of the local registry at a point in time. +type RegistrySnapshot struct { + Timestamp time.Time `json:"timestamp"` + Tags []string `json:"tags"` + ContentHash string `json:"content_hash"` // Hash of all tags for quick comparison + LastModified time.Time `json:"last_modified"` +} + +// EmptySnapshot is a snapshot of an empty registry. +var EmptySnapshot = &RegistrySnapshot{ + Timestamp: time.Now(), + Tags: []string{}, + ContentHash: "", + LastModified: time.Now(), +} + +// RegistryChanges represents detected changes between registry snapshots. +type RegistryChanges struct { + NewTags []string `json:"new_tags"` + HasChanges bool `json:"has_changes"` + DetectedAt time.Time `json:"detected_at"` +} diff --git a/server/sync/scheduler.go b/server/sync/scheduler.go index 78da6a933..62c897a0f 100644 --- a/server/sync/scheduler.go +++ b/server/sync/scheduler.go @@ -1,148 +1,148 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sync - -import ( - "context" - "errors" - "fmt" - "time" - - storev1 "github.com/agntcy/dir/api/store/v1" - synctypes "github.com/agntcy/dir/server/sync/types" - "github.com/agntcy/dir/server/types" -) - -// Scheduler monitors the database for pending sync operations. -type Scheduler struct { - db types.SyncDatabaseAPI - workQueue chan<- synctypes.WorkItem - interval time.Duration -} - -// NewScheduler creates a new scheduler instance. -func NewScheduler(db types.SyncDatabaseAPI, workQueue chan<- synctypes.WorkItem, interval time.Duration) *Scheduler { - return &Scheduler{ - db: db, - workQueue: workQueue, - interval: interval, - } -} - -// Run starts the scheduler loop. 
-func (s *Scheduler) Run(ctx context.Context, stopCh <-chan struct{}) { - logger.Info("Starting sync scheduler", "interval", s.interval) - - ticker := time.NewTicker(s.interval) - defer ticker.Stop() - - // Process immediately on start - s.processPendingSyncs(ctx) - - for { - select { - case <-ctx.Done(): - logger.Info("Scheduler stopping due to context cancellation") - - return - case <-stopCh: - logger.Info("Scheduler stopping due to stop signal") - - return - case <-ticker.C: - s.processPendingSyncs(ctx) - } - } -} - -// processPendingSyncs finds pending syncs and dispatches them to workers. -func (s *Scheduler) processPendingSyncs(ctx context.Context) { - logger.Debug("Processing pending syncs") - - // Process pending sync creations - if err := s.processPendingSyncCreations(ctx); err != nil { - logger.Error("Failed to process pending sync creations", "error", err) - } - - // Process pending sync deletions - if err := s.processPendingSyncDeletions(ctx); err != nil { - logger.Error("Failed to process pending sync deletions", "error", err) - } -} - -// processPendingSyncCreations handles syncs that need to be created. -func (s *Scheduler) processPendingSyncCreations(ctx context.Context) error { - syncs, err := s.db.GetSyncsByStatus(storev1.SyncStatus_SYNC_STATUS_PENDING) - if err != nil { - return fmt.Errorf("failed to get pending syncs from database: %w", err) - } - - for _, sync := range syncs { - // Transition to IN_PROGRESS before dispatching - if err := s.db.UpdateSyncStatus(sync.GetID(), storev1.SyncStatus_SYNC_STATUS_IN_PROGRESS); err != nil { - logger.Error("Failed to update sync status to IN_PROGRESS", "sync_id", sync.GetID(), "error", err) - - continue - } - - // Dispatch to worker queue - workItem := synctypes.WorkItem{ - Type: synctypes.WorkItemTypeSyncCreate, - SyncID: sync.GetID(), - RemoteDirectoryURL: sync.GetRemoteDirectoryURL(), - CIDs: sync.GetCIDs(), - } - - if err := s.dispatchWorkItem(ctx, workItem); err != nil { - // Revert status back to PENDING since we couldn't dispatch - if err := s.db.UpdateSyncStatus(sync.GetID(), storev1.SyncStatus_SYNC_STATUS_PENDING); err != nil { - logger.Error("Failed to revert sync status to PENDING", "sync_id", sync.GetID(), "error", err) - } - } - } - - return nil -} - -// processPendingSyncDeletions handles syncs that need to be deleted. -func (s *Scheduler) processPendingSyncDeletions(ctx context.Context) error { - syncs, err := s.db.GetSyncsByStatus(storev1.SyncStatus_SYNC_STATUS_DELETE_PENDING) - if err != nil { - return fmt.Errorf("failed to get delete pending syncs from database: %w", err) - } - - for _, sync := range syncs { - // Create delete work item - workItem := synctypes.WorkItem{ - Type: synctypes.WorkItemTypeSyncDelete, - SyncID: sync.GetID(), - RemoteDirectoryURL: sync.GetRemoteDirectoryURL(), - CIDs: sync.GetCIDs(), - } - - if err := s.dispatchWorkItem(ctx, workItem); err != nil { - logger.Error("Failed to dispatch delete work item", "sync_id", sync.GetID(), "error", err) - } - } - - return nil -} - -// dispatchWorkItem handles the common logic for dispatching work items to the queue. 
-func (s *Scheduler) dispatchWorkItem(ctx context.Context, workItem synctypes.WorkItem) error { - select { - case s.workQueue <- workItem: - logger.Debug("Dispatched work item to queue", "type", workItem.Type, "sync_id", workItem.SyncID) - - return nil - case <-ctx.Done(): - logger.Info("Context cancelled while dispatching work item") - - return ctx.Err() //nolint:wrapcheck - default: - logger.Warn("Worker queue is full, skipping work item", "type", workItem.Type, "sync_id", workItem.SyncID) - - return errors.New("worker queue is full") - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import ( + "context" + "errors" + "fmt" + "time" + + storev1 "github.com/agntcy/dir/api/store/v1" + synctypes "github.com/agntcy/dir/server/sync/types" + "github.com/agntcy/dir/server/types" +) + +// Scheduler monitors the database for pending sync operations. +type Scheduler struct { + db types.SyncDatabaseAPI + workQueue chan<- synctypes.WorkItem + interval time.Duration +} + +// NewScheduler creates a new scheduler instance. +func NewScheduler(db types.SyncDatabaseAPI, workQueue chan<- synctypes.WorkItem, interval time.Duration) *Scheduler { + return &Scheduler{ + db: db, + workQueue: workQueue, + interval: interval, + } +} + +// Run starts the scheduler loop. +func (s *Scheduler) Run(ctx context.Context, stopCh <-chan struct{}) { + logger.Info("Starting sync scheduler", "interval", s.interval) + + ticker := time.NewTicker(s.interval) + defer ticker.Stop() + + // Process immediately on start + s.processPendingSyncs(ctx) + + for { + select { + case <-ctx.Done(): + logger.Info("Scheduler stopping due to context cancellation") + + return + case <-stopCh: + logger.Info("Scheduler stopping due to stop signal") + + return + case <-ticker.C: + s.processPendingSyncs(ctx) + } + } +} + +// processPendingSyncs finds pending syncs and dispatches them to workers. +func (s *Scheduler) processPendingSyncs(ctx context.Context) { + logger.Debug("Processing pending syncs") + + // Process pending sync creations + if err := s.processPendingSyncCreations(ctx); err != nil { + logger.Error("Failed to process pending sync creations", "error", err) + } + + // Process pending sync deletions + if err := s.processPendingSyncDeletions(ctx); err != nil { + logger.Error("Failed to process pending sync deletions", "error", err) + } +} + +// processPendingSyncCreations handles syncs that need to be created. 
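+// Each pending sync is flipped to IN_PROGRESS before dispatch; if the work
+// queue is full, the status is reverted to PENDING so the next scheduler
+// tick can retry it.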
+func (s *Scheduler) processPendingSyncCreations(ctx context.Context) error { + syncs, err := s.db.GetSyncsByStatus(storev1.SyncStatus_SYNC_STATUS_PENDING) + if err != nil { + return fmt.Errorf("failed to get pending syncs from database: %w", err) + } + + for _, sync := range syncs { + // Transition to IN_PROGRESS before dispatching + if err := s.db.UpdateSyncStatus(sync.GetID(), storev1.SyncStatus_SYNC_STATUS_IN_PROGRESS); err != nil { + logger.Error("Failed to update sync status to IN_PROGRESS", "sync_id", sync.GetID(), "error", err) + + continue + } + + // Dispatch to worker queue + workItem := synctypes.WorkItem{ + Type: synctypes.WorkItemTypeSyncCreate, + SyncID: sync.GetID(), + RemoteDirectoryURL: sync.GetRemoteDirectoryURL(), + CIDs: sync.GetCIDs(), + } + + if err := s.dispatchWorkItem(ctx, workItem); err != nil { + // Revert status back to PENDING since we couldn't dispatch + if err := s.db.UpdateSyncStatus(sync.GetID(), storev1.SyncStatus_SYNC_STATUS_PENDING); err != nil { + logger.Error("Failed to revert sync status to PENDING", "sync_id", sync.GetID(), "error", err) + } + } + } + + return nil +} + +// processPendingSyncDeletions handles syncs that need to be deleted. +func (s *Scheduler) processPendingSyncDeletions(ctx context.Context) error { + syncs, err := s.db.GetSyncsByStatus(storev1.SyncStatus_SYNC_STATUS_DELETE_PENDING) + if err != nil { + return fmt.Errorf("failed to get delete pending syncs from database: %w", err) + } + + for _, sync := range syncs { + // Create delete work item + workItem := synctypes.WorkItem{ + Type: synctypes.WorkItemTypeSyncDelete, + SyncID: sync.GetID(), + RemoteDirectoryURL: sync.GetRemoteDirectoryURL(), + CIDs: sync.GetCIDs(), + } + + if err := s.dispatchWorkItem(ctx, workItem); err != nil { + logger.Error("Failed to dispatch delete work item", "sync_id", sync.GetID(), "error", err) + } + } + + return nil +} + +// dispatchWorkItem handles the common logic for dispatching work items to the queue. +func (s *Scheduler) dispatchWorkItem(ctx context.Context, workItem synctypes.WorkItem) error { + select { + case s.workQueue <- workItem: + logger.Debug("Dispatched work item to queue", "type", workItem.Type, "sync_id", workItem.SyncID) + + return nil + case <-ctx.Done(): + logger.Info("Context cancelled while dispatching work item") + + return ctx.Err() //nolint:wrapcheck + default: + logger.Warn("Worker queue is full, skipping work item", "type", workItem.Type, "sync_id", workItem.SyncID) + + return errors.New("worker queue is full") + } +} diff --git a/server/sync/sync.go b/server/sync/sync.go index 9685c4907..bad006551 100644 --- a/server/sync/sync.go +++ b/server/sync/sync.go @@ -1,132 +1,132 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sync - -import ( - "context" - "fmt" - "sync" - - "github.com/agntcy/dir/server/events" - "github.com/agntcy/dir/server/sync/config" - "github.com/agntcy/dir/server/sync/monitor" - synctypes "github.com/agntcy/dir/server/sync/types" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/utils/logging" -) - -var logger = logging.Logger("sync") - -// Service manages the synchronization operations. -type Service struct { - db types.DatabaseAPI - store types.StoreAPI - config config.Config - monitorService *monitor.MonitorService - eventBus *events.SafeEventBus - - scheduler *Scheduler - workers []*Worker - - stopCh chan struct{} - wg sync.WaitGroup -} - -// New creates a new sync service. 
-func New(db types.DatabaseAPI, store types.StoreAPI, opts types.APIOptions) (*Service, error) { - monitorService, err := monitor.NewMonitorService(db, store, opts.Config().Store.OCI, opts.Config().Sync.RegistryMonitor) - if err != nil { - return nil, fmt.Errorf("failed to create registry monitor service: %w", err) - } - - return &Service{ - db: db, - store: store, - config: opts.Config().Sync, - monitorService: monitorService, - eventBus: opts.EventBus(), - stopCh: make(chan struct{}), - }, nil -} - -// Start begins the sync service operations. -func (s *Service) Start(ctx context.Context) error { - logger.Info("Starting sync service", "workers", s.config.WorkerCount, "interval", s.config.SchedulerInterval) - - // Create work queue - workQueue := make(chan synctypes.WorkItem, 100) //nolint:mnd - - // Create and start scheduler - s.scheduler = NewScheduler(s.db, workQueue, s.config.SchedulerInterval) - - // Create and start workers - s.workers = make([]*Worker, s.config.WorkerCount) - for i := range s.config.WorkerCount { - s.workers[i] = NewWorker(i, s.db, s.store, workQueue, s.config.WorkerTimeout, s.monitorService, s.eventBus) - } - - // Start scheduler - s.wg.Add(1) - - go func() { - defer s.wg.Done() - - s.scheduler.Run(ctx, s.stopCh) - }() - - // Start workers - for _, worker := range s.workers { - s.wg.Add(1) - - go func(w *Worker) { - defer s.wg.Done() - - w.Run(ctx, s.stopCh) - }(worker) - } - - logger.Info("Sync service started successfully") - - return nil -} - -// Stop gracefully shuts down the sync service. -func (s *Service) Stop() error { - logger.Info("Stopping sync service") - - // Stop all workers and scheduler first - close(s.stopCh) - s.wg.Wait() - - // Stop monitor service - if err := s.monitorService.Stop(); err != nil { - logger.Error("Failed to stop monitor service", "error", err) - - return fmt.Errorf("failed to stop monitor service: %w", err) - } - - logger.Info("Sync service stopped") - - return nil -} - -// IsReady checks if the sync service is ready to process sync operations. -// Returns true if the scheduler and workers have been started. -func (s *Service) IsReady(_ context.Context) bool { - if s.scheduler == nil { - logger.Debug("Sync service not ready: scheduler not initialized") - - return false - } - - if len(s.workers) == 0 { - logger.Debug("Sync service not ready: no workers initialized") - - return false - } - - logger.Debug("Sync service ready", "workers", len(s.workers)) - - return true -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import ( + "context" + "fmt" + "sync" + + "github.com/agntcy/dir/server/events" + "github.com/agntcy/dir/server/sync/config" + "github.com/agntcy/dir/server/sync/monitor" + synctypes "github.com/agntcy/dir/server/sync/types" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/utils/logging" +) + +var logger = logging.Logger("sync") + +// Service manages the synchronization operations. +type Service struct { + db types.DatabaseAPI + store types.StoreAPI + config config.Config + monitorService *monitor.MonitorService + eventBus *events.SafeEventBus + + scheduler *Scheduler + workers []*Worker + + stopCh chan struct{} + wg sync.WaitGroup +} + +// New creates a new sync service. 
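+// It wires the registry monitor against the node's OCI store configuration;
+// construction fails if the ORAS repository client cannot be created.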
+func New(db types.DatabaseAPI, store types.StoreAPI, opts types.APIOptions) (*Service, error) { + monitorService, err := monitor.NewMonitorService(db, store, opts.Config().Store.OCI, opts.Config().Sync.RegistryMonitor) + if err != nil { + return nil, fmt.Errorf("failed to create registry monitor service: %w", err) + } + + return &Service{ + db: db, + store: store, + config: opts.Config().Sync, + monitorService: monitorService, + eventBus: opts.EventBus(), + stopCh: make(chan struct{}), + }, nil +} + +// Start begins the sync service operations. +func (s *Service) Start(ctx context.Context) error { + logger.Info("Starting sync service", "workers", s.config.WorkerCount, "interval", s.config.SchedulerInterval) + + // Create work queue + workQueue := make(chan synctypes.WorkItem, 100) //nolint:mnd + + // Create and start scheduler + s.scheduler = NewScheduler(s.db, workQueue, s.config.SchedulerInterval) + + // Create and start workers + s.workers = make([]*Worker, s.config.WorkerCount) + for i := range s.config.WorkerCount { + s.workers[i] = NewWorker(i, s.db, s.store, workQueue, s.config.WorkerTimeout, s.monitorService, s.eventBus) + } + + // Start scheduler + s.wg.Add(1) + + go func() { + defer s.wg.Done() + + s.scheduler.Run(ctx, s.stopCh) + }() + + // Start workers + for _, worker := range s.workers { + s.wg.Add(1) + + go func(w *Worker) { + defer s.wg.Done() + + w.Run(ctx, s.stopCh) + }(worker) + } + + logger.Info("Sync service started successfully") + + return nil +} + +// Stop gracefully shuts down the sync service. +func (s *Service) Stop() error { + logger.Info("Stopping sync service") + + // Stop all workers and scheduler first + close(s.stopCh) + s.wg.Wait() + + // Stop monitor service + if err := s.monitorService.Stop(); err != nil { + logger.Error("Failed to stop monitor service", "error", err) + + return fmt.Errorf("failed to stop monitor service: %w", err) + } + + logger.Info("Sync service stopped") + + return nil +} + +// IsReady checks if the sync service is ready to process sync operations. +// Returns true if the scheduler and workers have been started. +func (s *Service) IsReady(_ context.Context) bool { + if s.scheduler == nil { + logger.Debug("Sync service not ready: scheduler not initialized") + + return false + } + + if len(s.workers) == 0 { + logger.Debug("Sync service not ready: no workers initialized") + + return false + } + + logger.Debug("Sync service ready", "workers", len(s.workers)) + + return true +} diff --git a/server/sync/sync_events_test.go b/server/sync/sync_events_test.go index eefb12868..9523d1746 100644 --- a/server/sync/sync_events_test.go +++ b/server/sync/sync_events_test.go @@ -1,119 +1,119 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sync - -import ( - "testing" - - eventsv1 "github.com/agntcy/dir/api/events/v1" - "github.com/agntcy/dir/server/events" -) - -const ( - testSyncID = "test-sync" - testRemoteURL = "https://remote.example.com" - testErrorMsg = "connection refused" - testRecordCnt5 = "5" -) - -// TestSyncEventsEmission is a simple test to verify that sync events are emitted. -// This test verifies that the event bus methods are called correctly, -// without testing the complex sync logic itself. 
-// -//nolint:gocognit,cyclop // Test has multiple subtests with similar patterns -func TestSyncEventsEmission(t *testing.T) { - // Create event bus and subscribe - bus := events.NewEventBus() - safeEventBus := events.NewSafeEventBus(bus) - - req := &eventsv1.ListenRequest{} - - subID, eventCh := bus.Subscribe(req) - defer bus.Unsubscribe(subID) - - // Test SYNC_CREATED event - t.Run("SYNC_CREATED", func(t *testing.T) { - safeEventBus.SyncCreated(testSyncID+"-1", testRemoteURL) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_CREATED { - t.Errorf("Expected SYNC_CREATED, got %v", event.Type) - } - - if event.ResourceID != testSyncID+"-1" { - t.Errorf("Expected sync ID '%s-1', got %s", testSyncID, event.ResourceID) - } - - if event.Metadata["remote_url"] != testRemoteURL { - t.Errorf("Expected remote_url in metadata, got %v", event.Metadata) - } - default: - t.Error("Expected to receive SYNC_CREATED event") - } - }) - - // Test SYNC_COMPLETED event - t.Run("SYNC_COMPLETED", func(t *testing.T) { - safeEventBus.SyncCompleted(testSyncID+"-2", testRemoteURL, 5) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { - t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) - } - - if event.ResourceID != testSyncID+"-2" { - t.Errorf("Expected sync ID '%s-2', got %s", testSyncID, event.ResourceID) - } - - if event.Metadata["record_count"] != testRecordCnt5 { - t.Errorf("Expected record_count=%s in metadata, got %v", testRecordCnt5, event.Metadata) - } - default: - t.Error("Expected to receive SYNC_COMPLETED event") - } - }) - - // Test SYNC_FAILED event - t.Run("SYNC_FAILED", func(t *testing.T) { - safeEventBus.SyncFailed(testSyncID+"-3", testRemoteURL, testErrorMsg) - - // Wait for async delivery to complete - bus.WaitForAsyncPublish() - - select { - case event := <-eventCh: - if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_FAILED { - t.Errorf("Expected SYNC_FAILED, got %v", event.Type) - } - - if event.ResourceID != testSyncID+"-3" { - t.Errorf("Expected sync ID '%s-3', got %s", testSyncID, event.ResourceID) - } - - if event.Metadata["error"] != testErrorMsg { - t.Errorf("Expected error in metadata, got %v", event.Metadata) - } - default: - t.Error("Expected to receive SYNC_FAILED event") - } - }) -} - -// TestSyncWithNilEventBus verifies that sync works even with nil event bus (shouldn't panic). -func TestSyncWithNilEventBus(t *testing.T) { - safeEventBus := events.NewSafeEventBus(nil) - - // Should not panic - safeEventBus.SyncCreated(testSyncID, testRemoteURL) - safeEventBus.SyncCompleted(testSyncID, testRemoteURL, 10) - safeEventBus.SyncFailed(testSyncID, testRemoteURL, "error") -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import ( + "testing" + + eventsv1 "github.com/agntcy/dir/api/events/v1" + "github.com/agntcy/dir/server/events" +) + +const ( + testSyncID = "test-sync" + testRemoteURL = "https://remote.example.com" + testErrorMsg = "connection refused" + testRecordCnt5 = "5" +) + +// TestSyncEventsEmission is a simple test to verify that sync events are emitted. +// This test verifies that the event bus methods are called correctly, +// without testing the complex sync logic itself. 
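+// Each subtest publishes a single event and calls WaitForAsyncPublish so
+// the asynchronous delivery completes deterministically before asserting.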
+// +//nolint:gocognit,cyclop // Test has multiple subtests with similar patterns +func TestSyncEventsEmission(t *testing.T) { + // Create event bus and subscribe + bus := events.NewEventBus() + safeEventBus := events.NewSafeEventBus(bus) + + req := &eventsv1.ListenRequest{} + + subID, eventCh := bus.Subscribe(req) + defer bus.Unsubscribe(subID) + + // Test SYNC_CREATED event + t.Run("SYNC_CREATED", func(t *testing.T) { + safeEventBus.SyncCreated(testSyncID+"-1", testRemoteURL) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_CREATED { + t.Errorf("Expected SYNC_CREATED, got %v", event.Type) + } + + if event.ResourceID != testSyncID+"-1" { + t.Errorf("Expected sync ID '%s-1', got %s", testSyncID, event.ResourceID) + } + + if event.Metadata["remote_url"] != testRemoteURL { + t.Errorf("Expected remote_url in metadata, got %v", event.Metadata) + } + default: + t.Error("Expected to receive SYNC_CREATED event") + } + }) + + // Test SYNC_COMPLETED event + t.Run("SYNC_COMPLETED", func(t *testing.T) { + safeEventBus.SyncCompleted(testSyncID+"-2", testRemoteURL, 5) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_COMPLETED { + t.Errorf("Expected SYNC_COMPLETED, got %v", event.Type) + } + + if event.ResourceID != testSyncID+"-2" { + t.Errorf("Expected sync ID '%s-2', got %s", testSyncID, event.ResourceID) + } + + if event.Metadata["record_count"] != testRecordCnt5 { + t.Errorf("Expected record_count=%s in metadata, got %v", testRecordCnt5, event.Metadata) + } + default: + t.Error("Expected to receive SYNC_COMPLETED event") + } + }) + + // Test SYNC_FAILED event + t.Run("SYNC_FAILED", func(t *testing.T) { + safeEventBus.SyncFailed(testSyncID+"-3", testRemoteURL, testErrorMsg) + + // Wait for async delivery to complete + bus.WaitForAsyncPublish() + + select { + case event := <-eventCh: + if event.Type != eventsv1.EventType_EVENT_TYPE_SYNC_FAILED { + t.Errorf("Expected SYNC_FAILED, got %v", event.Type) + } + + if event.ResourceID != testSyncID+"-3" { + t.Errorf("Expected sync ID '%s-3', got %s", testSyncID, event.ResourceID) + } + + if event.Metadata["error"] != testErrorMsg { + t.Errorf("Expected error in metadata, got %v", event.Metadata) + } + default: + t.Error("Expected to receive SYNC_FAILED event") + } + }) +} + +// TestSyncWithNilEventBus verifies that sync works even with nil event bus (shouldn't panic). +func TestSyncWithNilEventBus(t *testing.T) { + safeEventBus := events.NewSafeEventBus(nil) + + // Should not panic + safeEventBus.SyncCreated(testSyncID, testRemoteURL) + safeEventBus.SyncCompleted(testSyncID, testRemoteURL, 10) + safeEventBus.SyncFailed(testSyncID, testRemoteURL, "error") +} diff --git a/server/sync/types/types.go b/server/sync/types/types.go index 3298ac804..52b8256e5 100644 --- a/server/sync/types/types.go +++ b/server/sync/types/types.go @@ -1,20 +1,20 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -// WorkItem represents a sync task to be processed by workers. -type WorkItem struct { - Type WorkItemType - SyncID string - RemoteDirectoryURL string - CIDs []string -} - -// WorkItemType represents the type of sync task. 
-type WorkItemType string - -const ( - WorkItemTypeSyncCreate WorkItemType = "sync-create" - WorkItemTypeSyncDelete WorkItemType = "sync-delete" -) +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +// WorkItem represents a sync task to be processed by workers. +type WorkItem struct { + Type WorkItemType + SyncID string + RemoteDirectoryURL string + CIDs []string +} + +// WorkItemType represents the type of sync task. +type WorkItemType string + +const ( + WorkItemTypeSyncCreate WorkItemType = "sync-create" + WorkItemTypeSyncDelete WorkItemType = "sync-delete" +) diff --git a/server/sync/worker.go b/server/sync/worker.go index f18a078a6..5c5039494 100644 --- a/server/sync/worker.go +++ b/server/sync/worker.go @@ -1,220 +1,220 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package sync - -import ( - "context" - "fmt" - "time" - - storev1 "github.com/agntcy/dir/api/store/v1" - "github.com/agntcy/dir/server/events" - ociconfig "github.com/agntcy/dir/server/store/oci/config" - syncconfig "github.com/agntcy/dir/server/sync/config" - "github.com/agntcy/dir/server/sync/monitor" - synctypes "github.com/agntcy/dir/server/sync/types" - "github.com/agntcy/dir/server/types" - zotutils "github.com/agntcy/dir/utils/zot" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" -) - -// Worker processes sync work items. -type Worker struct { - id int - db types.DatabaseAPI - store types.StoreAPI - workQueue <-chan synctypes.WorkItem - timeout time.Duration - monitorService *monitor.MonitorService - eventBus *events.SafeEventBus -} - -// NewWorker creates a new worker instance. -func NewWorker(id int, db types.DatabaseAPI, store types.StoreAPI, workQueue <-chan synctypes.WorkItem, timeout time.Duration, monitorService *monitor.MonitorService, eventBus *events.SafeEventBus) *Worker { - return &Worker{ - id: id, - db: db, - store: store, - workQueue: workQueue, - timeout: timeout, - monitorService: monitorService, - eventBus: eventBus, - } -} - -// Run starts the worker loop. -func (w *Worker) Run(ctx context.Context, stopCh <-chan struct{}) { - logger.Info("Starting sync worker", "worker_id", w.id) - - for { - select { - case <-ctx.Done(): - logger.Info("Worker stopping due to context cancellation", "worker_id", w.id) - - return - case <-stopCh: - logger.Info("Worker stopping due to stop signal", "worker_id", w.id) - - return - case workItem := <-w.workQueue: - w.processWorkItem(ctx, workItem) - } - } -} - -// processWorkItem handles a single sync work item. -func (w *Worker) processWorkItem(ctx context.Context, item synctypes.WorkItem) { - logger.Info("Processing sync work item", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) - // TODO Check if store is oci and zot. 
If not, fail - - // Create timeout context for this work item - workCtx, cancel := context.WithTimeout(ctx, w.timeout) - defer cancel() - - var finalStatus storev1.SyncStatus - - switch item.Type { - case synctypes.WorkItemTypeSyncCreate: - // Emit SYNC_CREATED event when sync operation begins - w.eventBus.SyncCreated(item.SyncID, item.RemoteDirectoryURL) - - finalStatus = storev1.SyncStatus_SYNC_STATUS_IN_PROGRESS - - err := w.addSync(workCtx, item) - if err != nil { - logger.Error("Sync failed", "worker_id", w.id, "sync_id", item.SyncID, "error", err) - - // Emit SYNC_FAILED event - w.eventBus.SyncFailed(item.SyncID, item.RemoteDirectoryURL, err.Error()) - - finalStatus = storev1.SyncStatus_SYNC_STATUS_FAILED - } else { - // Emit SYNC_COMPLETED event when sync succeeds - // Note: The sync continues monitoring in background, but the initial sync operation is complete - recordCount := len(item.CIDs) - w.eventBus.SyncCompleted(item.SyncID, item.RemoteDirectoryURL, recordCount) - } - - case synctypes.WorkItemTypeSyncDelete: - finalStatus = storev1.SyncStatus_SYNC_STATUS_DELETED - - err := w.deleteSync(workCtx, item) - if err != nil { - logger.Error("Sync delete failed", "worker_id", w.id, "sync_id", item.SyncID, "error", err) - - finalStatus = storev1.SyncStatus_SYNC_STATUS_FAILED - } - - default: - logger.Error("Unknown work item type", "worker_id", w.id, "sync_id", item.SyncID, "type", item.Type) - } - - // Update status in database - if err := w.db.UpdateSyncStatus(item.SyncID, finalStatus); err != nil { - logger.Error("Failed to update sync status", "worker_id", w.id, "sync_id", item.SyncID, "status", finalStatus, "error", err) - } -} - -func (w *Worker) deleteSync(_ context.Context, item synctypes.WorkItem) error { - logger.Debug("Starting sync delete operation", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) - - // Get remote registry URL from sync object - remoteRegistryURL, err := w.db.GetSyncRemoteRegistry(item.SyncID) - if err != nil { - return fmt.Errorf("failed to get remote registry URL: %w", err) - } - - // Remove registry from zot configuration - if err := zotutils.RemoveRegistryFromSyncConfig(zotutils.DefaultZotConfigPath, remoteRegistryURL); err != nil { - return fmt.Errorf("failed to remove registry from zot sync: %w", err) - } - - // Start graceful monitoring shutdown - this will continue monitoring - // until all records that zot may still be syncing are indexed - if err := w.monitorService.StopSyncMonitoring(item.SyncID); err != nil { //nolint:contextcheck - // Warn but continue - logger.Warn("Failed to initiate graceful monitoring shutdown", "worker_id", w.id, "sync_id", item.SyncID, "error", err) - } - - return nil -} - -// addSync implements the core synchronization logic. 
-// -//nolint:unparam -func (w *Worker) addSync(ctx context.Context, item synctypes.WorkItem) error { - logger.Debug("Starting sync operation", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) - - // Negotiate credentials with remote node using RequestRegistryCredentials RPC - remoteRegistryURL, credentials, err := w.negotiateCredentials(ctx, item.RemoteDirectoryURL) - if err != nil { - return fmt.Errorf("failed to negotiate credentials: %w", err) - } - - // Store credentials for later use in sync process - logger.Debug("Credentials negotiated successfully", "worker_id", w.id, "sync_id", item.SyncID) - - // Update sync object with remote registry URL - if err := w.db.UpdateSyncRemoteRegistry(item.SyncID, remoteRegistryURL); err != nil { - return fmt.Errorf("failed to update sync remote registry: %w", err) - } - - // Update zot configuration with sync extension to trigger sync - if err := zotutils.AddRegistryToSyncConfig(zotutils.DefaultZotConfigPath, remoteRegistryURL, ociconfig.DefaultRepositoryName, zotsyncconfig.Credentials{ - Username: credentials.Username, - Password: credentials.Password, - }, item.CIDs); err != nil { - return fmt.Errorf("failed to add registry to zot sync: %w", err) - } - - // Start monitoring the local registry for changes after Zot sync is configured - if err := w.monitorService.StartSyncMonitoring(item.SyncID); err != nil { //nolint:contextcheck - return fmt.Errorf("failed to start registry monitoring: %w", err) - } - - logger.Debug("Sync operation completed", "worker_id", w.id, "sync_id", item.SyncID) - - return nil -} - -// negotiateCredentials negotiates registry credentials with the remote Directory node. -func (w *Worker) negotiateCredentials(ctx context.Context, remoteDirectoryURL string) (string, syncconfig.AuthConfig, error) { - logger.Debug("Starting credential negotiation", "worker_id", w.id, "remote_url", remoteDirectoryURL) - - // Create gRPC connection to the remote Directory node - conn, err := grpc.NewClient( - remoteDirectoryURL, - grpc.WithTransportCredentials(insecure.NewCredentials()), - ) - if err != nil { - return "", syncconfig.AuthConfig{}, fmt.Errorf("failed to create gRPC connection to remote node %s: %w", remoteDirectoryURL, err) - } - defer conn.Close() - - // Create SyncService client - syncClient := storev1.NewSyncServiceClient(conn) - - // TODO: Get actual peer ID from the routing system or configuration - requestingNodeID := "directory://local-node" - - // Make the credential negotiation request - resp, err := syncClient.RequestRegistryCredentials(ctx, &storev1.RequestRegistryCredentialsRequest{ - RequestingNodeId: requestingNodeID, - }) - if err != nil { - return "", syncconfig.AuthConfig{}, fmt.Errorf("failed to request registry credentials from %s: %w", remoteDirectoryURL, err) - } - - // Check if the negotiation was successful - if !resp.GetSuccess() { - return "", syncconfig.AuthConfig{}, fmt.Errorf("credential negotiation failed: %s", resp.GetErrorMessage()) - } - - return resp.GetRemoteRegistryUrl(), syncconfig.AuthConfig{ - Username: resp.GetBasicAuth().GetUsername(), - Password: resp.GetBasicAuth().GetPassword(), - }, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package sync + +import ( + "context" + "fmt" + "time" + + storev1 "github.com/agntcy/dir/api/store/v1" + "github.com/agntcy/dir/server/events" + ociconfig "github.com/agntcy/dir/server/store/oci/config" + syncconfig "github.com/agntcy/dir/server/sync/config" + 
"github.com/agntcy/dir/server/sync/monitor" + synctypes "github.com/agntcy/dir/server/sync/types" + "github.com/agntcy/dir/server/types" + zotutils "github.com/agntcy/dir/utils/zot" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" +) + +// Worker processes sync work items. +type Worker struct { + id int + db types.DatabaseAPI + store types.StoreAPI + workQueue <-chan synctypes.WorkItem + timeout time.Duration + monitorService *monitor.MonitorService + eventBus *events.SafeEventBus +} + +// NewWorker creates a new worker instance. +func NewWorker(id int, db types.DatabaseAPI, store types.StoreAPI, workQueue <-chan synctypes.WorkItem, timeout time.Duration, monitorService *monitor.MonitorService, eventBus *events.SafeEventBus) *Worker { + return &Worker{ + id: id, + db: db, + store: store, + workQueue: workQueue, + timeout: timeout, + monitorService: monitorService, + eventBus: eventBus, + } +} + +// Run starts the worker loop. +func (w *Worker) Run(ctx context.Context, stopCh <-chan struct{}) { + logger.Info("Starting sync worker", "worker_id", w.id) + + for { + select { + case <-ctx.Done(): + logger.Info("Worker stopping due to context cancellation", "worker_id", w.id) + + return + case <-stopCh: + logger.Info("Worker stopping due to stop signal", "worker_id", w.id) + + return + case workItem := <-w.workQueue: + w.processWorkItem(ctx, workItem) + } + } +} + +// processWorkItem handles a single sync work item. +func (w *Worker) processWorkItem(ctx context.Context, item synctypes.WorkItem) { + logger.Info("Processing sync work item", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) + // TODO Check if store is oci and zot. If not, fail + + // Create timeout context for this work item + workCtx, cancel := context.WithTimeout(ctx, w.timeout) + defer cancel() + + var finalStatus storev1.SyncStatus + + switch item.Type { + case synctypes.WorkItemTypeSyncCreate: + // Emit SYNC_CREATED event when sync operation begins + w.eventBus.SyncCreated(item.SyncID, item.RemoteDirectoryURL) + + finalStatus = storev1.SyncStatus_SYNC_STATUS_IN_PROGRESS + + err := w.addSync(workCtx, item) + if err != nil { + logger.Error("Sync failed", "worker_id", w.id, "sync_id", item.SyncID, "error", err) + + // Emit SYNC_FAILED event + w.eventBus.SyncFailed(item.SyncID, item.RemoteDirectoryURL, err.Error()) + + finalStatus = storev1.SyncStatus_SYNC_STATUS_FAILED + } else { + // Emit SYNC_COMPLETED event when sync succeeds + // Note: The sync continues monitoring in background, but the initial sync operation is complete + recordCount := len(item.CIDs) + w.eventBus.SyncCompleted(item.SyncID, item.RemoteDirectoryURL, recordCount) + } + + case synctypes.WorkItemTypeSyncDelete: + finalStatus = storev1.SyncStatus_SYNC_STATUS_DELETED + + err := w.deleteSync(workCtx, item) + if err != nil { + logger.Error("Sync delete failed", "worker_id", w.id, "sync_id", item.SyncID, "error", err) + + finalStatus = storev1.SyncStatus_SYNC_STATUS_FAILED + } + + default: + logger.Error("Unknown work item type", "worker_id", w.id, "sync_id", item.SyncID, "type", item.Type) + } + + // Update status in database + if err := w.db.UpdateSyncStatus(item.SyncID, finalStatus); err != nil { + logger.Error("Failed to update sync status", "worker_id", w.id, "sync_id", item.SyncID, "status", finalStatus, "error", err) + } +} + +func (w *Worker) deleteSync(_ context.Context, item synctypes.WorkItem) error { + logger.Debug("Starting sync 
delete operation", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) + + // Get remote registry URL from sync object + remoteRegistryURL, err := w.db.GetSyncRemoteRegistry(item.SyncID) + if err != nil { + return fmt.Errorf("failed to get remote registry URL: %w", err) + } + + // Remove registry from zot configuration + if err := zotutils.RemoveRegistryFromSyncConfig(zotutils.DefaultZotConfigPath, remoteRegistryURL); err != nil { + return fmt.Errorf("failed to remove registry from zot sync: %w", err) + } + + // Start graceful monitoring shutdown - this will continue monitoring + // until all records that zot may still be syncing are indexed + if err := w.monitorService.StopSyncMonitoring(item.SyncID); err != nil { //nolint:contextcheck + // Warn but continue + logger.Warn("Failed to initiate graceful monitoring shutdown", "worker_id", w.id, "sync_id", item.SyncID, "error", err) + } + + return nil +} + +// addSync implements the core synchronization logic. +// +//nolint:unparam +func (w *Worker) addSync(ctx context.Context, item synctypes.WorkItem) error { + logger.Debug("Starting sync operation", "worker_id", w.id, "sync_id", item.SyncID, "remote_url", item.RemoteDirectoryURL) + + // Negotiate credentials with remote node using RequestRegistryCredentials RPC + remoteRegistryURL, credentials, err := w.negotiateCredentials(ctx, item.RemoteDirectoryURL) + if err != nil { + return fmt.Errorf("failed to negotiate credentials: %w", err) + } + + // Store credentials for later use in sync process + logger.Debug("Credentials negotiated successfully", "worker_id", w.id, "sync_id", item.SyncID) + + // Update sync object with remote registry URL + if err := w.db.UpdateSyncRemoteRegistry(item.SyncID, remoteRegistryURL); err != nil { + return fmt.Errorf("failed to update sync remote registry: %w", err) + } + + // Update zot configuration with sync extension to trigger sync + if err := zotutils.AddRegistryToSyncConfig(zotutils.DefaultZotConfigPath, remoteRegistryURL, ociconfig.DefaultRepositoryName, zotsyncconfig.Credentials{ + Username: credentials.Username, + Password: credentials.Password, + }, item.CIDs); err != nil { + return fmt.Errorf("failed to add registry to zot sync: %w", err) + } + + // Start monitoring the local registry for changes after Zot sync is configured + if err := w.monitorService.StartSyncMonitoring(item.SyncID); err != nil { //nolint:contextcheck + return fmt.Errorf("failed to start registry monitoring: %w", err) + } + + logger.Debug("Sync operation completed", "worker_id", w.id, "sync_id", item.SyncID) + + return nil +} + +// negotiateCredentials negotiates registry credentials with the remote Directory node. 
+func (w *Worker) negotiateCredentials(ctx context.Context, remoteDirectoryURL string) (string, syncconfig.AuthConfig, error) { + logger.Debug("Starting credential negotiation", "worker_id", w.id, "remote_url", remoteDirectoryURL) + + // Create gRPC connection to the remote Directory node + conn, err := grpc.NewClient( + remoteDirectoryURL, + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + if err != nil { + return "", syncconfig.AuthConfig{}, fmt.Errorf("failed to create gRPC connection to remote node %s: %w", remoteDirectoryURL, err) + } + defer conn.Close() + + // Create SyncService client + syncClient := storev1.NewSyncServiceClient(conn) + + // TODO: Get actual peer ID from the routing system or configuration + requestingNodeID := "directory://local-node" + + // Make the credential negotiation request + resp, err := syncClient.RequestRegistryCredentials(ctx, &storev1.RequestRegistryCredentialsRequest{ + RequestingNodeId: requestingNodeID, + }) + if err != nil { + return "", syncconfig.AuthConfig{}, fmt.Errorf("failed to request registry credentials from %s: %w", remoteDirectoryURL, err) + } + + // Check if the negotiation was successful + if !resp.GetSuccess() { + return "", syncconfig.AuthConfig{}, fmt.Errorf("credential negotiation failed: %s", resp.GetErrorMessage()) + } + + return resp.GetRemoteRegistryUrl(), syncconfig.AuthConfig{ + Username: resp.GetBasicAuth().GetUsername(), + Password: resp.GetBasicAuth().GetPassword(), + }, nil +} diff --git a/server/types/README.md b/server/types/README.md index a8038e61e..50a649793 100644 --- a/server/types/README.md +++ b/server/types/README.md @@ -1,633 +1,633 @@ -# Server Types - -The `server/types` package provides a unified type system and interface abstraction layer for the Dir server. It enables version-agnostic handling of OASF (Open Agent Specification Format) records across different schema versions while maintaining a consistent API surface. 
- -## Overview - -The types system enables: -- **Version-agnostic record processing** across OASF versions -- **Unified API interfaces** for storage, search, and routing operations -- **Adapter pattern implementation** for seamless version compatibility -- **Rich filtering and search capabilities** with composable filter options -- **Type-safe abstractions** over protocol buffer definitions - -## Architecture - -``` -┌─────────────────────┐ ┌─────────────────────┐ ┌─────────────────────┐ -│ OASF v0.X.X │ │ OASF v0.Y.Y │ │ OASF v0.Z.Z │ -│ (types/X) │ │ (types/Y) │ │ (types/Z) │ -└─────────────────────┘ └─────────────────────┘ └─────────────────────┘ - │ │ │ - └─────────────┬─────────────┴─────────────┬───────────┘ - │ │ - ┌─────────────────────┐ ┌─────────────────────┐ - │ Adapter Pattern │ │ Unified Interfaces │ - │ (Version Bridge) │ │ (types.Record) │ - └─────────────────────┘ └─────────────────────┘ - │ │ - └─────────────┬─────────────┘ - │ - ┌─────────────────────────────┐ - │ API Implementations │ - │ (Store, Search, Routing) │ - └─────────────────────────────┘ -``` - -## Core Interfaces - -### Record System - -The record system provides unified interfaces for handling agent records regardless of their OASF version: - -```go -// Core record interface - all records implement this -type Record interface { - GetCid() string - GetRecordData() (RecordData, error) -} - -// Metadata-only interface for fast lookups -type RecordMeta interface { - GetCid() string - GetAnnotations() map[string]string - GetSchemaVersion() string - GetCreatedAt() string -} - -// Reference interface for record identification -type RecordRef interface { - GetCid() string -} -``` - -### RecordData Interface - -The `RecordData` interface provides version-agnostic access to all record fields: - -```go -type RecordData interface { - // Core Identity - GetName() string - GetVersion() string - GetDescription() string - GetSchemaVersion() string - GetCreatedAt() string - GetAuthors() []string - - // Capabilities - GetSkills() []Skill - GetLocators() []Locator - GetModules() []Module - - // Security & Versioning - GetSignature() Signature - GetPreviousRecordCid() string - - // Custom Metadata - GetAnnotations() map[string]string -} -``` - -### Component Interfaces - -Each record component has its own interface for consistent access: - -```go -// Skills represent agent capabilities -type Skill interface { - GetAnnotations() map[string]string - GetName() string - GetID() uint64 -} - -// Locators define deployment information -type Locator interface { - GetAnnotations() map[string]string - GetType() string - GetURL() string - GetSize() uint64 - GetDigest() string -} - -// Modules provide additional functionality -type Module interface { - GetName() string - GetData() map[string]any -} - -// Signature provides integrity verification -type Signature interface { - GetAnnotations() map[string]string - GetSignedAt() string - GetAlgorithm() string - GetSignature() string - GetCertificate() string - GetContentType() string - GetContentBundle() string -} -``` - -## Adapter Pattern - -The adapter pattern bridges the gap between different OASF versions, allowing the same code to work with all schema versions. - -### Core Adapter - -The main [RecordAdapter](./adapters/record.go) automatically selects the appropriate version-specific adapter. -Each OASF version has its own adapter that implements the unified interface. 
- -## API Interfaces - -The types package defines three main API interfaces for server operations: - -### StoreAPI - Content Storage - -Handles content-addressable storage operations: - -```go -type StoreAPI interface { - // Push record to content store - Push(context.Context, *corev1.Record) (*corev1.RecordRef, error) - - // Pull record from content store - Pull(context.Context, *corev1.RecordRef) (*corev1.Record, error) - - // Lookup metadata about the record from reference - Lookup(context.Context, *corev1.RecordRef) (*corev1.RecordMeta, error) - - // Delete the record - Delete(context.Context, *corev1.RecordRef) error -} -``` - -**Example Usage:** -```go -// Store an agent record -recordRef, err := store.Push(ctx, record) -if err != nil { - return fmt.Errorf("failed to store record: %w", err) -} - -// Fast metadata lookup -meta, err := store.Lookup(ctx, recordRef) -if err != nil { - return fmt.Errorf("record not found: %w", err) -} - -// Full record retrieval -fullRecord, err := store.Pull(ctx, recordRef) -if err != nil { - return fmt.Errorf("failed to pull record: %w", err) -} -``` - -### SearchAPI - Content Discovery - -Provides rich search and filtering capabilities: - -```go -type SearchAPI interface { - // AddRecord adds a new record to the search database - AddRecord(record Record) error - - // GetRecords retrieves records based on filters - GetRecords(opts ...FilterOption) ([]Record, error) -} -``` - -**Filter Options:** -```go -// Core filtering -WithLimit(limit int) // Pagination limit -WithOffset(offset int) // Pagination offset -WithName(name string) // Name partial match -WithVersion(version string) // Exact version match - -// Capability filtering -WithSkillIDs(ids ...uint64) // Filter by skill IDs -WithSkillNames(names ...string) // Filter by skill names -WithModuleNames(names ...string) // Filter by module names - -// Infrastructure filtering -WithLocatorTypes(types ...string) // Filter by deployment types -WithLocatorURLs(urls ...string) // Filter by locator URLs -``` - -**Example Usage:** -```go -// Search for Docker-deployable agents with NLP skills -records, err := search.GetRecords( - WithSkillNames("natural-language-processing", "text-analysis"), - WithLocatorTypes("docker"), - WithLimit(10), -) - -// Search for specific agent versions -records, err := search.GetRecords( - WithName("aws-ec2-agent"), - WithVersion("1.2.0"), -) - -// Search for agents by organization -records, err := search.GetRecords( - WithModuleNames("monitoring"), - WithLocatorTypes("kubernetes", "helm"), - WithOffset(20), - WithLimit(10), -) -``` - -### RoutingAPI - Network Operations - -Handles peer-to-peer network operations: - -```go -type RoutingAPI interface { - // Publish record to the network - Publish(context.Context, *corev1.RecordRef, *corev1.Record) error - - // Search records from the network - List(context.Context, *routingv1.ListRequest) (<-chan *routingv1.LegacyListResponse_Item, error) - - // Unpublish record from the network - Unpublish(context.Context, *corev1.RecordRef, *corev1.Record) error -} -``` - -**Example Usage:** -```go -// Publish agent to network -err := routing.Publish(ctx, recordRef, record) -if err != nil { - return fmt.Errorf("failed to publish: %w", err) -} - -// Search network for agents -listReq := &routingv1.ListRequest{ - Limit: 50, - // ... 
other search criteria -} - -resultChan, err := routing.List(ctx, listReq) -if err != nil { - return fmt.Errorf("search failed: %w", err) -} - -// Process results -for item := range resultChan { - // Handle each discovered agent - processDiscoveredAgent(item) -} -``` - -## Usage Examples - -### Version-Agnostic Record Processing - -The adapter pattern allows the same code to work with any OASF version: - -```go -func ProcessAnyRecord(record *corev1.Record) error { - // Create adapter - automatically handles version detection - adapter := adapters.NewRecordAdapter(record) - data := adapter.GetRecordData() - - // Now use unified interface regardless of version - fmt.Printf("Agent: %s v%s\n", data.GetName(), data.GetVersion()) - fmt.Printf("Description: %s\n", data.GetDescription()) - - // Process skills - works for all versions - skills := data.GetSkills() - for _, skill := range skills { - fmt.Printf("Skill: %s (ID: %d)\n", skill.GetName(), skill.GetID()) - } - - // Process locators - consistent interface - locators := data.GetLocators() - for _, locator := range locators { - fmt.Printf("Deployment: %s at %s\n", locator.GetType(), locator.GetURL()) - } - - return nil -} -``` - -### Cross-Version Compatibility - -The same function works with different OASF versions: - -```go -// OASF v0.3.1 record -v1Record := corev1.New(&typesv1alpha0.Agent{ - Name: "nlp-agent", - Skills: []*typesv1alpha0.Skill{ - {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, - }, -}) - -// OASF 0.7.0 record -v3Record := corev1.New(&typesv1alpha1.Record{ - Name: "nlp-agent", - Skills: []*typesv1alpha1.Skill{ - {Name: "natural-language-processing"}, - }, -}) - -// Same processing function works for both -ProcessAnyRecord(v1Record) // Works! -ProcessAnyRecord(v3Record) // Works! 
-``` - -### Advanced Search Patterns - -Building complex search queries: - -```go -func FindProductionAgents(search SearchAPI) ([]Record, error) { - // Find production-ready agents with specific capabilities - return search.GetRecords( - WithSkillNames("production-monitoring", "error-handling"), - WithModuleNames("security", "logging"), - WithLocatorTypes("kubernetes"), - WithLimit(20), - ) -} - -func FindDevelopmentAgents(search SearchAPI, team string) ([]Record, error) { - // Find development agents for a specific team - return search.GetRecords( - WithName("*-dev"), // Development naming pattern - WithModuleNames("debugging", "testing"), - WithLocatorTypes("docker"), - WithLimit(50), - ) -} - -func PaginateAllAgents(search SearchAPI) error { - offset := 0 - limit := 10 - - for { - records, err := search.GetRecords( - WithOffset(offset), - WithLimit(limit), - ) - if err != nil { - return err - } - - if len(records) == 0 { - break // No more records - } - - // Process batch - for _, record := range records { - processRecord(record) - } - - offset += limit - } - - return nil -} -``` - -## Data Store Integration - -The types package integrates with the datastore abstraction: - -```go -// Datastore provides key-value storage with path-like queries -type Datastore interface { - datastore.Batching // From go-datastore -} -``` - -**Supported Backends:** -- **Badger** - High-performance embedded database -- **BoltDB** - Pure Go embedded key-value store -- **LevelDB** - Fast key-value storage library -- **Memory** - In-memory storage for testing -- **Map** - Simple map-based storage - -**Use Cases:** -- **Peer Information** - Store known peer addresses and capabilities -- **Content Cache** - Cache frequently accessed records -- **Metadata Storage** - Store search indices and annotations -- **Session Data** - Temporary data and state information - -## Configuration and Setup - -### API Options - -The API system uses dependency injection for configuration: - -```go -type APIOptions interface { - Config() *config.Config // Read-only configuration access -} - -// Create API options -opts := NewOptions(configInstance) - -// Access configuration in implementations -func (s *storeImpl) setupStorage() error { - cfg := s.opts.Config() - // Use configuration... -} -``` - -### Main API Interface - -The unified API provides access to all subsystems: - -```go -type API interface { - Options() APIOptions // Get configuration options - Store() StoreAPI // Content storage operations - Routing() RoutingAPI // Network routing operations - Search() SearchAPI // Search and discovery -} - -// Usage example -func setupServer(api API) error { - store := api.Store() - search := api.Search() - routing := api.Routing() - - // Configure and use services... 
- return nil -} -``` - -## Error Handling - -The types package uses standard Go error handling patterns: - -```go -// Storage errors -if err := store.Push(ctx, record); err != nil { - switch { - case errors.Is(err, ErrRecordExists): - // Handle duplicate record - case errors.Is(err, ErrInvalidCID): - // Handle invalid content identifier - default: - // Handle general error - } -} - -// Search errors -records, err := search.GetRecords(WithName("invalid-agent")) -if err != nil { - return fmt.Errorf("search failed: %w", err) -} - -if len(records) == 0 { - // No results found -} -``` - -## Testing Support - -The types package provides interfaces that are easily mockable: - -```go -// Mock implementations for testing -type MockStore struct { - records map[string]*corev1.Record -} - -func (m *MockStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { - cid := record.GetCid() - m.records[cid] = record - return &corev1.RecordRef{Cid: cid}, nil -} - -// Use in tests -func TestAgentProcessing(t *testing.T) { - mockStore := &MockStore{records: make(map[string]*corev1.Record)} - - // Test with mock - err := processAgent(mockStore, testRecord) - assert.NoError(t, err) -} -``` - -## Performance Considerations - -### Adapter Overhead - -The adapter pattern introduces minimal overhead: -- **Memory**: Small wrapper objects around existing structs -- **CPU**: Single virtual function call per field access -- **GC**: No additional allocations for simple field access - -### Interface Benefits - -Despite minimal overhead, the benefits are significant: -- **Code Reuse**: Same code works across all OASF versions -- **Maintainability**: Single implementation to maintain -- **Type Safety**: Compile-time verification of compatibility -- **Testing**: Easy to mock and test components - -### Optimization Tips - -```go -// Cache adapters when processing many fields -adapter := adapters.NewRecordAdapter(record) -data := adapter.GetRecordData() - -// Process multiple fields efficiently -name := data.GetName() -version := data.GetVersion() -skills := data.GetSkills() - -// Avoid recreating adapters in loops -for _, record := range records { - adapter := adapters.NewRecordAdapter(record) // OK: single creation - processRecord(adapter.GetRecordData()) -} -``` - -## Best Practices - -### Record Processing - -1. **Use adapters for version-agnostic code**: - ```go - // Good: Works with all versions - adapter := adapters.NewRecordAdapter(record) - data := adapter.GetRecordData() - - // Avoid: Version-specific access - if v1 := record.GetV1(); v1 != nil { - // V1-specific code - } - ``` - -2. **Handle nil cases gracefully**: - ```go - data := adapter.GetRecordData() - if data == nil { - return errors.New("invalid record data") - } - ``` - -3. **Process collections efficiently**: - ```go - skills := data.GetSkills() - if len(skills) == 0 { - return nil // No skills to process - } - - for _, skill := range skills { - processSkill(skill) - } - ``` - -### API Design - -1. **Use interface composition**: - ```go - type ExtendedAPI interface { - API - // Additional methods - Backup() error - } - ``` - -2. **Leverage filter options**: - ```go - // Composable and readable - records, err := search.GetRecords( - WithName("production-*"), - WithSkillNames("monitoring"), - WithLimit(50), - ) - ``` - -3. 
**Handle context properly**: - ```go - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - err := store.Push(ctx, record) - ``` - -## Dependencies - -### Core Dependencies -- **`github.com/agntcy/dir/api/core/v1`** - Core protobuf definitions -- **`github.com/agntcy/dir/api/objects/*`** - OASF object definitions -- **`github.com/ipfs/go-datastore`** - Datastore abstraction -- **`google.golang.org/protobuf`** - Protocol buffer support - -### Integration Points -- **Storage Layer** - OCI implementations -- **Search Layer** - SQLite, in-memory implementations -- **Routing Layer** - P2P networking implementations -- **Config System** - Server configuration management - +# Server Types + +The `server/types` package provides a unified type system and interface abstraction layer for the Dir server. It enables version-agnostic handling of OASF (Open Agent Specification Format) records across different schema versions while maintaining a consistent API surface. + +## Overview + +The types system enables: +- **Version-agnostic record processing** across OASF versions +- **Unified API interfaces** for storage, search, and routing operations +- **Adapter pattern implementation** for seamless version compatibility +- **Rich filtering and search capabilities** with composable filter options +- **Type-safe abstractions** over protocol buffer definitions + +## Architecture + +``` +┌─────────────────────┐ ┌─────────────────────┐ ┌─────────────────────┐ +│ OASF v0.X.X │ │ OASF v0.Y.Y │ │ OASF v0.Z.Z │ +│ (types/X) │ │ (types/Y) │ │ (types/Z) │ +└─────────────────────┘ └─────────────────────┘ └─────────────────────┘ + │ │ │ + └─────────────┬─────────────┴─────────────┬───────────┘ + │ │ + ┌─────────────────────┐ ┌─────────────────────┐ + │ Adapter Pattern │ │ Unified Interfaces │ + │ (Version Bridge) │ │ (types.Record) │ + └─────────────────────┘ └─────────────────────┘ + │ │ + └─────────────┬─────────────┘ + │ + ┌─────────────────────────────┐ + │ API Implementations │ + │ (Store, Search, Routing) │ + └─────────────────────────────┘ +``` + +## Core Interfaces + +### Record System + +The record system provides unified interfaces for handling agent records regardless of their OASF version: + +```go +// Core record interface - all records implement this +type Record interface { + GetCid() string + GetRecordData() (RecordData, error) +} + +// Metadata-only interface for fast lookups +type RecordMeta interface { + GetCid() string + GetAnnotations() map[string]string + GetSchemaVersion() string + GetCreatedAt() string +} + +// Reference interface for record identification +type RecordRef interface { + GetCid() string +} +``` + +### RecordData Interface + +The `RecordData` interface provides version-agnostic access to all record fields: + +```go +type RecordData interface { + // Core Identity + GetName() string + GetVersion() string + GetDescription() string + GetSchemaVersion() string + GetCreatedAt() string + GetAuthors() []string + + // Capabilities + GetSkills() []Skill + GetLocators() []Locator + GetModules() []Module + + // Security & Versioning + GetSignature() Signature + GetPreviousRecordCid() string + + // Custom Metadata + GetAnnotations() map[string]string +} +``` + +### Component Interfaces + +Each record component has its own interface for consistent access: + +```go +// Skills represent agent capabilities +type Skill interface { + GetAnnotations() map[string]string + GetName() string + GetID() uint64 +} + +// Locators define deployment information +type 
Locator interface { + GetAnnotations() map[string]string + GetType() string + GetURL() string + GetSize() uint64 + GetDigest() string +} + +// Modules provide additional functionality +type Module interface { + GetName() string + GetData() map[string]any +} + +// Signature provides integrity verification +type Signature interface { + GetAnnotations() map[string]string + GetSignedAt() string + GetAlgorithm() string + GetSignature() string + GetCertificate() string + GetContentType() string + GetContentBundle() string +} +``` + +## Adapter Pattern + +The adapter pattern bridges the gap between different OASF versions, allowing the same code to work with all schema versions. + +### Core Adapter + +The main [RecordAdapter](./adapters/record.go) automatically selects the appropriate version-specific adapter. +Each OASF version has its own adapter that implements the unified interface. + +## API Interfaces + +The types package defines three main API interfaces for server operations: + +### StoreAPI - Content Storage + +Handles content-addressable storage operations: + +```go +type StoreAPI interface { + // Push record to content store + Push(context.Context, *corev1.Record) (*corev1.RecordRef, error) + + // Pull record from content store + Pull(context.Context, *corev1.RecordRef) (*corev1.Record, error) + + // Lookup metadata about the record from reference + Lookup(context.Context, *corev1.RecordRef) (*corev1.RecordMeta, error) + + // Delete the record + Delete(context.Context, *corev1.RecordRef) error +} +``` + +**Example Usage:** +```go +// Store an agent record +recordRef, err := store.Push(ctx, record) +if err != nil { + return fmt.Errorf("failed to store record: %w", err) +} + +// Fast metadata lookup +meta, err := store.Lookup(ctx, recordRef) +if err != nil { + return fmt.Errorf("record not found: %w", err) +} + +// Full record retrieval +fullRecord, err := store.Pull(ctx, recordRef) +if err != nil { + return fmt.Errorf("failed to pull record: %w", err) +} +``` + +### SearchAPI - Content Discovery + +Provides rich search and filtering capabilities: + +```go +type SearchAPI interface { + // AddRecord adds a new record to the search database + AddRecord(record Record) error + + // GetRecords retrieves records based on filters + GetRecords(opts ...FilterOption) ([]Record, error) +} +``` + +**Filter Options:** +```go +// Core filtering +WithLimit(limit int) // Pagination limit +WithOffset(offset int) // Pagination offset +WithName(name string) // Name partial match +WithVersion(version string) // Exact version match + +// Capability filtering +WithSkillIDs(ids ...uint64) // Filter by skill IDs +WithSkillNames(names ...string) // Filter by skill names +WithModuleNames(names ...string) // Filter by module names + +// Infrastructure filtering +WithLocatorTypes(types ...string) // Filter by deployment types +WithLocatorURLs(urls ...string) // Filter by locator URLs +``` + +**Example Usage:** +```go +// Search for Docker-deployable agents with NLP skills +records, err := search.GetRecords( + WithSkillNames("natural-language-processing", "text-analysis"), + WithLocatorTypes("docker"), + WithLimit(10), +) + +// Search for specific agent versions +records, err := search.GetRecords( + WithName("aws-ec2-agent"), + WithVersion("1.2.0"), +) + +// Search for agents by organization +records, err := search.GetRecords( + WithModuleNames("monitoring"), + WithLocatorTypes("kubernetes", "helm"), + WithOffset(20), + WithLimit(10), +) +``` + +### RoutingAPI - Network Operations + +Handles peer-to-peer network 
operations: + +```go +type RoutingAPI interface { + // Publish record to the network + Publish(context.Context, *corev1.RecordRef, *corev1.Record) error + + // Search records from the network + List(context.Context, *routingv1.ListRequest) (<-chan *routingv1.LegacyListResponse_Item, error) + + // Unpublish record from the network + Unpublish(context.Context, *corev1.RecordRef, *corev1.Record) error +} +``` + +**Example Usage:** +```go +// Publish agent to network +err := routing.Publish(ctx, recordRef, record) +if err != nil { + return fmt.Errorf("failed to publish: %w", err) +} + +// Search network for agents +listReq := &routingv1.ListRequest{ + Limit: 50, + // ... other search criteria +} + +resultChan, err := routing.List(ctx, listReq) +if err != nil { + return fmt.Errorf("search failed: %w", err) +} + +// Process results +for item := range resultChan { + // Handle each discovered agent + processDiscoveredAgent(item) +} +``` + +## Usage Examples + +### Version-Agnostic Record Processing + +The adapter pattern allows the same code to work with any OASF version: + +```go +func ProcessAnyRecord(record *corev1.Record) error { + // Create adapter - automatically handles version detection + adapter := adapters.NewRecordAdapter(record) + data := adapter.GetRecordData() + + // Now use unified interface regardless of version + fmt.Printf("Agent: %s v%s\n", data.GetName(), data.GetVersion()) + fmt.Printf("Description: %s\n", data.GetDescription()) + + // Process skills - works for all versions + skills := data.GetSkills() + for _, skill := range skills { + fmt.Printf("Skill: %s (ID: %d)\n", skill.GetName(), skill.GetID()) + } + + // Process locators - consistent interface + locators := data.GetLocators() + for _, locator := range locators { + fmt.Printf("Deployment: %s at %s\n", locator.GetType(), locator.GetURL()) + } + + return nil +} +``` + +### Cross-Version Compatibility + +The same function works with different OASF versions: + +```go +// OASF v0.3.1 record +v1Record := corev1.New(&typesv1alpha0.Agent{ + Name: "nlp-agent", + Skills: []*typesv1alpha0.Skill{ + {CategoryName: stringPtr("nlp"), ClassName: stringPtr("processing")}, + }, +}) + +// OASF 0.7.0 record +v3Record := corev1.New(&typesv1alpha1.Record{ + Name: "nlp-agent", + Skills: []*typesv1alpha1.Skill{ + {Name: "natural-language-processing"}, + }, +}) + +// Same processing function works for both +ProcessAnyRecord(v1Record) // Works! +ProcessAnyRecord(v3Record) // Works! 
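+
+// stringPtr is a small helper assumed by the v0.3.1 example above; it is
+// not part of the package API. The v1alpha0 skill fields are *string,
+// so the struct literal needs pointer values.
+func stringPtr(s string) *string { return &s }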
+``` + +### Advanced Search Patterns + +Building complex search queries: + +```go +func FindProductionAgents(search SearchAPI) ([]Record, error) { + // Find production-ready agents with specific capabilities + return search.GetRecords( + WithSkillNames("production-monitoring", "error-handling"), + WithModuleNames("security", "logging"), + WithLocatorTypes("kubernetes"), + WithLimit(20), + ) +} + +func FindDevelopmentAgents(search SearchAPI, team string) ([]Record, error) { + // Find development agents for a specific team + return search.GetRecords( + WithName("*-dev"), // Development naming pattern + WithModuleNames("debugging", "testing"), + WithLocatorTypes("docker"), + WithLimit(50), + ) +} + +func PaginateAllAgents(search SearchAPI) error { + offset := 0 + limit := 10 + + for { + records, err := search.GetRecords( + WithOffset(offset), + WithLimit(limit), + ) + if err != nil { + return err + } + + if len(records) == 0 { + break // No more records + } + + // Process batch + for _, record := range records { + processRecord(record) + } + + offset += limit + } + + return nil +} +``` + +## Data Store Integration + +The types package integrates with the datastore abstraction: + +```go +// Datastore provides key-value storage with path-like queries +type Datastore interface { + datastore.Batching // From go-datastore +} +``` + +**Supported Backends:** +- **Badger** - High-performance embedded database +- **BoltDB** - Pure Go embedded key-value store +- **LevelDB** - Fast key-value storage library +- **Memory** - In-memory storage for testing +- **Map** - Simple map-based storage + +**Use Cases:** +- **Peer Information** - Store known peer addresses and capabilities +- **Content Cache** - Cache frequently accessed records +- **Metadata Storage** - Store search indices and annotations +- **Session Data** - Temporary data and state information + +## Configuration and Setup + +### API Options + +The API system uses dependency injection for configuration: + +```go +type APIOptions interface { + Config() *config.Config // Read-only configuration access +} + +// Create API options +opts := NewOptions(configInstance) + +// Access configuration in implementations +func (s *storeImpl) setupStorage() error { + cfg := s.opts.Config() + // Use configuration... +} +``` + +### Main API Interface + +The unified API provides access to all subsystems: + +```go +type API interface { + Options() APIOptions // Get configuration options + Store() StoreAPI // Content storage operations + Routing() RoutingAPI // Network routing operations + Search() SearchAPI // Search and discovery +} + +// Usage example +func setupServer(api API) error { + store := api.Store() + search := api.Search() + routing := api.Routing() + + // Configure and use services... 
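+	// Illustrative only: how the services are wired up (gRPC handlers,
+	// background workers, etc.) is left to the caller, not this package.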
+ return nil +} +``` + +## Error Handling + +The types package uses standard Go error handling patterns: + +```go +// Storage errors +if err := store.Push(ctx, record); err != nil { + switch { + case errors.Is(err, ErrRecordExists): + // Handle duplicate record + case errors.Is(err, ErrInvalidCID): + // Handle invalid content identifier + default: + // Handle general error + } +} + +// Search errors +records, err := search.GetRecords(WithName("invalid-agent")) +if err != nil { + return fmt.Errorf("search failed: %w", err) +} + +if len(records) == 0 { + // No results found +} +``` + +## Testing Support + +The types package provides interfaces that are easily mockable: + +```go +// Mock implementations for testing +type MockStore struct { + records map[string]*corev1.Record +} + +func (m *MockStore) Push(ctx context.Context, record *corev1.Record) (*corev1.RecordRef, error) { + cid := record.GetCid() + m.records[cid] = record + return &corev1.RecordRef{Cid: cid}, nil +} + +// Use in tests +func TestAgentProcessing(t *testing.T) { + mockStore := &MockStore{records: make(map[string]*corev1.Record)} + + // Test with mock + err := processAgent(mockStore, testRecord) + assert.NoError(t, err) +} +``` + +## Performance Considerations + +### Adapter Overhead + +The adapter pattern introduces minimal overhead: +- **Memory**: Small wrapper objects around existing structs +- **CPU**: Single virtual function call per field access +- **GC**: No additional allocations for simple field access + +### Interface Benefits + +Despite minimal overhead, the benefits are significant: +- **Code Reuse**: Same code works across all OASF versions +- **Maintainability**: Single implementation to maintain +- **Type Safety**: Compile-time verification of compatibility +- **Testing**: Easy to mock and test components + +### Optimization Tips + +```go +// Cache adapters when processing many fields +adapter := adapters.NewRecordAdapter(record) +data := adapter.GetRecordData() + +// Process multiple fields efficiently +name := data.GetName() +version := data.GetVersion() +skills := data.GetSkills() + +// Avoid recreating adapters in loops +for _, record := range records { + adapter := adapters.NewRecordAdapter(record) // OK: single creation + processRecord(adapter.GetRecordData()) +} +``` + +## Best Practices + +### Record Processing + +1. **Use adapters for version-agnostic code**: + ```go + // Good: Works with all versions + adapter := adapters.NewRecordAdapter(record) + data := adapter.GetRecordData() + + // Avoid: Version-specific access + if v1 := record.GetV1(); v1 != nil { + // V1-specific code + } + ``` + +2. **Handle nil cases gracefully**: + ```go + data := adapter.GetRecordData() + if data == nil { + return errors.New("invalid record data") + } + ``` + +3. **Process collections efficiently**: + ```go + skills := data.GetSkills() + if len(skills) == 0 { + return nil // No skills to process + } + + for _, skill := range skills { + processSkill(skill) + } + ``` + +### API Design + +1. **Use interface composition**: + ```go + type ExtendedAPI interface { + API + // Additional methods + Backup() error + } + ``` + +2. **Leverage filter options**: + ```go + // Composable and readable + records, err := search.GetRecords( + WithName("production-*"), + WithSkillNames("monitoring"), + WithLimit(50), + ) + ``` + +3. 
**Handle context properly**: + ```go + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + err := store.Push(ctx, record) + ``` + +## Dependencies + +### Core Dependencies +- **`github.com/agntcy/dir/api/core/v1`** - Core protobuf definitions +- **`github.com/agntcy/dir/api/objects/*`** - OASF object definitions +- **`github.com/ipfs/go-datastore`** - Datastore abstraction +- **`google.golang.org/protobuf`** - Protocol buffer support + +### Integration Points +- **Storage Layer** - OCI implementations +- **Search Layer** - SQLite, in-memory implementations +- **Routing Layer** - P2P networking implementations +- **Config System** - Server configuration management + The types package serves as the foundation for all server operations, providing consistent interfaces and seamless version compatibility across the entire system. \ No newline at end of file diff --git a/server/types/adapters/oasf_v1alpha0.go b/server/types/adapters/oasf_v1alpha0.go index 9b6f8da3e..b42917cea 100644 --- a/server/types/adapters/oasf_v1alpha0.go +++ b/server/types/adapters/oasf_v1alpha0.go @@ -1,449 +1,449 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package adapters - -import ( - "fmt" - "strings" - - typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -const featuresSchemaPrefix = "schema.oasf.agntcy.org/features/" - -// V1Alpha0Adapter adapts typesv1alpha0.Record to types.RecordData interface. -type V1Alpha0Adapter struct { - record *typesv1alpha0.Record -} - -// Compile-time interface checks. -var ( - _ types.RecordData = (*V1Alpha0Adapter)(nil) - _ types.LabelProvider = (*V1Alpha0Adapter)(nil) -) - -// NewV1Alpha0Adapter creates a new V1Alpha0Adapter. -func NewV1Alpha0Adapter(record *typesv1alpha0.Record) *V1Alpha0Adapter { - return &V1Alpha0Adapter{record: record} -} - -// GetAnnotations implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetAnnotations() map[string]string { - if a.record == nil { - return nil - } - - return a.record.GetAnnotations() -} - -// GetSchemaVersion implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetSchemaVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetSchemaVersion() -} - -// GetDomains implements types.RecordData. -// -// NOTE: V1Alpha0 doesn't have domains, so we return an empty slice. -func (a *V1Alpha0Adapter) GetDomains() []types.Domain { - return []types.Domain{} -} - -// GetName implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetName() string { - if a.record == nil { - return "" - } - - return a.record.GetName() -} - -// GetVersion implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetVersion() -} - -// GetDescription implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetDescription() string { - if a.record == nil { - return "" - } - - return a.record.GetDescription() -} - -// GetAuthors implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetAuthors() []string { - if a.record == nil { - return nil - } - - return a.record.GetAuthors() -} - -// GetCreatedAt implements types.RecordData interface. 
-func (a *V1Alpha0Adapter) GetCreatedAt() string { - if a.record == nil { - return "" - } - - return a.record.GetCreatedAt() -} - -// GetSkills implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetSkills() []types.Skill { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - result := make([]types.Skill, len(skills)) - - for i, skill := range skills { - result[i] = NewV1Alpha0SkillAdapter(skill) - } - - return result -} - -// GetLocators implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetLocators() []types.Locator { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Locator, len(locators)) - - for i, locator := range locators { - result[i] = NewV1Alpha0LocatorAdapter(locator) - } - - return result -} - -// GetModules implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetModules() []types.Module { - if a.record == nil { - return nil - } - - extensions := a.record.GetExtensions() - result := make([]types.Module, len(extensions)) - - for i, extension := range extensions { - result[i] = NewV1Alpha0ModuleAdapter(extension) - } - - return result -} - -// GetSignature implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetSignature() types.Signature { - if a.record == nil || a.record.GetSignature() == nil { - return nil - } - - return NewV1Alpha0SignatureAdapter(a.record.GetSignature()) -} - -// GetPreviousRecordCid implements types.RecordData interface. -func (a *V1Alpha0Adapter) GetPreviousRecordCid() string { - // V1 doesn't have previous record CID - return "" -} - -// V1Alpha0SignatureAdapter adapts typesv1alpha0.Signature to types.Signature interface. -type V1Alpha0SignatureAdapter struct { - signature *typesv1alpha0.Signature -} - -// NewV1Alpha0SignatureAdapter creates a new V1Alpha0SignatureAdapter. -func NewV1Alpha0SignatureAdapter(signature *typesv1alpha0.Signature) *V1Alpha0SignatureAdapter { - return &V1Alpha0SignatureAdapter{signature: signature} -} - -// GetAnnotations implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetAnnotations() map[string]string { - // V1 signature doesn't have annotations - return nil -} - -// GetSignedAt implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetSignedAt() string { - if s.signature == nil { - return "" - } - - return s.signature.GetSignedAt() -} - -// GetAlgorithm implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetAlgorithm() string { - if s.signature == nil { - return "" - } - - return s.signature.GetAlgorithm() -} - -// GetSignature implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetSignature() string { - if s.signature == nil { - return "" - } - - return s.signature.GetSignature() -} - -// GetCertificate implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetCertificate() string { - if s.signature == nil { - return "" - } - - return s.signature.GetCertificate() -} - -// GetContentType implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetContentType() string { - if s.signature == nil { - return "" - } - - return s.signature.GetContentType() -} - -// GetContentBundle implements types.Signature interface. -func (s *V1Alpha0SignatureAdapter) GetContentBundle() string { - if s.signature == nil { - return "" - } - - return s.signature.GetContentBundle() -} - -// V1Alpha0SkillAdapter adapts typesv1alpha0.Skill to types.Skill interface. 
-type V1Alpha0SkillAdapter struct { - skill *typesv1alpha0.Skill -} - -// NewV1Alpha0SkillAdapter creates a new V1Alpha0SkillAdapter. -func NewV1Alpha0SkillAdapter(skill *typesv1alpha0.Skill) *V1Alpha0SkillAdapter { - return &V1Alpha0SkillAdapter{skill: skill} -} - -// GetAnnotations implements types.Skill interface. -func (s *V1Alpha0SkillAdapter) GetAnnotations() map[string]string { - if s.skill == nil { - return nil - } - - return s.skill.GetAnnotations() -} - -// GetName implements types.Skill interface. -func (s *V1Alpha0SkillAdapter) GetName() string { - if s.skill == nil { - return "" - } - - if s.skill.GetClassName() == "" { - return s.skill.GetCategoryName() - } - - return fmt.Sprintf("%s/%s", s.skill.GetCategoryName(), s.skill.GetClassName()) -} - -// GetID implements types.Skill interface. -func (s *V1Alpha0SkillAdapter) GetID() uint64 { - if s.skill == nil { - return 0 - } - - return s.skill.GetClassUid() -} - -// V1Alpha0LocatorAdapter adapts typesv1alpha0.Locator to types.Locator interface. -type V1Alpha0LocatorAdapter struct { - locator *typesv1alpha0.Locator -} - -// NewV1Alpha0LocatorAdapter creates a new V1Alpha0LocatorAdapter. -func NewV1Alpha0LocatorAdapter(locator *typesv1alpha0.Locator) *V1Alpha0LocatorAdapter { - return &V1Alpha0LocatorAdapter{locator: locator} -} - -// GetAnnotations implements types.Locator interface. -func (l *V1Alpha0LocatorAdapter) GetAnnotations() map[string]string { - if l.locator == nil { - return nil - } - - return l.locator.GetAnnotations() -} - -// GetType implements types.Locator interface. -func (l *V1Alpha0LocatorAdapter) GetType() string { - if l.locator == nil { - return "" - } - - return l.locator.GetType() -} - -// GetURL implements types.Locator interface. -func (l *V1Alpha0LocatorAdapter) GetURL() string { - if l.locator == nil { - return "" - } - - return l.locator.GetUrl() -} - -// GetSize implements types.Locator interface. -func (l *V1Alpha0LocatorAdapter) GetSize() uint64 { - if l.locator == nil { - return 0 - } - - return l.locator.GetSize() -} - -// GetDigest implements types.Locator interface. -func (l *V1Alpha0LocatorAdapter) GetDigest() string { - if l.locator == nil { - return "" - } - - return l.locator.GetDigest() -} - -// V1Alpha0ModuleAdapter adapts typesv1alpha0.Extension to types.Module interface. -type V1Alpha0ModuleAdapter struct { - extension *typesv1alpha0.Extension -} - -// NewV1Alpha0ModuleAdapter creates a new V1Alpha0ModuleAdapter. -func NewV1Alpha0ModuleAdapter(extension *typesv1alpha0.Extension) *V1Alpha0ModuleAdapter { - return &V1Alpha0ModuleAdapter{extension: extension} -} - -// GetName implements types.Module interface. -func (m *V1Alpha0ModuleAdapter) GetName() string { - if m.extension == nil { - return "" - } - - return m.extension.GetName() -} - -// GetData implements types.Module interface. -func (m *V1Alpha0ModuleAdapter) GetData() map[string]any { - if m.extension == nil || m.extension.GetData() == nil { - return nil - } - - resp, err := decoder.ProtoToStruct[map[string]any](m.extension.GetData()) - if err != nil { - return nil - } - - return *resp -} - -// GetID implements types.Module interface. -// V1Alpha0 uses extensions which don't have IDs, so return 0. -func (m *V1Alpha0ModuleAdapter) GetID() uint64 { - return 0 -} - -// GetSkillLabels implements types.LabelProvider interface. 
-func (a *V1Alpha0Adapter) GetSkillLabels() []types.Label { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - result := make([]types.Label, 0, len(skills)) - - for _, skill := range skills { - // Reuse the existing skill adapter logic for name formatting - skillAdapter := NewV1Alpha0SkillAdapter(skill) - skillName := skillAdapter.GetName() - - skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName) - result = append(result, skillLabel) - } - - return result -} - -// GetLocatorLabels implements types.LabelProvider interface. -func (a *V1Alpha0Adapter) GetLocatorLabels() []types.Label { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Label, 0, len(locators)) - - for _, locator := range locators { - locatorAdapter := NewV1Alpha0LocatorAdapter(locator) - locatorType := locatorAdapter.GetType() - - locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType) - result = append(result, locatorLabel) - } - - return result -} - -// GetDomainLabels implements types.LabelProvider interface. -func (a *V1Alpha0Adapter) GetDomainLabels() []types.Label { - // V1Alpha0 doesn't have domains, return empty slice - return []types.Label{} -} - -// GetModuleLabels implements types.LabelProvider interface. -func (a *V1Alpha0Adapter) GetModuleLabels() []types.Label { - if a.record == nil { - return nil - } - - extensions := a.record.GetExtensions() - result := make([]types.Label, 0, len(extensions)) - - for _, ext := range extensions { - extensionName := ext.GetName() - - // Handle v0.3.1 schema prefix for features - now map to modules - name := strings.TrimPrefix(extensionName, featuresSchemaPrefix) - moduleLabel := types.Label(types.LabelTypeModule.Prefix() + name) - result = append(result, moduleLabel) - } - - return result -} - -// GetAllLabels implements types.LabelProvider interface. -func (a *V1Alpha0Adapter) GetAllLabels() []types.Label { - var allLabels []types.Label - - allLabels = append(allLabels, a.GetSkillLabels()...) - allLabels = append(allLabels, a.GetDomainLabels()...) - allLabels = append(allLabels, a.GetModuleLabels()...) - allLabels = append(allLabels, a.GetLocatorLabels()...) - - return allLabels -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + "fmt" + "strings" + + typesv1alpha0 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha0" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +const featuresSchemaPrefix = "schema.oasf.agntcy.org/features/" + +// V1Alpha0Adapter adapts typesv1alpha0.Record to types.RecordData interface. +type V1Alpha0Adapter struct { + record *typesv1alpha0.Record +} + +// Compile-time interface checks. +var ( + _ types.RecordData = (*V1Alpha0Adapter)(nil) + _ types.LabelProvider = (*V1Alpha0Adapter)(nil) +) + +// NewV1Alpha0Adapter creates a new V1Alpha0Adapter. +func NewV1Alpha0Adapter(record *typesv1alpha0.Record) *V1Alpha0Adapter { + return &V1Alpha0Adapter{record: record} +} + +// GetAnnotations implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetAnnotations() map[string]string { + if a.record == nil { + return nil + } + + return a.record.GetAnnotations() +} + +// GetSchemaVersion implements types.RecordData interface. 
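+// As with every accessor on this adapter, the call is nil-safe and
+// returns the zero value when no record is wrapped.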
+func (a *V1Alpha0Adapter) GetSchemaVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetSchemaVersion() +} + +// GetDomains implements types.RecordData. +// +// NOTE: V1Alpha0 doesn't have domains, so we return an empty slice. +func (a *V1Alpha0Adapter) GetDomains() []types.Domain { + return []types.Domain{} +} + +// GetName implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetName() string { + if a.record == nil { + return "" + } + + return a.record.GetName() +} + +// GetVersion implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetVersion() +} + +// GetDescription implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetDescription() string { + if a.record == nil { + return "" + } + + return a.record.GetDescription() +} + +// GetAuthors implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetAuthors() []string { + if a.record == nil { + return nil + } + + return a.record.GetAuthors() +} + +// GetCreatedAt implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetCreatedAt() string { + if a.record == nil { + return "" + } + + return a.record.GetCreatedAt() +} + +// GetSkills implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetSkills() []types.Skill { + if a.record == nil { + return nil + } + + skills := a.record.GetSkills() + result := make([]types.Skill, len(skills)) + + for i, skill := range skills { + result[i] = NewV1Alpha0SkillAdapter(skill) + } + + return result +} + +// GetLocators implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetLocators() []types.Locator { + if a.record == nil { + return nil + } + + locators := a.record.GetLocators() + result := make([]types.Locator, len(locators)) + + for i, locator := range locators { + result[i] = NewV1Alpha0LocatorAdapter(locator) + } + + return result +} + +// GetModules implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetModules() []types.Module { + if a.record == nil { + return nil + } + + extensions := a.record.GetExtensions() + result := make([]types.Module, len(extensions)) + + for i, extension := range extensions { + result[i] = NewV1Alpha0ModuleAdapter(extension) + } + + return result +} + +// GetSignature implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetSignature() types.Signature { + if a.record == nil || a.record.GetSignature() == nil { + return nil + } + + return NewV1Alpha0SignatureAdapter(a.record.GetSignature()) +} + +// GetPreviousRecordCid implements types.RecordData interface. +func (a *V1Alpha0Adapter) GetPreviousRecordCid() string { + // V1 doesn't have previous record CID + return "" +} + +// V1Alpha0SignatureAdapter adapts typesv1alpha0.Signature to types.Signature interface. +type V1Alpha0SignatureAdapter struct { + signature *typesv1alpha0.Signature +} + +// NewV1Alpha0SignatureAdapter creates a new V1Alpha0SignatureAdapter. +func NewV1Alpha0SignatureAdapter(signature *typesv1alpha0.Signature) *V1Alpha0SignatureAdapter { + return &V1Alpha0SignatureAdapter{signature: signature} +} + +// GetAnnotations implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetAnnotations() map[string]string { + // V1 signature doesn't have annotations + return nil +} + +// GetSignedAt implements types.Signature interface. 
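+// The timestamp is forwarded verbatim from the underlying record;
+// callers are responsible for parsing it.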
+func (s *V1Alpha0SignatureAdapter) GetSignedAt() string { + if s.signature == nil { + return "" + } + + return s.signature.GetSignedAt() +} + +// GetAlgorithm implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetAlgorithm() string { + if s.signature == nil { + return "" + } + + return s.signature.GetAlgorithm() +} + +// GetSignature implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetSignature() string { + if s.signature == nil { + return "" + } + + return s.signature.GetSignature() +} + +// GetCertificate implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetCertificate() string { + if s.signature == nil { + return "" + } + + return s.signature.GetCertificate() +} + +// GetContentType implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetContentType() string { + if s.signature == nil { + return "" + } + + return s.signature.GetContentType() +} + +// GetContentBundle implements types.Signature interface. +func (s *V1Alpha0SignatureAdapter) GetContentBundle() string { + if s.signature == nil { + return "" + } + + return s.signature.GetContentBundle() +} + +// V1Alpha0SkillAdapter adapts typesv1alpha0.Skill to types.Skill interface. +type V1Alpha0SkillAdapter struct { + skill *typesv1alpha0.Skill +} + +// NewV1Alpha0SkillAdapter creates a new V1Alpha0SkillAdapter. +func NewV1Alpha0SkillAdapter(skill *typesv1alpha0.Skill) *V1Alpha0SkillAdapter { + return &V1Alpha0SkillAdapter{skill: skill} +} + +// GetAnnotations implements types.Skill interface. +func (s *V1Alpha0SkillAdapter) GetAnnotations() map[string]string { + if s.skill == nil { + return nil + } + + return s.skill.GetAnnotations() +} + +// GetName implements types.Skill interface. +func (s *V1Alpha0SkillAdapter) GetName() string { + if s.skill == nil { + return "" + } + + if s.skill.GetClassName() == "" { + return s.skill.GetCategoryName() + } + + return fmt.Sprintf("%s/%s", s.skill.GetCategoryName(), s.skill.GetClassName()) +} + +// GetID implements types.Skill interface. +func (s *V1Alpha0SkillAdapter) GetID() uint64 { + if s.skill == nil { + return 0 + } + + return s.skill.GetClassUid() +} + +// V1Alpha0LocatorAdapter adapts typesv1alpha0.Locator to types.Locator interface. +type V1Alpha0LocatorAdapter struct { + locator *typesv1alpha0.Locator +} + +// NewV1Alpha0LocatorAdapter creates a new V1Alpha0LocatorAdapter. +func NewV1Alpha0LocatorAdapter(locator *typesv1alpha0.Locator) *V1Alpha0LocatorAdapter { + return &V1Alpha0LocatorAdapter{locator: locator} +} + +// GetAnnotations implements types.Locator interface. +func (l *V1Alpha0LocatorAdapter) GetAnnotations() map[string]string { + if l.locator == nil { + return nil + } + + return l.locator.GetAnnotations() +} + +// GetType implements types.Locator interface. +func (l *V1Alpha0LocatorAdapter) GetType() string { + if l.locator == nil { + return "" + } + + return l.locator.GetType() +} + +// GetURL implements types.Locator interface. +func (l *V1Alpha0LocatorAdapter) GetURL() string { + if l.locator == nil { + return "" + } + + return l.locator.GetUrl() +} + +// GetSize implements types.Locator interface. +func (l *V1Alpha0LocatorAdapter) GetSize() uint64 { + if l.locator == nil { + return 0 + } + + return l.locator.GetSize() +} + +// GetDigest implements types.Locator interface. 
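+// An empty string simply means the locator did not declare a digest;
+// the adapter performs no validation of the value.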
+func (l *V1Alpha0LocatorAdapter) GetDigest() string { + if l.locator == nil { + return "" + } + + return l.locator.GetDigest() +} + +// V1Alpha0ModuleAdapter adapts typesv1alpha0.Extension to types.Module interface. +type V1Alpha0ModuleAdapter struct { + extension *typesv1alpha0.Extension +} + +// NewV1Alpha0ModuleAdapter creates a new V1Alpha0ModuleAdapter. +func NewV1Alpha0ModuleAdapter(extension *typesv1alpha0.Extension) *V1Alpha0ModuleAdapter { + return &V1Alpha0ModuleAdapter{extension: extension} +} + +// GetName implements types.Module interface. +func (m *V1Alpha0ModuleAdapter) GetName() string { + if m.extension == nil { + return "" + } + + return m.extension.GetName() +} + +// GetData implements types.Module interface. +func (m *V1Alpha0ModuleAdapter) GetData() map[string]any { + if m.extension == nil || m.extension.GetData() == nil { + return nil + } + + resp, err := decoder.ProtoToStruct[map[string]any](m.extension.GetData()) + if err != nil { + return nil + } + + return *resp +} + +// GetID implements types.Module interface. +// V1Alpha0 uses extensions which don't have IDs, so return 0. +func (m *V1Alpha0ModuleAdapter) GetID() uint64 { + return 0 +} + +// GetSkillLabels implements types.LabelProvider interface. +func (a *V1Alpha0Adapter) GetSkillLabels() []types.Label { + if a.record == nil { + return nil + } + + skills := a.record.GetSkills() + result := make([]types.Label, 0, len(skills)) + + for _, skill := range skills { + // Reuse the existing skill adapter logic for name formatting + skillAdapter := NewV1Alpha0SkillAdapter(skill) + skillName := skillAdapter.GetName() + + skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName) + result = append(result, skillLabel) + } + + return result +} + +// GetLocatorLabels implements types.LabelProvider interface. +func (a *V1Alpha0Adapter) GetLocatorLabels() []types.Label { + if a.record == nil { + return nil + } + + locators := a.record.GetLocators() + result := make([]types.Label, 0, len(locators)) + + for _, locator := range locators { + locatorAdapter := NewV1Alpha0LocatorAdapter(locator) + locatorType := locatorAdapter.GetType() + + locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType) + result = append(result, locatorLabel) + } + + return result +} + +// GetDomainLabels implements types.LabelProvider interface. +func (a *V1Alpha0Adapter) GetDomainLabels() []types.Label { + // V1Alpha0 doesn't have domains, return empty slice + return []types.Label{} +} + +// GetModuleLabels implements types.LabelProvider interface. +func (a *V1Alpha0Adapter) GetModuleLabels() []types.Label { + if a.record == nil { + return nil + } + + extensions := a.record.GetExtensions() + result := make([]types.Label, 0, len(extensions)) + + for _, ext := range extensions { + extensionName := ext.GetName() + + // Handle v0.3.1 schema prefix for features - now map to modules + name := strings.TrimPrefix(extensionName, featuresSchemaPrefix) + moduleLabel := types.Label(types.LabelTypeModule.Prefix() + name) + result = append(result, moduleLabel) + } + + return result +} + +// GetAllLabels implements types.LabelProvider interface. +func (a *V1Alpha0Adapter) GetAllLabels() []types.Label { + var allLabels []types.Label + + allLabels = append(allLabels, a.GetSkillLabels()...) + allLabels = append(allLabels, a.GetDomainLabels()...) + allLabels = append(allLabels, a.GetModuleLabels()...) + allLabels = append(allLabels, a.GetLocatorLabels()...) 
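+	// Resulting order: skills, domains, modules, locators.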
+ + return allLabels +} diff --git a/server/types/adapters/oasf_v1alpha1.go b/server/types/adapters/oasf_v1alpha1.go index ea5132d0e..52510bd4b 100644 --- a/server/types/adapters/oasf_v1alpha1.go +++ b/server/types/adapters/oasf_v1alpha1.go @@ -1,513 +1,513 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package adapters - -import ( - typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -// V1Alpha1Adapter adapts typesv1alpha1.Record to types.RecordData interface. -type V1Alpha1Adapter struct { - record *typesv1alpha1.Record -} - -// Compile-time interface checks. -var ( - _ types.RecordData = (*V1Alpha1Adapter)(nil) - _ types.LabelProvider = (*V1Alpha1Adapter)(nil) -) - -// NewV1Alpha1Adapter creates a new V1Alpha1Adapter. -func NewV1Alpha1Adapter(record *typesv1alpha1.Record) *V1Alpha1Adapter { - return &V1Alpha1Adapter{record: record} -} - -// GetAnnotations implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetAnnotations() map[string]string { - if a.record == nil { - return nil - } - - return a.record.GetAnnotations() -} - -// GetSchemaVersion implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetSchemaVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetSchemaVersion() -} - -// GetName implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetName() string { - if a.record == nil { - return "" - } - - return a.record.GetName() -} - -// GetVersion implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetVersion() -} - -// GetDescription implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetDescription() string { - if a.record == nil { - return "" - } - - return a.record.GetDescription() -} - -// GetAuthors implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetAuthors() []string { - if a.record == nil { - return nil - } - - return a.record.GetAuthors() -} - -// GetCreatedAt implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetCreatedAt() string { - if a.record == nil { - return "" - } - - return a.record.GetCreatedAt() -} - -// GetSkills implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetSkills() []types.Skill { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - result := make([]types.Skill, len(skills)) - - for i, skill := range skills { - result[i] = NewV1Alpha1SkillAdapter(skill) - } - - return result -} - -// GetDomains implements types.RecordData. -func (a *V1Alpha1Adapter) GetDomains() []types.Domain { - if a.record == nil { - return nil - } - - domains := a.record.GetDomains() - result := make([]types.Domain, len(domains)) - - for i, domain := range domains { - result[i] = NewV1Alpha1DomainAdapter(domain) - } - - return result -} - -// GetLocators implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetLocators() []types.Locator { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Locator, len(locators)) - - for i, locator := range locators { - result[i] = NewV1Alpha1LocatorAdapter(locator) - } - - return result -} - -// GetModules implements types.RecordData interface. 
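v1alpha0 records carry extensions rather than modules, and GetModuleLabels above strips a legacy schema prefix before namespacing the name under /modules/. A sketch of that mapping; the constant value below is a hypothetical placeholder, since featuresSchemaPrefix is defined elsewhere in the package and not shown in this diff:

package main

import (
    "fmt"
    "strings"
)

// featuresSchemaPrefix is assumed here for illustration; the real constant
// lives elsewhere in the adapters package and may differ.
const featuresSchemaPrefix = "schema.oasf.agntcy.org/features/"

// moduleLabel mirrors how V1Alpha0Adapter.GetModuleLabels maps a legacy
// extension name onto a /modules/ label.
func moduleLabel(extensionName string) string {
    name := strings.TrimPrefix(extensionName, featuresSchemaPrefix)
    return "/modules/" + name
}

func main() {
    fmt.Println(moduleLabel("schema.oasf.agntcy.org/features/runtime/framework")) // /modules/runtime/framework
    fmt.Println(moduleLabel("runtime/framework"))                                 // unprefixed names pass through
}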
-func (a *V1Alpha1Adapter) GetModules() []types.Module { - if a.record == nil { - return nil - } - - modules := a.record.GetModules() - result := make([]types.Module, len(modules)) - - for i, module := range modules { - result[i] = NewV1Alpha1ModuleAdapter(module) - } - - return result -} - -// GetSignature implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetSignature() types.Signature { - if a.record == nil || a.record.GetSignature() == nil { - return nil - } - - return NewV1Alpha1SignatureAdapter(a.record.GetSignature()) -} - -// GetPreviousRecordCid implements types.RecordData interface. -func (a *V1Alpha1Adapter) GetPreviousRecordCid() string { - if a.record == nil { - return "" - } - - return a.record.GetPreviousRecordCid() -} - -// V1Alpha1SignatureAdapter adapts typesv1alpha1.Signature to types.Signature interface. -type V1Alpha1SignatureAdapter struct { - signature *typesv1alpha1.Signature -} - -// NewV1Alpha1SignatureAdapter creates a new V1Alpha1SignatureAdapter. -func NewV1Alpha1SignatureAdapter(signature *typesv1alpha1.Signature) *V1Alpha1SignatureAdapter { - return &V1Alpha1SignatureAdapter{signature: signature} -} - -// GetAnnotations implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetAnnotations() map[string]string { - if s.signature == nil { - return nil - } - - return s.signature.GetAnnotations() -} - -// GetSignedAt implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetSignedAt() string { - if s.signature == nil { - return "" - } - - return s.signature.GetSignedAt() -} - -// GetAlgorithm implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetAlgorithm() string { - if s.signature == nil { - return "" - } - - return s.signature.GetAlgorithm() -} - -// GetSignature implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetSignature() string { - if s.signature == nil { - return "" - } - - return s.signature.GetSignature() -} - -// GetCertificate implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetCertificate() string { - if s.signature == nil { - return "" - } - - return s.signature.GetCertificate() -} - -// GetContentType implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetContentType() string { - if s.signature == nil { - return "" - } - - return s.signature.GetContentType() -} - -// GetContentBundle implements types.Signature interface. -func (s *V1Alpha1SignatureAdapter) GetContentBundle() string { - if s.signature == nil { - return "" - } - - return s.signature.GetContentBundle() -} - -// V1Alpha1SkillAdapter adapts typesv1alpha1.Skill to types.Skill interface. -type V1Alpha1SkillAdapter struct { - skill *typesv1alpha1.Skill -} - -// NewV1Alpha1SkillAdapter creates a new V1Alpha1SkillAdapter. -func NewV1Alpha1SkillAdapter(skill *typesv1alpha1.Skill) *V1Alpha1SkillAdapter { - return &V1Alpha1SkillAdapter{skill: skill} -} - -// GetAnnotations implements types.Skill interface. -func (s *V1Alpha1SkillAdapter) GetAnnotations() map[string]string { - if s.skill == nil { - return nil - } - - return s.skill.GetAnnotations() -} - -// GetName implements types.Skill interface. -func (s *V1Alpha1SkillAdapter) GetName() string { - if s.skill == nil { - return "" - } - - return s.skill.GetName() -} - -// GetID implements types.Skill interface. 
-func (s *V1Alpha1SkillAdapter) GetID() uint64 { - if s.skill == nil { - return 0 - } - - return uint64(s.skill.GetId()) -} - -// V1Alpha1LocatorAdapter adapts typesv1alpha1.Locator to types.Locator interface. -type V1Alpha1LocatorAdapter struct { - locator *typesv1alpha1.Locator -} - -// NewV1Alpha1LocatorAdapter creates a new V1Alpha1LocatorAdapter. -func NewV1Alpha1LocatorAdapter(locator *typesv1alpha1.Locator) *V1Alpha1LocatorAdapter { - return &V1Alpha1LocatorAdapter{locator: locator} -} - -// GetAnnotations implements types.Locator interface. -func (l *V1Alpha1LocatorAdapter) GetAnnotations() map[string]string { - if l.locator == nil { - return nil - } - - return l.locator.GetAnnotations() -} - -// GetType implements types.Locator interface. -func (l *V1Alpha1LocatorAdapter) GetType() string { - if l.locator == nil { - return "" - } - - return l.locator.GetType() -} - -// GetURL implements types.Locator interface. -func (l *V1Alpha1LocatorAdapter) GetURL() string { - if l.locator == nil { - return "" - } - - return l.locator.GetUrl() -} - -// GetSize implements types.Locator interface. -func (l *V1Alpha1LocatorAdapter) GetSize() uint64 { - if l.locator == nil { - return 0 - } - - return l.locator.GetSize() -} - -// GetDigest implements types.Locator interface. -func (l *V1Alpha1LocatorAdapter) GetDigest() string { - if l.locator == nil { - return "" - } - - return l.locator.GetDigest() -} - -// V1Alpha1SkillAdapter adapts typesv1alpha1.Skill to types.Skill interface. -type V1Alpha1DomainAdapter struct { - domain *typesv1alpha1.Domain -} - -// NewV1Alpha1DomainAdapter creates a new V1Alpha1DomainAdapter. -func NewV1Alpha1DomainAdapter(domain *typesv1alpha1.Domain) *V1Alpha1DomainAdapter { - if domain == nil { - return nil - } - - return &V1Alpha1DomainAdapter{domain: domain} -} - -// GetAnnotations implements types.Domain interface. -func (d *V1Alpha1DomainAdapter) GetAnnotations() map[string]string { - if d.domain == nil { - return nil - } - - return d.domain.GetAnnotations() -} - -// GetName implements types.Domain interface. -func (d *V1Alpha1DomainAdapter) GetName() string { - if d.domain == nil { - return "" - } - - return d.domain.GetName() -} - -// GetID implements types.Domain interface. -func (d *V1Alpha1DomainAdapter) GetID() uint64 { - if d.domain == nil { - return 0 - } - - return uint64(d.domain.GetId()) -} - -// V1Alpha1ModuleAdapter adapts typesv1alpha1.Module to types.Module interface. -type V1Alpha1ModuleAdapter struct { - module *typesv1alpha1.Module -} - -// NewV1Alpha1ModuleAdapter creates a new V1Alpha1ModuleAdapter. -func NewV1Alpha1ModuleAdapter(module *typesv1alpha1.Module) *V1Alpha1ModuleAdapter { - return &V1Alpha1ModuleAdapter{module: module} -} - -// GetName implements types.Module interface. -func (m *V1Alpha1ModuleAdapter) GetName() string { - if m.module == nil { - return "" - } - - return m.module.GetName() -} - -// GetData implements types.Module interface. -func (m *V1Alpha1ModuleAdapter) GetData() map[string]any { - if m.module == nil || m.module.GetData() == nil { - return nil - } - - resp, err := decoder.ProtoToStruct[map[string]any](m.module.GetData()) - if err != nil { - return nil - } - - return *resp -} - -// GetID implements types.Module interface. -func (m *V1Alpha1ModuleAdapter) GetID() uint64 { - if m.module == nil { - return 0 - } - - return uint64(m.module.GetId()) -} - -// GetSkillLabels implements types.LabelProvider interface. 
-func (a *V1Alpha1Adapter) GetSkillLabels() []types.Label { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - result := make([]types.Label, 0, len(skills)) - - for _, skill := range skills { - // Reuse the existing skill adapter logic for name formatting - skillAdapter := NewV1Alpha1SkillAdapter(skill) - skillName := skillAdapter.GetName() - - skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName) - result = append(result, skillLabel) - } - - return result -} - -// GetLocatorLabels implements types.LabelProvider interface. -func (a *V1Alpha1Adapter) GetLocatorLabels() []types.Label { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Label, 0, len(locators)) - - for _, locator := range locators { - locatorAdapter := NewV1Alpha1LocatorAdapter(locator) - locatorType := locatorAdapter.GetType() - - locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType) - result = append(result, locatorLabel) - } - - return result -} - -// GetDomainLabels implements types.LabelProvider interface. -func (a *V1Alpha1Adapter) GetDomainLabels() []types.Label { - if a.record == nil { - return nil - } - - domains := a.record.GetDomains() - result := make([]types.Label, 0, len(domains)) - - for _, domain := range domains { - domainAdapter := NewV1Alpha1DomainAdapter(domain) - domainName := domainAdapter.GetName() - - domainLabel := types.Label(types.LabelTypeDomain.Prefix() + domainName) - result = append(result, domainLabel) - } - - return result -} - -// GetModuleLabels implements types.LabelProvider interface. -func (a *V1Alpha1Adapter) GetModuleLabels() []types.Label { - if a.record == nil { - return nil - } - - modules := a.record.GetModules() - result := make([]types.Label, 0, len(modules)) - - for _, mod := range modules { - moduleAdapter := NewV1Alpha1ModuleAdapter(mod) - moduleName := moduleAdapter.GetName() - - // V1Alpha1 modules don't have schema prefixes, use name directly with /modules prefix - moduleLabel := types.Label(types.LabelTypeModule.Prefix() + moduleName) - result = append(result, moduleLabel) - } - - return result -} - -// GetAllLabels implements types.LabelProvider interface. -func (a *V1Alpha1Adapter) GetAllLabels() []types.Label { - var allLabels []types.Label - - allLabels = append(allLabels, a.GetSkillLabels()...) - allLabels = append(allLabels, a.GetDomainLabels()...) - allLabels = append(allLabels, a.GetModuleLabels()...) - allLabels = append(allLabels, a.GetLocatorLabels()...) - - return allLabels -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + typesv1alpha1 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +// V1Alpha1Adapter adapts typesv1alpha1.Record to types.RecordData interface. +type V1Alpha1Adapter struct { + record *typesv1alpha1.Record +} + +// Compile-time interface checks. +var ( + _ types.RecordData = (*V1Alpha1Adapter)(nil) + _ types.LabelProvider = (*V1Alpha1Adapter)(nil) +) + +// NewV1Alpha1Adapter creates a new V1Alpha1Adapter. +func NewV1Alpha1Adapter(record *typesv1alpha1.Record) *V1Alpha1Adapter { + return &V1Alpha1Adapter{record: record} +} + +// GetAnnotations implements types.RecordData interface. 
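Every getter in these adapter files follows the same nil-guard shape: check the wrapped message and fall back to the Go zero value instead of panicking, so callers never have to pre-check before chaining calls. A sketch of the pattern in isolation:

package main

import "fmt"

type inner struct{ name string }

type adapter struct{ msg *inner }

// GetName returns the zero value rather than panicking when the wrapped
// message is absent — the same guard every adapter getter above uses.
func (a *adapter) GetName() string {
    if a.msg == nil {
        return ""
    }
    return a.msg.name
}

func main() {
    fmt.Printf("%q\n", (&adapter{}).GetName())                       // ""
    fmt.Printf("%q\n", (&adapter{msg: &inner{name: "x"}}).GetName()) // "x"
}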
+func (a *V1Alpha1Adapter) GetAnnotations() map[string]string { + if a.record == nil { + return nil + } + + return a.record.GetAnnotations() +} + +// GetSchemaVersion implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetSchemaVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetSchemaVersion() +} + +// GetName implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetName() string { + if a.record == nil { + return "" + } + + return a.record.GetName() +} + +// GetVersion implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetVersion() +} + +// GetDescription implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetDescription() string { + if a.record == nil { + return "" + } + + return a.record.GetDescription() +} + +// GetAuthors implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetAuthors() []string { + if a.record == nil { + return nil + } + + return a.record.GetAuthors() +} + +// GetCreatedAt implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetCreatedAt() string { + if a.record == nil { + return "" + } + + return a.record.GetCreatedAt() +} + +// GetSkills implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetSkills() []types.Skill { + if a.record == nil { + return nil + } + + skills := a.record.GetSkills() + result := make([]types.Skill, len(skills)) + + for i, skill := range skills { + result[i] = NewV1Alpha1SkillAdapter(skill) + } + + return result +} + +// GetDomains implements types.RecordData. +func (a *V1Alpha1Adapter) GetDomains() []types.Domain { + if a.record == nil { + return nil + } + + domains := a.record.GetDomains() + result := make([]types.Domain, len(domains)) + + for i, domain := range domains { + result[i] = NewV1Alpha1DomainAdapter(domain) + } + + return result +} + +// GetLocators implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetLocators() []types.Locator { + if a.record == nil { + return nil + } + + locators := a.record.GetLocators() + result := make([]types.Locator, len(locators)) + + for i, locator := range locators { + result[i] = NewV1Alpha1LocatorAdapter(locator) + } + + return result +} + +// GetModules implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetModules() []types.Module { + if a.record == nil { + return nil + } + + modules := a.record.GetModules() + result := make([]types.Module, len(modules)) + + for i, module := range modules { + result[i] = NewV1Alpha1ModuleAdapter(module) + } + + return result +} + +// GetSignature implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetSignature() types.Signature { + if a.record == nil || a.record.GetSignature() == nil { + return nil + } + + return NewV1Alpha1SignatureAdapter(a.record.GetSignature()) +} + +// GetPreviousRecordCid implements types.RecordData interface. +func (a *V1Alpha1Adapter) GetPreviousRecordCid() string { + if a.record == nil { + return "" + } + + return a.record.GetPreviousRecordCid() +} + +// V1Alpha1SignatureAdapter adapts typesv1alpha1.Signature to types.Signature interface. +type V1Alpha1SignatureAdapter struct { + signature *typesv1alpha1.Signature +} + +// NewV1Alpha1SignatureAdapter creates a new V1Alpha1SignatureAdapter. 
+func NewV1Alpha1SignatureAdapter(signature *typesv1alpha1.Signature) *V1Alpha1SignatureAdapter { + return &V1Alpha1SignatureAdapter{signature: signature} +} + +// GetAnnotations implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetAnnotations() map[string]string { + if s.signature == nil { + return nil + } + + return s.signature.GetAnnotations() +} + +// GetSignedAt implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetSignedAt() string { + if s.signature == nil { + return "" + } + + return s.signature.GetSignedAt() +} + +// GetAlgorithm implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetAlgorithm() string { + if s.signature == nil { + return "" + } + + return s.signature.GetAlgorithm() +} + +// GetSignature implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetSignature() string { + if s.signature == nil { + return "" + } + + return s.signature.GetSignature() +} + +// GetCertificate implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetCertificate() string { + if s.signature == nil { + return "" + } + + return s.signature.GetCertificate() +} + +// GetContentType implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetContentType() string { + if s.signature == nil { + return "" + } + + return s.signature.GetContentType() +} + +// GetContentBundle implements types.Signature interface. +func (s *V1Alpha1SignatureAdapter) GetContentBundle() string { + if s.signature == nil { + return "" + } + + return s.signature.GetContentBundle() +} + +// V1Alpha1SkillAdapter adapts typesv1alpha1.Skill to types.Skill interface. +type V1Alpha1SkillAdapter struct { + skill *typesv1alpha1.Skill +} + +// NewV1Alpha1SkillAdapter creates a new V1Alpha1SkillAdapter. +func NewV1Alpha1SkillAdapter(skill *typesv1alpha1.Skill) *V1Alpha1SkillAdapter { + return &V1Alpha1SkillAdapter{skill: skill} +} + +// GetAnnotations implements types.Skill interface. +func (s *V1Alpha1SkillAdapter) GetAnnotations() map[string]string { + if s.skill == nil { + return nil + } + + return s.skill.GetAnnotations() +} + +// GetName implements types.Skill interface. +func (s *V1Alpha1SkillAdapter) GetName() string { + if s.skill == nil { + return "" + } + + return s.skill.GetName() +} + +// GetID implements types.Skill interface. +func (s *V1Alpha1SkillAdapter) GetID() uint64 { + if s.skill == nil { + return 0 + } + + return uint64(s.skill.GetId()) +} + +// V1Alpha1LocatorAdapter adapts typesv1alpha1.Locator to types.Locator interface. +type V1Alpha1LocatorAdapter struct { + locator *typesv1alpha1.Locator +} + +// NewV1Alpha1LocatorAdapter creates a new V1Alpha1LocatorAdapter. +func NewV1Alpha1LocatorAdapter(locator *typesv1alpha1.Locator) *V1Alpha1LocatorAdapter { + return &V1Alpha1LocatorAdapter{locator: locator} +} + +// GetAnnotations implements types.Locator interface. +func (l *V1Alpha1LocatorAdapter) GetAnnotations() map[string]string { + if l.locator == nil { + return nil + } + + return l.locator.GetAnnotations() +} + +// GetType implements types.Locator interface. +func (l *V1Alpha1LocatorAdapter) GetType() string { + if l.locator == nil { + return "" + } + + return l.locator.GetType() +} + +// GetURL implements types.Locator interface. +func (l *V1Alpha1LocatorAdapter) GetURL() string { + if l.locator == nil { + return "" + } + + return l.locator.GetUrl() +} + +// GetSize implements types.Locator interface. 
+func (l *V1Alpha1LocatorAdapter) GetSize() uint64 {
+    if l.locator == nil {
+        return 0
+    }
+
+    return l.locator.GetSize()
+}
+
+// GetDigest implements types.Locator interface.
+func (l *V1Alpha1LocatorAdapter) GetDigest() string {
+    if l.locator == nil {
+        return ""
+    }
+
+    return l.locator.GetDigest()
+}
+
+// V1Alpha1DomainAdapter adapts typesv1alpha1.Domain to types.Domain interface.
+type V1Alpha1DomainAdapter struct {
+    domain *typesv1alpha1.Domain
+}
+
+// NewV1Alpha1DomainAdapter creates a new V1Alpha1DomainAdapter.
+func NewV1Alpha1DomainAdapter(domain *typesv1alpha1.Domain) *V1Alpha1DomainAdapter {
+    if domain == nil {
+        return nil
+    }
+
+    return &V1Alpha1DomainAdapter{domain: domain}
+}
+
+// GetAnnotations implements types.Domain interface.
+func (d *V1Alpha1DomainAdapter) GetAnnotations() map[string]string {
+    if d.domain == nil {
+        return nil
+    }
+
+    return d.domain.GetAnnotations()
+}
+
+// GetName implements types.Domain interface.
+func (d *V1Alpha1DomainAdapter) GetName() string {
+    if d.domain == nil {
+        return ""
+    }
+
+    return d.domain.GetName()
+}
+
+// GetID implements types.Domain interface.
+func (d *V1Alpha1DomainAdapter) GetID() uint64 {
+    if d.domain == nil {
+        return 0
+    }
+
+    return uint64(d.domain.GetId())
+}
+
+// V1Alpha1ModuleAdapter adapts typesv1alpha1.Module to types.Module interface.
+type V1Alpha1ModuleAdapter struct {
+    module *typesv1alpha1.Module
+}
+
+// NewV1Alpha1ModuleAdapter creates a new V1Alpha1ModuleAdapter.
+func NewV1Alpha1ModuleAdapter(module *typesv1alpha1.Module) *V1Alpha1ModuleAdapter {
+    return &V1Alpha1ModuleAdapter{module: module}
+}
+
+// GetName implements types.Module interface.
+func (m *V1Alpha1ModuleAdapter) GetName() string {
+    if m.module == nil {
+        return ""
+    }
+
+    return m.module.GetName()
+}
+
+// GetData implements types.Module interface.
+func (m *V1Alpha1ModuleAdapter) GetData() map[string]any {
+    if m.module == nil || m.module.GetData() == nil {
+        return nil
+    }
+
+    resp, err := decoder.ProtoToStruct[map[string]any](m.module.GetData())
+    if err != nil {
+        return nil
+    }
+
+    return *resp
+}
+
+// GetID implements types.Module interface.
+func (m *V1Alpha1ModuleAdapter) GetID() uint64 {
+    if m.module == nil {
+        return 0
+    }
+
+    return uint64(m.module.GetId())
+}
+
+// GetSkillLabels implements types.LabelProvider interface.
+func (a *V1Alpha1Adapter) GetSkillLabels() []types.Label {
+    if a.record == nil {
+        return nil
+    }
+
+    skills := a.record.GetSkills()
+    result := make([]types.Label, 0, len(skills))
+
+    for _, skill := range skills {
+        // Reuse the existing skill adapter logic for name formatting
+        skillAdapter := NewV1Alpha1SkillAdapter(skill)
+        skillName := skillAdapter.GetName()
+
+        skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName)
+        result = append(result, skillLabel)
+    }
+
+    return result
+}
+
+// GetLocatorLabels implements types.LabelProvider interface.
+func (a *V1Alpha1Adapter) GetLocatorLabels() []types.Label {
+    if a.record == nil {
+        return nil
+    }
+
+    locators := a.record.GetLocators()
+    result := make([]types.Label, 0, len(locators))
+
+    for _, locator := range locators {
+        locatorAdapter := NewV1Alpha1LocatorAdapter(locator)
+        locatorType := locatorAdapter.GetType()
+
+        locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType)
+        result = append(result, locatorLabel)
+    }
+
+    return result
+}
+
+// GetDomainLabels implements types.LabelProvider interface.
+func (a *V1Alpha1Adapter) GetDomainLabels() []types.Label { + if a.record == nil { + return nil + } + + domains := a.record.GetDomains() + result := make([]types.Label, 0, len(domains)) + + for _, domain := range domains { + domainAdapter := NewV1Alpha1DomainAdapter(domain) + domainName := domainAdapter.GetName() + + domainLabel := types.Label(types.LabelTypeDomain.Prefix() + domainName) + result = append(result, domainLabel) + } + + return result +} + +// GetModuleLabels implements types.LabelProvider interface. +func (a *V1Alpha1Adapter) GetModuleLabels() []types.Label { + if a.record == nil { + return nil + } + + modules := a.record.GetModules() + result := make([]types.Label, 0, len(modules)) + + for _, mod := range modules { + moduleAdapter := NewV1Alpha1ModuleAdapter(mod) + moduleName := moduleAdapter.GetName() + + // V1Alpha1 modules don't have schema prefixes, use name directly with /modules prefix + moduleLabel := types.Label(types.LabelTypeModule.Prefix() + moduleName) + result = append(result, moduleLabel) + } + + return result +} + +// GetAllLabels implements types.LabelProvider interface. +func (a *V1Alpha1Adapter) GetAllLabels() []types.Label { + var allLabels []types.Label + + allLabels = append(allLabels, a.GetSkillLabels()...) + allLabels = append(allLabels, a.GetDomainLabels()...) + allLabels = append(allLabels, a.GetModuleLabels()...) + allLabels = append(allLabels, a.GetLocatorLabels()...) + + return allLabels +} diff --git a/server/types/adapters/oasf_v1alpha2.go b/server/types/adapters/oasf_v1alpha2.go index f8fc0c47e..02be7058e 100644 --- a/server/types/adapters/oasf_v1alpha2.go +++ b/server/types/adapters/oasf_v1alpha2.go @@ -1,438 +1,438 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package adapters - -import ( - typesv1alpha2 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha2" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/oasf-sdk/pkg/decoder" -) - -// V1Alpha2Adapter adapts typesv1alpha2.Record to types.RecordData interface. -type V1Alpha2Adapter struct { - record *typesv1alpha2.Record -} - -// Compile-time interface checks. -var ( - _ types.RecordData = (*V1Alpha2Adapter)(nil) - _ types.LabelProvider = (*V1Alpha2Adapter)(nil) -) - -// NewV1Alpha2Adapter creates a new V1Alpha2Adapter. -func NewV1Alpha2Adapter(record *typesv1alpha2.Record) *V1Alpha2Adapter { - return &V1Alpha2Adapter{record: record} -} - -// GetAnnotations implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetAnnotations() map[string]string { - if a.record == nil { - return nil - } - - return a.record.GetAnnotations() -} - -// GetAuthors implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetAuthors() []string { - if a.record == nil { - return nil - } - - return a.record.GetAuthors() -} - -// GetCreatedAt implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetCreatedAt() string { - if a.record == nil { - return "" - } - - return a.record.GetCreatedAt() -} - -// GetDescription implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetDescription() string { - if a.record == nil { - return "" - } - - return a.record.GetDescription() -} - -// GetDomains implements types.RecordData interface. 
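Each adapter's GetAllLabels concatenates the per-type label sets into one flat slice for the routing layer. A toy mirror of that aggregation — the skill, domain, and locator names below are invented examples — shows the shape of the output:

package main

import "fmt"

// labelProvider is a toy stand-in for the types.LabelProvider shape above.
type labelProvider struct {
    skills, domains, modules, locators []string
}

func prefixed(prefix string, names []string) []string {
    out := make([]string, 0, len(names))
    for _, n := range names {
        out = append(out, prefix+n)
    }
    return out
}

// allLabels mirrors GetAllLabels: skills, then domains, modules, locators.
func (p labelProvider) allLabels() []string {
    var all []string
    all = append(all, prefixed("/skills/", p.skills)...)
    all = append(all, prefixed("/domains/", p.domains)...)
    all = append(all, prefixed("/modules/", p.modules)...)
    all = append(all, prefixed("/locators/", p.locators)...)
    return all
}

func main() {
    p := labelProvider{
        skills:   []string{"nlp/summarize"},
        domains:  []string{"finance"},
        locators: []string{"docker_image"},
    }
    fmt.Println(p.allLabels()) // [/skills/nlp/summarize /domains/finance /locators/docker_image]
}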
-func (a *V1Alpha2Adapter) GetDomains() []types.Domain { - if a.record == nil { - return nil - } - - domains := a.record.GetDomains() - result := make([]types.Domain, len(domains)) - - for i, domain := range domains { - result[i] = NewV1Alpha2DomainAdapter(domain) - } - - return result -} - -// V1Alpha2DomainAdapter adapts typesv1alpha2.Domain to types.Domain interface. -type V1Alpha2DomainAdapter struct { - domain *typesv1alpha2.Domain -} - -// NewV1Alpha2DomainAdapter creates a new V1Alpha2DomainAdapter. -func NewV1Alpha2DomainAdapter(domain *typesv1alpha2.Domain) *V1Alpha2DomainAdapter { - if domain == nil { - return nil - } - - return &V1Alpha2DomainAdapter{domain: domain} -} - -// GetAnnotations implements types.Domain interface. -func (d *V1Alpha2DomainAdapter) GetAnnotations() map[string]string { - return nil -} - -// GetID implements types.Domain interface. -func (d *V1Alpha2DomainAdapter) GetID() uint64 { - if d.domain == nil { - return 0 - } - - return uint64(d.domain.GetId()) -} - -// GetName implements types.Domain interface. -func (d *V1Alpha2DomainAdapter) GetName() string { - if d.domain == nil { - return "" - } - - return d.domain.GetName() -} - -// GetLocators implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetLocators() []types.Locator { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Locator, len(locators)) - - for i, locator := range locators { - result[i] = NewV1Alpha2LocatorAdapter(locator) - } - - return result -} - -// V1Alpha2LocatorAdapter adapts typesv1alpha2.Locator to types.Locator interface. -type V1Alpha2LocatorAdapter struct { - locator *typesv1alpha2.Locator -} - -// NewV1Alpha2LocatorAdapter creates a new V1Alpha2LocatorAdapter. -func NewV1Alpha2LocatorAdapter(locator *typesv1alpha2.Locator) *V1Alpha2LocatorAdapter { - if locator == nil { - return nil - } - - return &V1Alpha2LocatorAdapter{locator: locator} -} - -// GetAnnotations implements types.Locator interface. -func (l *V1Alpha2LocatorAdapter) GetAnnotations() map[string]string { - if l.locator == nil { - return nil - } - - return l.locator.GetAnnotations() -} - -// GetDigest implements types.Locator interface. -func (l *V1Alpha2LocatorAdapter) GetDigest() string { - if l.locator == nil { - return "" - } - - return l.locator.GetDigest() -} - -// GetSize implements types.Locator interface. -func (l *V1Alpha2LocatorAdapter) GetSize() uint64 { - if l.locator == nil { - return 0 - } - - return l.locator.GetSize() -} - -// GetType implements types.Locator interface. -func (l *V1Alpha2LocatorAdapter) GetType() string { - if l.locator == nil { - return "" - } - - return l.locator.GetType() -} - -// GetURL implements types.Locator interface. -func (l *V1Alpha2LocatorAdapter) GetURL() string { - if l.locator == nil { - return "" - } - - return l.locator.GetUrl() -} - -// GetModules implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetModules() []types.Module { - if a.record == nil { - return nil - } - - modules := a.record.GetModules() - - result := make([]types.Module, len(modules)) - for i, module := range modules { - result[i] = NewV1Alpha2ModuleAdapter(module) - } - - return result -} - -// V1Alpha2ModuleAdapter adapts typesv1alpha2.Module to types.Module interface. -type V1Alpha2ModuleAdapter struct { - module *typesv1alpha2.Module -} - -// NewV1Alpha2ModuleAdapter creates a new V1Alpha2ModuleAdapter. 
-func NewV1Alpha2ModuleAdapter(module *typesv1alpha2.Module) *V1Alpha2ModuleAdapter { - if module == nil { - return nil - } - - return &V1Alpha2ModuleAdapter{module: module} -} - -// GetData implements types.Module interface. -func (m *V1Alpha2ModuleAdapter) GetData() map[string]any { - if m.module == nil || m.module.GetData() == nil { - return nil - } - - resp, err := decoder.ProtoToStruct[map[string]any](m.module.GetData()) - if err != nil { - return nil - } - - return *resp -} - -// GetName implements types.Module interface. -func (m *V1Alpha2ModuleAdapter) GetName() string { - if m.module == nil { - return "" - } - - return m.module.GetName() -} - -// GetID implements types.Module interface. -func (m *V1Alpha2ModuleAdapter) GetID() uint64 { - if m.module == nil { - return 0 - } - - return uint64(m.module.GetId()) -} - -// GetName implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetName() string { - if a.record == nil { - return "" - } - - return a.record.GetName() -} - -// GetPreviousRecordCid implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetPreviousRecordCid() string { - if a.record == nil { - return "" - } - - return a.record.GetPreviousRecordCid() -} - -// GetSchemaVersion implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetSchemaVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetSchemaVersion() -} - -// GetSignature implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetSignature() types.Signature { - return nil -} - -// GetSkills implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetSkills() []types.Skill { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - - result := make([]types.Skill, len(skills)) - for i, skill := range skills { - result[i] = NewV1Alpha2SkillAdapter(skill) - } - - return result -} - -// V1Alpha2SkillAdapter adapts typesv1alpha2.Skill to types.Skill interface. -type V1Alpha2SkillAdapter struct { - skill *typesv1alpha2.Skill -} - -// NewV1Alpha2SkillAdapter creates a new V1Alpha2SkillAdapter. -func NewV1Alpha2SkillAdapter(skill *typesv1alpha2.Skill) *V1Alpha2SkillAdapter { - if skill == nil { - return nil - } - - return &V1Alpha2SkillAdapter{skill: skill} -} - -// GetAnnotations implements types.Skill interface. -func (s *V1Alpha2SkillAdapter) GetAnnotations() map[string]string { - return nil -} - -// GetID implements types.Skill interface. -func (s *V1Alpha2SkillAdapter) GetID() uint64 { - if s.skill == nil { - return 0 - } - - return uint64(s.skill.GetId()) -} - -// GetName implements types.Skill interface. -func (s *V1Alpha2SkillAdapter) GetName() string { - if s.skill == nil { - return "" - } - - return s.skill.GetName() -} - -// GetVersion implements types.RecordData interface. -func (a *V1Alpha2Adapter) GetVersion() string { - if a.record == nil { - return "" - } - - return a.record.GetVersion() -} - -// GetDomainLabels implements types.LabelProvider interface. -func (a *V1Alpha2Adapter) GetDomainLabels() []types.Label { - if a.record == nil { - return nil - } - - domains := a.record.GetDomains() - result := make([]types.Label, 0, len(domains)) - - for _, domain := range domains { - domainAdapter := NewV1Alpha2DomainAdapter(domain) - domainName := domainAdapter.GetName() - - domainLabel := types.Label(types.LabelTypeDomain.Prefix() + domainName) - result = append(result, domainLabel) - } - - return result -} - -// GetLocatorLabels implements types.LabelProvider interface. 
-func (a *V1Alpha2Adapter) GetLocatorLabels() []types.Label { - if a.record == nil { - return nil - } - - locators := a.record.GetLocators() - result := make([]types.Label, 0, len(locators)) - - for _, locator := range locators { - locatorAdapter := NewV1Alpha2LocatorAdapter(locator) - locatorType := locatorAdapter.GetType() - - locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType) - result = append(result, locatorLabel) - } - - return result -} - -// GetModuleLabels implements types.LabelProvider interface. -func (a *V1Alpha2Adapter) GetModuleLabels() []types.Label { - if a.record == nil { - return nil - } - - modules := a.record.GetModules() - result := make([]types.Label, 0, len(modules)) - - for _, module := range modules { - moduleAdapter := NewV1Alpha2ModuleAdapter(module) - moduleName := moduleAdapter.GetName() - - moduleLabel := types.Label(types.LabelTypeModule.Prefix() + moduleName) - result = append(result, moduleLabel) - } - - return result -} - -// GetSkillLabels implements types.LabelProvider interface. -func (a *V1Alpha2Adapter) GetSkillLabels() []types.Label { - if a.record == nil { - return nil - } - - skills := a.record.GetSkills() - - result := make([]types.Label, 0, len(skills)) - for _, skill := range skills { - skillAdapter := NewV1Alpha2SkillAdapter(skill) - skillName := skillAdapter.GetName() - - skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName) - result = append(result, skillLabel) - } - - return result -} - -// GetAllLabels implements types.LabelProvider interface. -func (a *V1Alpha2Adapter) GetAllLabels() []types.Label { - var allLabels []types.Label - - allLabels = append(allLabels, a.GetDomainLabels()...) - allLabels = append(allLabels, a.GetLocatorLabels()...) - allLabels = append(allLabels, a.GetModuleLabels()...) - allLabels = append(allLabels, a.GetSkillLabels()...) - - return allLabels -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + typesv1alpha2 "buf.build/gen/go/agntcy/oasf/protocolbuffers/go/agntcy/oasf/types/v1alpha2" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/oasf-sdk/pkg/decoder" +) + +// V1Alpha2Adapter adapts typesv1alpha2.Record to types.RecordData interface. +type V1Alpha2Adapter struct { + record *typesv1alpha2.Record +} + +// Compile-time interface checks. +var ( + _ types.RecordData = (*V1Alpha2Adapter)(nil) + _ types.LabelProvider = (*V1Alpha2Adapter)(nil) +) + +// NewV1Alpha2Adapter creates a new V1Alpha2Adapter. +func NewV1Alpha2Adapter(record *typesv1alpha2.Record) *V1Alpha2Adapter { + return &V1Alpha2Adapter{record: record} +} + +// GetAnnotations implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetAnnotations() map[string]string { + if a.record == nil { + return nil + } + + return a.record.GetAnnotations() +} + +// GetAuthors implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetAuthors() []string { + if a.record == nil { + return nil + } + + return a.record.GetAuthors() +} + +// GetCreatedAt implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetCreatedAt() string { + if a.record == nil { + return "" + } + + return a.record.GetCreatedAt() +} + +// GetDescription implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetDescription() string { + if a.record == nil { + return "" + } + + return a.record.GetDescription() +} + +// GetDomains implements types.RecordData interface. 
+func (a *V1Alpha2Adapter) GetDomains() []types.Domain { + if a.record == nil { + return nil + } + + domains := a.record.GetDomains() + result := make([]types.Domain, len(domains)) + + for i, domain := range domains { + result[i] = NewV1Alpha2DomainAdapter(domain) + } + + return result +} + +// V1Alpha2DomainAdapter adapts typesv1alpha2.Domain to types.Domain interface. +type V1Alpha2DomainAdapter struct { + domain *typesv1alpha2.Domain +} + +// NewV1Alpha2DomainAdapter creates a new V1Alpha2DomainAdapter. +func NewV1Alpha2DomainAdapter(domain *typesv1alpha2.Domain) *V1Alpha2DomainAdapter { + if domain == nil { + return nil + } + + return &V1Alpha2DomainAdapter{domain: domain} +} + +// GetAnnotations implements types.Domain interface. +func (d *V1Alpha2DomainAdapter) GetAnnotations() map[string]string { + return nil +} + +// GetID implements types.Domain interface. +func (d *V1Alpha2DomainAdapter) GetID() uint64 { + if d.domain == nil { + return 0 + } + + return uint64(d.domain.GetId()) +} + +// GetName implements types.Domain interface. +func (d *V1Alpha2DomainAdapter) GetName() string { + if d.domain == nil { + return "" + } + + return d.domain.GetName() +} + +// GetLocators implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetLocators() []types.Locator { + if a.record == nil { + return nil + } + + locators := a.record.GetLocators() + result := make([]types.Locator, len(locators)) + + for i, locator := range locators { + result[i] = NewV1Alpha2LocatorAdapter(locator) + } + + return result +} + +// V1Alpha2LocatorAdapter adapts typesv1alpha2.Locator to types.Locator interface. +type V1Alpha2LocatorAdapter struct { + locator *typesv1alpha2.Locator +} + +// NewV1Alpha2LocatorAdapter creates a new V1Alpha2LocatorAdapter. +func NewV1Alpha2LocatorAdapter(locator *typesv1alpha2.Locator) *V1Alpha2LocatorAdapter { + if locator == nil { + return nil + } + + return &V1Alpha2LocatorAdapter{locator: locator} +} + +// GetAnnotations implements types.Locator interface. +func (l *V1Alpha2LocatorAdapter) GetAnnotations() map[string]string { + if l.locator == nil { + return nil + } + + return l.locator.GetAnnotations() +} + +// GetDigest implements types.Locator interface. +func (l *V1Alpha2LocatorAdapter) GetDigest() string { + if l.locator == nil { + return "" + } + + return l.locator.GetDigest() +} + +// GetSize implements types.Locator interface. +func (l *V1Alpha2LocatorAdapter) GetSize() uint64 { + if l.locator == nil { + return 0 + } + + return l.locator.GetSize() +} + +// GetType implements types.Locator interface. +func (l *V1Alpha2LocatorAdapter) GetType() string { + if l.locator == nil { + return "" + } + + return l.locator.GetType() +} + +// GetURL implements types.Locator interface. +func (l *V1Alpha2LocatorAdapter) GetURL() string { + if l.locator == nil { + return "" + } + + return l.locator.GetUrl() +} + +// GetModules implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetModules() []types.Module { + if a.record == nil { + return nil + } + + modules := a.record.GetModules() + + result := make([]types.Module, len(modules)) + for i, module := range modules { + result[i] = NewV1Alpha2ModuleAdapter(module) + } + + return result +} + +// V1Alpha2ModuleAdapter adapts typesv1alpha2.Module to types.Module interface. +type V1Alpha2ModuleAdapter struct { + module *typesv1alpha2.Module +} + +// NewV1Alpha2ModuleAdapter creates a new V1Alpha2ModuleAdapter. 
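Unlike the v1alpha0/v1alpha1 constructors, several v1alpha2 constructors return nil when handed a nil message. Worth noting: a nil *T stored in a non-empty interface compares as non-nil. The sketch below adds an explicit receiver check that the adapters above do not have — they rely instead on the record's getters never yielding nil elements:

package main

import "fmt"

type domain struct{ name string }

type domainAdapter struct{ d *domain }

func newDomainAdapter(d *domain) *domainAdapter {
    if d == nil {
        return nil // same guard as NewV1Alpha2DomainAdapter
    }
    return &domainAdapter{d: d}
}

type namer interface{ GetName() string }

func (a *domainAdapter) GetName() string {
    if a == nil || a.d == nil { // also guard the receiver, since the constructor can return nil
        return ""
    }
    return a.d.name
}

func main() {
    var n namer = newDomainAdapter(nil)
    fmt.Println(n == nil)    // false: a typed nil inside an interface is non-nil
    fmt.Println(n.GetName()) // "" — safe only because GetName checks a == nil
}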
+func NewV1Alpha2ModuleAdapter(module *typesv1alpha2.Module) *V1Alpha2ModuleAdapter { + if module == nil { + return nil + } + + return &V1Alpha2ModuleAdapter{module: module} +} + +// GetData implements types.Module interface. +func (m *V1Alpha2ModuleAdapter) GetData() map[string]any { + if m.module == nil || m.module.GetData() == nil { + return nil + } + + resp, err := decoder.ProtoToStruct[map[string]any](m.module.GetData()) + if err != nil { + return nil + } + + return *resp +} + +// GetName implements types.Module interface. +func (m *V1Alpha2ModuleAdapter) GetName() string { + if m.module == nil { + return "" + } + + return m.module.GetName() +} + +// GetID implements types.Module interface. +func (m *V1Alpha2ModuleAdapter) GetID() uint64 { + if m.module == nil { + return 0 + } + + return uint64(m.module.GetId()) +} + +// GetName implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetName() string { + if a.record == nil { + return "" + } + + return a.record.GetName() +} + +// GetPreviousRecordCid implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetPreviousRecordCid() string { + if a.record == nil { + return "" + } + + return a.record.GetPreviousRecordCid() +} + +// GetSchemaVersion implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetSchemaVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetSchemaVersion() +} + +// GetSignature implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetSignature() types.Signature { + return nil +} + +// GetSkills implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetSkills() []types.Skill { + if a.record == nil { + return nil + } + + skills := a.record.GetSkills() + + result := make([]types.Skill, len(skills)) + for i, skill := range skills { + result[i] = NewV1Alpha2SkillAdapter(skill) + } + + return result +} + +// V1Alpha2SkillAdapter adapts typesv1alpha2.Skill to types.Skill interface. +type V1Alpha2SkillAdapter struct { + skill *typesv1alpha2.Skill +} + +// NewV1Alpha2SkillAdapter creates a new V1Alpha2SkillAdapter. +func NewV1Alpha2SkillAdapter(skill *typesv1alpha2.Skill) *V1Alpha2SkillAdapter { + if skill == nil { + return nil + } + + return &V1Alpha2SkillAdapter{skill: skill} +} + +// GetAnnotations implements types.Skill interface. +func (s *V1Alpha2SkillAdapter) GetAnnotations() map[string]string { + return nil +} + +// GetID implements types.Skill interface. +func (s *V1Alpha2SkillAdapter) GetID() uint64 { + if s.skill == nil { + return 0 + } + + return uint64(s.skill.GetId()) +} + +// GetName implements types.Skill interface. +func (s *V1Alpha2SkillAdapter) GetName() string { + if s.skill == nil { + return "" + } + + return s.skill.GetName() +} + +// GetVersion implements types.RecordData interface. +func (a *V1Alpha2Adapter) GetVersion() string { + if a.record == nil { + return "" + } + + return a.record.GetVersion() +} + +// GetDomainLabels implements types.LabelProvider interface. +func (a *V1Alpha2Adapter) GetDomainLabels() []types.Label { + if a.record == nil { + return nil + } + + domains := a.record.GetDomains() + result := make([]types.Label, 0, len(domains)) + + for _, domain := range domains { + domainAdapter := NewV1Alpha2DomainAdapter(domain) + domainName := domainAdapter.GetName() + + domainLabel := types.Label(types.LabelTypeDomain.Prefix() + domainName) + result = append(result, domainLabel) + } + + return result +} + +// GetLocatorLabels implements types.LabelProvider interface. 
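Module payloads arrive as protobuf Struct values, and GetData above converts them to map[string]any via the oasf-sdk's decoder.ProtoToStruct, deliberately collapsing conversion errors into a nil map. For the plain map case, the well-known-types API offers the same conversion; a sketch using only structpb, with invented field values:

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    // Build a protobuf Struct the way a module's data field would carry it.
    data, err := structpb.NewStruct(map[string]any{
        "endpoint": "https://example.com",
        "timeout":  30,
    })
    if err != nil {
        panic(err)
    }

    // AsMap is the well-known-types equivalent of what GetData achieves via
    // decoder.ProtoToStruct for the map[string]any case. Note that numeric
    // values come back as float64.
    m := data.AsMap()
    fmt.Println(m["endpoint"], m["timeout"]) // https://example.com 30
}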
+func (a *V1Alpha2Adapter) GetLocatorLabels() []types.Label { + if a.record == nil { + return nil + } + + locators := a.record.GetLocators() + result := make([]types.Label, 0, len(locators)) + + for _, locator := range locators { + locatorAdapter := NewV1Alpha2LocatorAdapter(locator) + locatorType := locatorAdapter.GetType() + + locatorLabel := types.Label(types.LabelTypeLocator.Prefix() + locatorType) + result = append(result, locatorLabel) + } + + return result +} + +// GetModuleLabels implements types.LabelProvider interface. +func (a *V1Alpha2Adapter) GetModuleLabels() []types.Label { + if a.record == nil { + return nil + } + + modules := a.record.GetModules() + result := make([]types.Label, 0, len(modules)) + + for _, module := range modules { + moduleAdapter := NewV1Alpha2ModuleAdapter(module) + moduleName := moduleAdapter.GetName() + + moduleLabel := types.Label(types.LabelTypeModule.Prefix() + moduleName) + result = append(result, moduleLabel) + } + + return result +} + +// GetSkillLabels implements types.LabelProvider interface. +func (a *V1Alpha2Adapter) GetSkillLabels() []types.Label { + if a.record == nil { + return nil + } + + skills := a.record.GetSkills() + + result := make([]types.Label, 0, len(skills)) + for _, skill := range skills { + skillAdapter := NewV1Alpha2SkillAdapter(skill) + skillName := skillAdapter.GetName() + + skillLabel := types.Label(types.LabelTypeSkill.Prefix() + skillName) + result = append(result, skillLabel) + } + + return result +} + +// GetAllLabels implements types.LabelProvider interface. +func (a *V1Alpha2Adapter) GetAllLabels() []types.Label { + var allLabels []types.Label + + allLabels = append(allLabels, a.GetDomainLabels()...) + allLabels = append(allLabels, a.GetLocatorLabels()...) + allLabels = append(allLabels, a.GetModuleLabels()...) + allLabels = append(allLabels, a.GetSkillLabels()...) + + return allLabels +} diff --git a/server/types/adapters/record.go b/server/types/adapters/record.go index 81cb6f398..a0139d162 100644 --- a/server/types/adapters/record.go +++ b/server/types/adapters/record.go @@ -1,49 +1,49 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package adapters - -import ( - "fmt" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/types" -) - -var _ types.Record = (*RecordAdapter)(nil) - -// RecordAdapter adapts corev1.Record to types.Record interface. -type RecordAdapter struct { - record *corev1.Record -} - -// NewRecordAdapter creates a new RecordAdapter. -func NewRecordAdapter(record *corev1.Record) *RecordAdapter { - return &RecordAdapter{record: record} -} - -// GetCid implements types.Record interface. -func (r *RecordAdapter) GetCid() string { - return r.record.GetCid() -} - -// GetRecordData implements types.Record interface. 
-func (r *RecordAdapter) GetRecordData() (types.RecordData, error) { - // Decode record - decoded, err := r.record.Decode() - if err != nil { - return nil, fmt.Errorf("failed to decode record: %w", err) - } - - // Determine record type and create appropriate adapter - switch { - case decoded.HasV1Alpha0(): - return NewV1Alpha0Adapter(decoded.GetV1Alpha0()), nil - case decoded.HasV1Alpha1(): - return NewV1Alpha1Adapter(decoded.GetV1Alpha1()), nil - case decoded.HasV1Alpha2(): - return NewV1Alpha2Adapter(decoded.GetV1Alpha2()), nil - default: - return nil, fmt.Errorf("unsupported record type: %T", decoded.GetRecord()) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + "fmt" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/types" +) + +var _ types.Record = (*RecordAdapter)(nil) + +// RecordAdapter adapts corev1.Record to types.Record interface. +type RecordAdapter struct { + record *corev1.Record +} + +// NewRecordAdapter creates a new RecordAdapter. +func NewRecordAdapter(record *corev1.Record) *RecordAdapter { + return &RecordAdapter{record: record} +} + +// GetCid implements types.Record interface. +func (r *RecordAdapter) GetCid() string { + return r.record.GetCid() +} + +// GetRecordData implements types.Record interface. +func (r *RecordAdapter) GetRecordData() (types.RecordData, error) { + // Decode record + decoded, err := r.record.Decode() + if err != nil { + return nil, fmt.Errorf("failed to decode record: %w", err) + } + + // Determine record type and create appropriate adapter + switch { + case decoded.HasV1Alpha0(): + return NewV1Alpha0Adapter(decoded.GetV1Alpha0()), nil + case decoded.HasV1Alpha1(): + return NewV1Alpha1Adapter(decoded.GetV1Alpha1()), nil + case decoded.HasV1Alpha2(): + return NewV1Alpha2Adapter(decoded.GetV1Alpha2()), nil + default: + return nil, fmt.Errorf("unsupported record type: %T", decoded.GetRecord()) + } +} diff --git a/server/types/api.go b/server/types/api.go index 27db5f86c..dd6626784 100644 --- a/server/types/api.go +++ b/server/types/api.go @@ -1,60 +1,60 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "github.com/agntcy/dir/server/config" - "github.com/agntcy/dir/server/events" -) - -// TODO: Extend with cleaning and garbage collection support. -type API interface { - // Options returns API options - Options() APIOptions - - // Store returns an implementation of the StoreAPI - Store() StoreAPI - - // Routing returns an implementation of the RoutingAPI - Routing() RoutingAPI - - // Database returns an implementation of the DatabaseAPI - Database() DatabaseAPI -} - -// APIOptions collects internal dependencies for all API services. -type APIOptions interface { - // Config returns the config data. Read only! Unsafe to edit. - Config() *config.Config - - // EventBus returns the safe event bus for publishing events. - // Returns a nil-safe wrapper that won't panic even if events are disabled. - EventBus() *events.SafeEventBus - - // WithEventBus returns a new APIOptions with the event bus set. 
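GetRecordData above decodes the stored record once, then dispatches on whichever OASF version is populated and wraps it in the matching adapter, so the rest of the server only ever sees types.RecordData. A minimal mirror of that dispatch shape with stand-in types:

package main

import (
    "errors"
    "fmt"
)

// recordData is a stand-in for types.RecordData.
type recordData interface{ GetName() string }

type v0 struct{ name string }
type v1 struct{ name string }

func (r v0) GetName() string { return r.name }
func (r v1) GetName() string { return r.name }

// decoded is a stand-in for the multi-version decode result.
type decoded struct {
    v0 *v0
    v1 *v1
}

// adapt mirrors RecordAdapter.GetRecordData: pick the populated version,
// wrap it, or fail on anything unsupported.
func adapt(d decoded) (recordData, error) {
    switch {
    case d.v0 != nil:
        return *d.v0, nil
    case d.v1 != nil:
        return *d.v1, nil
    default:
        return nil, errors.New("unsupported record type")
    }
}

func main() {
    rd, err := adapt(decoded{v1: &v1{name: "my-agent"}})
    if err != nil {
        panic(err)
    }
    fmt.Println(rd.GetName()) // my-agent
}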
- WithEventBus(bus *events.SafeEventBus) APIOptions -} - -type options struct { - config *config.Config - eventBus *events.SafeEventBus -} - -func NewOptions(config *config.Config) APIOptions { - return &options{ - config: config, - eventBus: events.NewSafeEventBus(nil), // Default to nil-safe no-op - } -} - -func (o *options) Config() *config.Config { return o.config } - -func (o *options) EventBus() *events.SafeEventBus { return o.eventBus } - -func (o *options) WithEventBus(bus *events.SafeEventBus) APIOptions { - return &options{ - config: o.config, - eventBus: bus, - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "github.com/agntcy/dir/server/config" + "github.com/agntcy/dir/server/events" +) + +// TODO: Extend with cleaning and garbage collection support. +type API interface { + // Options returns API options + Options() APIOptions + + // Store returns an implementation of the StoreAPI + Store() StoreAPI + + // Routing returns an implementation of the RoutingAPI + Routing() RoutingAPI + + // Database returns an implementation of the DatabaseAPI + Database() DatabaseAPI +} + +// APIOptions collects internal dependencies for all API services. +type APIOptions interface { + // Config returns the config data. Read only! Unsafe to edit. + Config() *config.Config + + // EventBus returns the safe event bus for publishing events. + // Returns a nil-safe wrapper that won't panic even if events are disabled. + EventBus() *events.SafeEventBus + + // WithEventBus returns a new APIOptions with the event bus set. + WithEventBus(bus *events.SafeEventBus) APIOptions +} + +type options struct { + config *config.Config + eventBus *events.SafeEventBus +} + +func NewOptions(config *config.Config) APIOptions { + return &options{ + config: config, + eventBus: events.NewSafeEventBus(nil), // Default to nil-safe no-op + } +} + +func (o *options) Config() *config.Config { return o.config } + +func (o *options) EventBus() *events.SafeEventBus { return o.eventBus } + +func (o *options) WithEventBus(bus *events.SafeEventBus) APIOptions { + return &options{ + config: o.config, + eventBus: bus, + } +} diff --git a/server/types/database.go b/server/types/database.go index 20f4515ed..1815940e4 100644 --- a/server/types/database.go +++ b/server/types/database.go @@ -1,82 +1,82 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "context" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - storev1 "github.com/agntcy/dir/api/store/v1" -) - -type DatabaseAPI interface { - // SearchDatabaseAPI handles management of the search database. - SearchDatabaseAPI - - // SyncDatabaseAPI handles management of the sync database. - SyncDatabaseAPI - - // PublicationDatabaseAPI handles management of the publication database. - PublicationDatabaseAPI - - // IsReady checks if the database connection is ready to serve traffic. - IsReady(context.Context) bool -} - -type SearchDatabaseAPI interface { - // AddRecord adds a new record to the search database. - AddRecord(record Record) error - - // GetRecordCIDs retrieves record CIDs based on the provided filters. - GetRecordCIDs(opts ...FilterOption) ([]string, error) - - // RemoveRecord removes a record from the search database by CID. - RemoveRecord(cid string) error -} - -type SyncDatabaseAPI interface { - // CreateSync creates a new sync object in the database. 
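WithEventBus above returns a fresh options value instead of mutating the receiver, keeping a shared APIOptions effectively immutable. A sketch of that copy-on-write pattern, with field types simplified to strings:

package main

import "fmt"

type opts struct {
    name string
    bus  string
}

// withBus mirrors options.WithEventBus: copy everything, swap one field.
func (o opts) withBus(bus string) opts {
    o.bus = bus // o is already a copy; the caller's value is untouched
    return o
}

func main() {
    base := opts{name: "server", bus: "noop"}
    wired := base.withBus("events")
    fmt.Println(base.bus, wired.bus) // noop events
}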
- CreateSync(remoteURL string, cids []string) (string, error) - - // GetSyncByID retrieves a sync object by its ID. - GetSyncByID(syncID string) (SyncObject, error) - - // GetSyncs retrieves all sync objects. - GetSyncs(offset, limit int) ([]SyncObject, error) - - // GetSyncsByStatus retrieves all sync objects by their status. - GetSyncsByStatus(status storev1.SyncStatus) ([]SyncObject, error) - - // UpdateSyncStatus updates an existing sync object in the database. - UpdateSyncStatus(syncID string, status storev1.SyncStatus) error - - // UpdateSyncRemoteRegistry updates the remote registry of a sync object. - UpdateSyncRemoteRegistry(syncID string, remoteRegistry string) error - - // GetSyncRemoteRegistry retrieves the remote registry of a sync object. - GetSyncRemoteRegistry(syncID string) (string, error) - - // DeleteSync deletes a sync object by its ID. - DeleteSync(syncID string) error -} - -type PublicationDatabaseAPI interface { - // CreatePublication creates a new publication object in the database. - CreatePublication(request *routingv1.PublishRequest) (string, error) - - // GetPublicationByID retrieves a publication object by its ID. - GetPublicationByID(publicationID string) (PublicationObject, error) - - // GetPublications retrieves all publication objects. - GetPublications(offset, limit int) ([]PublicationObject, error) - - // GetPublicationsByStatus retrieves all publication objects by their status. - GetPublicationsByStatus(status routingv1.PublicationStatus) ([]PublicationObject, error) - - // UpdatePublicationStatus updates an existing publication object's status in the database. - UpdatePublicationStatus(publicationID string, status routingv1.PublicationStatus) error - - // DeletePublication deletes a publication object by its ID. - DeletePublication(publicationID string) error -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "context" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + storev1 "github.com/agntcy/dir/api/store/v1" +) + +type DatabaseAPI interface { + // SearchDatabaseAPI handles management of the search database. + SearchDatabaseAPI + + // SyncDatabaseAPI handles management of the sync database. + SyncDatabaseAPI + + // PublicationDatabaseAPI handles management of the publication database. + PublicationDatabaseAPI + + // IsReady checks if the database connection is ready to serve traffic. + IsReady(context.Context) bool +} + +type SearchDatabaseAPI interface { + // AddRecord adds a new record to the search database. + AddRecord(record Record) error + + // GetRecordCIDs retrieves record CIDs based on the provided filters. + GetRecordCIDs(opts ...FilterOption) ([]string, error) + + // RemoveRecord removes a record from the search database by CID. + RemoveRecord(cid string) error +} + +type SyncDatabaseAPI interface { + // CreateSync creates a new sync object in the database. + CreateSync(remoteURL string, cids []string) (string, error) + + // GetSyncByID retrieves a sync object by its ID. + GetSyncByID(syncID string) (SyncObject, error) + + // GetSyncs retrieves all sync objects. + GetSyncs(offset, limit int) ([]SyncObject, error) + + // GetSyncsByStatus retrieves all sync objects by their status. + GetSyncsByStatus(status storev1.SyncStatus) ([]SyncObject, error) + + // UpdateSyncStatus updates an existing sync object in the database. 
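The sync API above is plain CRUD over sync objects keyed by ID, with state transitions driven through UpdateSyncStatus. A toy in-memory stand-in — string statuses here replace the generated storev1.SyncStatus values — illustrates the lifecycle:

package main

import (
    "errors"
    "fmt"
)

// syncStore is a toy in-memory stand-in for the sync portion of DatabaseAPI.
type syncStore struct {
    status map[string]string
    next   int
}

func newSyncStore() *syncStore { return &syncStore{status: map[string]string{}} }

func (s *syncStore) CreateSync(remoteURL string) string {
    s.next++
    id := fmt.Sprintf("sync-%d", s.next)
    s.status[id] = "PENDING"
    return id
}

func (s *syncStore) UpdateSyncStatus(id, status string) error {
    if _, ok := s.status[id]; !ok {
        return errors.New("sync not found: " + id)
    }
    s.status[id] = status
    return nil
}

func main() {
    db := newSyncStore()
    id := db.CreateSync("https://peer.example.com")
    _ = db.UpdateSyncStatus(id, "IN_PROGRESS")
    fmt.Println(id, db.status[id]) // sync-1 IN_PROGRESS
}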
+ UpdateSyncStatus(syncID string, status storev1.SyncStatus) error + + // UpdateSyncRemoteRegistry updates the remote registry of a sync object. + UpdateSyncRemoteRegistry(syncID string, remoteRegistry string) error + + // GetSyncRemoteRegistry retrieves the remote registry of a sync object. + GetSyncRemoteRegistry(syncID string) (string, error) + + // DeleteSync deletes a sync object by its ID. + DeleteSync(syncID string) error +} + +type PublicationDatabaseAPI interface { + // CreatePublication creates a new publication object in the database. + CreatePublication(request *routingv1.PublishRequest) (string, error) + + // GetPublicationByID retrieves a publication object by its ID. + GetPublicationByID(publicationID string) (PublicationObject, error) + + // GetPublications retrieves all publication objects. + GetPublications(offset, limit int) ([]PublicationObject, error) + + // GetPublicationsByStatus retrieves all publication objects by their status. + GetPublicationsByStatus(status routingv1.PublicationStatus) ([]PublicationObject, error) + + // UpdatePublicationStatus updates an existing publication object's status in the database. + UpdatePublicationStatus(publicationID string, status routingv1.PublicationStatus) error + + // DeletePublication deletes a publication object by its ID. + DeletePublication(publicationID string) error +} diff --git a/server/types/datastore.go b/server/types/datastore.go index 8ef40186c..a374cb16d 100644 --- a/server/types/datastore.go +++ b/server/types/datastore.go @@ -1,22 +1,22 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "github.com/ipfs/go-datastore" -) - -// Datastore is a local key-value store with path-like query syntax. -// Used as a local cache for information such as -// peers, contents, cache, and storage metadata. -// -// Backends: Badger, BoltDB, LevelDB, Mem, Map, etc. -// Providers: Filesystem (local/remote), OCI (remote sync), S3 (remote sync). -// -// NOTE: This is an interface to serve internal and external APIs. -// -// Ref: https://github.com/ipfs/go-datastore -type Datastore interface { - datastore.Batching -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "github.com/ipfs/go-datastore" +) + +// Datastore is a local key-value store with path-like query syntax. +// Used as a local cache for information such as +// peers, contents, cache, and storage metadata. +// +// Backends: Badger, BoltDB, LevelDB, Mem, Map, etc. +// Providers: Filesystem (local/remote), OCI (remote sync), S3 (remote sync). +// +// NOTE: This is an interface to serve internal and external APIs. +// +// Ref: https://github.com/ipfs/go-datastore +type Datastore interface { + datastore.Batching +} diff --git a/server/types/label.go b/server/types/label.go index 2b6c411ed..e283cea8d 100644 --- a/server/types/label.go +++ b/server/types/label.go @@ -1,198 +1,198 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -// Label types and operations for the routing system. -// This file provides unified label types including Label, LabelType, and LabelMetadata, -// along with utilities for label extraction and manipulation. - -import ( - "errors" - "strings" - "time" -) - -// LabelType represents the category of a label based on its namespace. -// Using string type for natural representation and direct DHT integration. 
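// Editor's note: a small sketch of the path-like key/query usage that the
// Datastore interface above wraps. An in-memory MapDatastore (mutex-wrapped,
// which is the usual way to get a datastore.Batching value) stands in for a
// real Badger/LevelDB backend; keys and values are made up, and the
// context-taking signatures assume a recent go-datastore release.
package main

import (
	"context"
	"fmt"

	datastore "github.com/ipfs/go-datastore"
	query "github.com/ipfs/go-datastore/query"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()
	ds := dssync.MutexWrap(datastore.NewMapDatastore())

	// Path-like keys group related metadata under a common prefix.
	_ = ds.Put(ctx, datastore.NewKey("/peers/peer1"), []byte("addr1"))
	_ = ds.Put(ctx, datastore.NewKey("/peers/peer2"), []byte("addr2"))

	// Prefix queries retrieve everything under a subtree.
	res, _ := ds.Query(ctx, query.Query{Prefix: "/peers"})
	entries, _ := res.Rest()

	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value))
	}
}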
-type LabelType string - -const ( - LabelTypeUnknown LabelType = "" - LabelTypeSkill LabelType = "skills" - LabelTypeDomain LabelType = "domains" - LabelTypeModule LabelType = "modules" - LabelTypeLocator LabelType = "locators" -) - -// String returns the string representation of the label type. -// This is used for DHT validation, logging, and debugging. -func (lt LabelType) String() string { - return string(lt) -} - -// Example: LabelTypeSkill.Prefix() returns "/skills/". -func (lt LabelType) Prefix() string { - if lt == LabelTypeUnknown { - return "" - } - - return "/" + string(lt) + "/" -} - -// IsValid checks if the label type is one of the supported types. -func (lt LabelType) IsValid() bool { - switch lt { - case LabelTypeSkill, LabelTypeDomain, LabelTypeModule, LabelTypeLocator: - return true - case LabelTypeUnknown: - return false - default: - return false - } -} - -// AllLabelTypes returns all supported label types. -func AllLabelTypes() []LabelType { - return []LabelType{LabelTypeSkill, LabelTypeDomain, LabelTypeModule, LabelTypeLocator} -} - -// ParseLabelType converts a string to LabelType if valid. -func ParseLabelType(s string) (LabelType, bool) { - lt := LabelType(s) - if lt.IsValid() { - return lt, true - } - - return LabelTypeUnknown, false -} - -// Label represents a typed label with namespace awareness. -// This provides type safety and eliminates string-based operations throughout the routing system. -type Label string - -// String returns the string representation of the label. -// This is used for storage, logging, and API boundary conversions. -func (l Label) String() string { - return string(l) -} - -// Bytes returns the byte representation for efficient storage operations. -// This eliminates the need for string conversions in datastore operations. -func (l Label) Bytes() []byte { - return []byte(l) -} - -// Type returns the type of the label based on its namespace prefix. -// This enables efficient type-based filtering without complex lookups. -func (l Label) Type() LabelType { - s := string(l) - - switch { - case strings.HasPrefix(s, LabelTypeSkill.Prefix()): - return LabelTypeSkill - case strings.HasPrefix(s, LabelTypeDomain.Prefix()): - return LabelTypeDomain - case strings.HasPrefix(s, LabelTypeModule.Prefix()): - return LabelTypeModule - case strings.HasPrefix(s, LabelTypeLocator.Prefix()): - return LabelTypeLocator - default: - return LabelTypeUnknown - } -} - -// Namespace returns the namespace prefix of the label. -// For example, Label("/skills/AI") returns "/skills/". -func (l Label) Namespace() string { - return l.Type().Prefix() -} - -// Value returns the label value without the namespace prefix. -// For example, Label("/skills/AI/ML") returns "AI/ML". -func (l Label) Value() string { - namespace := l.Namespace() - if namespace == "" { - return string(l) - } - - return strings.TrimPrefix(string(l), namespace) -} - -// LabelMetadata stores temporal information about a label announcement. -// The label itself is stored in the datastore key structure: /skills/AI/CID123/Peer1 -// where the metadata tracks when the label was first announced and last seen. -type LabelMetadata struct { - Timestamp time.Time `json:"timestamp"` // When label was first announced - LastSeen time.Time `json:"last_seen"` // When label was last seen/refreshed -} - -// Validate checks if the metadata is valid and all required fields are properly set. 
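// Editor's note: a short sketch of the LabelMetadata lifecycle defined
// nearby; the 48-hour staleness window is an arbitrary example value.
package main

import (
	"fmt"
	"time"

	"github.com/agntcy/dir/server/types"
)

func main() {
	now := time.Now()

	meta := &types.LabelMetadata{Timestamp: now, LastSeen: now}
	if err := meta.Validate(); err != nil {
		panic(err)
	}

	meta.Update() // refresh LastSeen on re-announcement

	fmt.Println(meta.IsStale(48 * time.Hour)) // false right after a refresh
	fmt.Println(meta.Age() < time.Minute)     // true
}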
-func (m *LabelMetadata) Validate() error { - if m.Timestamp.IsZero() { - return errors.New("timestamp cannot be zero") - } - - if m.LastSeen.IsZero() { - return errors.New("last seen timestamp cannot be zero") - } - - if m.LastSeen.Before(m.Timestamp) { - return errors.New("last seen cannot be before creation timestamp") - } - - return nil -} - -// IsStale checks if the label is older than the given maximum age duration. -func (m *LabelMetadata) IsStale(maxAge time.Duration) bool { - return time.Since(m.LastSeen) > maxAge -} - -// Age returns how long ago the label was last seen. -func (m *LabelMetadata) Age() time.Duration { - return time.Since(m.LastSeen) -} - -// Update refreshes the LastSeen timestamp to the current time. -func (m *LabelMetadata) Update() { - m.LastSeen = time.Now() -} - -// Constants for label validation and processing. -const ( - // Enhanced format: /type/label/CID/PeerID splits into ["", "type", "label", "CID", "PeerID"] = 5 parts. - MinLabelKeyParts = 5 -) - -// GetLabelsFromRecord extracts labels from a record using the LabelProvider interface. -// This function works at the types interface level, making it usable from any package -// without circular dependencies. -// -// The caller is responsible for wrapping concrete record types (e.g., *corev1.Record) -// with the appropriate adapter before calling this function. -// -// Example: -// -// adapter := adapters.NewRecordAdapter(corev1Record) -// labels := types.GetLabelsFromRecord(adapter) -// -// Returns: -// - []Label: List of all labels extracted from the record -// - nil: If record is nil, has no data, or doesn't implement LabelProvider -func GetLabelsFromRecord(record Record) []Label { - if record == nil { - return nil - } - - recordData, err := record.GetRecordData() - if err != nil { - return nil - } - - if provider, ok := recordData.(LabelProvider); ok { - return provider.GetAllLabels() - } - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +// Label types and operations for the routing system. +// This file provides unified label types including Label, LabelType, and LabelMetadata, +// along with utilities for label extraction and manipulation. + +import ( + "errors" + "strings" + "time" +) + +// LabelType represents the category of a label based on its namespace. +// Using string type for natural representation and direct DHT integration. +type LabelType string + +const ( + LabelTypeUnknown LabelType = "" + LabelTypeSkill LabelType = "skills" + LabelTypeDomain LabelType = "domains" + LabelTypeModule LabelType = "modules" + LabelTypeLocator LabelType = "locators" +) + +// String returns the string representation of the label type. +// This is used for DHT validation, logging, and debugging. +func (lt LabelType) String() string { + return string(lt) +} + +// Example: LabelTypeSkill.Prefix() returns "/skills/". +func (lt LabelType) Prefix() string { + if lt == LabelTypeUnknown { + return "" + } + + return "/" + string(lt) + "/" +} + +// IsValid checks if the label type is one of the supported types. +func (lt LabelType) IsValid() bool { + switch lt { + case LabelTypeSkill, LabelTypeDomain, LabelTypeModule, LabelTypeLocator: + return true + case LabelTypeUnknown: + return false + default: + return false + } +} + +// AllLabelTypes returns all supported label types. 
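// Editor's note: a quick illustration (with made-up values) of how the
// Label helpers above decompose a namespaced label string.
package main

import (
	"fmt"

	"github.com/agntcy/dir/server/types"
)

func main() {
	l := types.Label("/skills/AI/ML")

	fmt.Println(l.Type())      // "skills"
	fmt.Println(l.Namespace()) // "/skills/"
	fmt.Println(l.Value())     // "AI/ML"

	if lt, ok := types.ParseLabelType("domains"); ok {
		fmt.Println(lt.Prefix()) // "/domains/"
	}
}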
+func AllLabelTypes() []LabelType { + return []LabelType{LabelTypeSkill, LabelTypeDomain, LabelTypeModule, LabelTypeLocator} +} + +// ParseLabelType converts a string to LabelType if valid. +func ParseLabelType(s string) (LabelType, bool) { + lt := LabelType(s) + if lt.IsValid() { + return lt, true + } + + return LabelTypeUnknown, false +} + +// Label represents a typed label with namespace awareness. +// This provides type safety and eliminates string-based operations throughout the routing system. +type Label string + +// String returns the string representation of the label. +// This is used for storage, logging, and API boundary conversions. +func (l Label) String() string { + return string(l) +} + +// Bytes returns the byte representation for efficient storage operations. +// This eliminates the need for string conversions in datastore operations. +func (l Label) Bytes() []byte { + return []byte(l) +} + +// Type returns the type of the label based on its namespace prefix. +// This enables efficient type-based filtering without complex lookups. +func (l Label) Type() LabelType { + s := string(l) + + switch { + case strings.HasPrefix(s, LabelTypeSkill.Prefix()): + return LabelTypeSkill + case strings.HasPrefix(s, LabelTypeDomain.Prefix()): + return LabelTypeDomain + case strings.HasPrefix(s, LabelTypeModule.Prefix()): + return LabelTypeModule + case strings.HasPrefix(s, LabelTypeLocator.Prefix()): + return LabelTypeLocator + default: + return LabelTypeUnknown + } +} + +// Namespace returns the namespace prefix of the label. +// For example, Label("/skills/AI") returns "/skills/". +func (l Label) Namespace() string { + return l.Type().Prefix() +} + +// Value returns the label value without the namespace prefix. +// For example, Label("/skills/AI/ML") returns "AI/ML". +func (l Label) Value() string { + namespace := l.Namespace() + if namespace == "" { + return string(l) + } + + return strings.TrimPrefix(string(l), namespace) +} + +// LabelMetadata stores temporal information about a label announcement. +// The label itself is stored in the datastore key structure: /skills/AI/CID123/Peer1 +// where the metadata tracks when the label was first announced and last seen. +type LabelMetadata struct { + Timestamp time.Time `json:"timestamp"` // When label was first announced + LastSeen time.Time `json:"last_seen"` // When label was last seen/refreshed +} + +// Validate checks if the metadata is valid and all required fields are properly set. +func (m *LabelMetadata) Validate() error { + if m.Timestamp.IsZero() { + return errors.New("timestamp cannot be zero") + } + + if m.LastSeen.IsZero() { + return errors.New("last seen timestamp cannot be zero") + } + + if m.LastSeen.Before(m.Timestamp) { + return errors.New("last seen cannot be before creation timestamp") + } + + return nil +} + +// IsStale checks if the label is older than the given maximum age duration. +func (m *LabelMetadata) IsStale(maxAge time.Duration) bool { + return time.Since(m.LastSeen) > maxAge +} + +// Age returns how long ago the label was last seen. +func (m *LabelMetadata) Age() time.Duration { + return time.Since(m.LastSeen) +} + +// Update refreshes the LastSeen timestamp to the current time. +func (m *LabelMetadata) Update() { + m.LastSeen = time.Now() +} + +// Constants for label validation and processing. +const ( + // Enhanced format: /type/label/CID/PeerID splits into ["", "type", "label", "CID", "PeerID"] = 5 parts. 
+ MinLabelKeyParts = 5 +) + +// GetLabelsFromRecord extracts labels from a record using the LabelProvider interface. +// This function works at the types interface level, making it usable from any package +// without circular dependencies. +// +// The caller is responsible for wrapping concrete record types (e.g., *corev1.Record) +// with the appropriate adapter before calling this function. +// +// Example: +// +// adapter := adapters.NewRecordAdapter(corev1Record) +// labels := types.GetLabelsFromRecord(adapter) +// +// Returns: +// - []Label: List of all labels extracted from the record +// - nil: If record is nil, has no data, or doesn't implement LabelProvider +func GetLabelsFromRecord(record Record) []Label { + if record == nil { + return nil + } + + recordData, err := record.GetRecordData() + if err != nil { + return nil + } + + if provider, ok := recordData.(LabelProvider); ok { + return provider.GetAllLabels() + } + + return nil +} diff --git a/server/types/label_test.go b/server/types/label_test.go index be0530120..45cb81e8d 100644 --- a/server/types/label_test.go +++ b/server/types/label_test.go @@ -1,153 +1,153 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types_test - -import ( - "testing" - - corev1 "github.com/agntcy/dir/api/core/v1" - "github.com/agntcy/dir/server/types" - "github.com/agntcy/dir/server/types/adapters" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestGetLabelsFromRecord(t *testing.T) { - t.Run("valid_v1alpha0_record", func(t *testing.T) { - // Create a valid v1alpha0 record JSON - recordJSON := `{ - "name": "test-agent", - "version": "1.0.0", - "schema_version": "v0.3.1", - "authors": ["test"], - "created_at": "2023-01-01T00:00:00Z", - "skills": [ - { - "category_name": "Natural Language Processing", - "category_uid": 1, - "class_name": "Text Completion", - "class_uid": 10201 - } - ], - "locators": [ - { - "type": "docker-image", - "url": "https://example.com/test", - "size": 1000, - "digest": "sha256:abc123" - } - ], - "extensions": [ - { - "name": "schema.oasf.agntcy.org/features/runtime/model", - "version": "v0.0.0", - "data": {} - } - ] - }` - - record, err := corev1.UnmarshalRecord([]byte(recordJSON)) - require.NoError(t, err) - - adapter := adapters.NewRecordAdapter(record) - labels := types.GetLabelsFromRecord(adapter) - require.NotNil(t, labels) - - // Should have at least skill, locator, and module labels - assert.GreaterOrEqual(t, len(labels), 3) - - // Convert to strings for easier assertion - labelStrings := make([]string, len(labels)) - for i, label := range labels { - labelStrings[i] = label.String() - } - - // Check expected labels are present - assert.Contains(t, labelStrings, "/skills/Natural Language Processing/Text Completion") - assert.Contains(t, labelStrings, "/locators/docker-image") - assert.Contains(t, labelStrings, "/modules/runtime/model") // Schema prefix stripped - }) - - t.Run("valid_v1alpha1_record", func(t *testing.T) { - // Create a valid v1alpha1 record JSON - recordJSON := `{ - "name": "test-agent-v2", - "version": "2.0.0", - "schema_version": "0.7.0", - "authors": ["test"], - "created_at": "2023-01-01T00:00:00Z", - "skills": [ - { - "name": "Machine Learning/Classification", - "id": 20301 - } - ], - "domains": [ - { - "name": "healthcare/medical_technology", - "id": 905 - } - ], - "locators": [ - { - "type": "http", - "url": "https://example.com/v2", - "size": 2000, - "digest": "sha256:def456" - } - ], - 
"modules": [ - { - "name": "security/authentication", - "data": {} - } - ] - }` - - record, err := corev1.UnmarshalRecord([]byte(recordJSON)) - require.NoError(t, err) - - adapter := adapters.NewRecordAdapter(record) - labels := types.GetLabelsFromRecord(adapter) - require.NotNil(t, labels) - - // Should have skill, domain, locator, and module labels - assert.GreaterOrEqual(t, len(labels), 4) - - // Convert to strings for easier assertion - labelStrings := make([]string, len(labels)) - for i, label := range labels { - labelStrings[i] = label.String() - } - - // Check expected labels are present - assert.Contains(t, labelStrings, "/skills/Machine Learning/Classification") - assert.Contains(t, labelStrings, "/domains/healthcare/medical_technology") - assert.Contains(t, labelStrings, "/locators/http") - assert.Contains(t, labelStrings, "/modules/security/authentication") // Direct module name - }) - - t.Run("invalid_record", func(t *testing.T) { - // Create invalid JSON that will fail to unmarshal - invalidJSON := `{"invalid": json}` - - record, err := corev1.UnmarshalRecord([]byte(invalidJSON)) - if err != nil { - // If unmarshaling fails, we can't test GetLabelsFromRecord - t.Skip("Invalid JSON test skipped - unmarshal failed as expected") - - return - } - - adapter := adapters.NewRecordAdapter(record) - labels := types.GetLabelsFromRecord(adapter) - // Should handle gracefully and return nil or empty slice - assert.Empty(t, labels) - }) - - t.Run("nil_record", func(t *testing.T) { - labels := types.GetLabelsFromRecord(nil) - assert.Nil(t, labels) - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types_test + +import ( + "testing" + + corev1 "github.com/agntcy/dir/api/core/v1" + "github.com/agntcy/dir/server/types" + "github.com/agntcy/dir/server/types/adapters" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetLabelsFromRecord(t *testing.T) { + t.Run("valid_v1alpha0_record", func(t *testing.T) { + // Create a valid v1alpha0 record JSON + recordJSON := `{ + "name": "test-agent", + "version": "1.0.0", + "schema_version": "v0.3.1", + "authors": ["test"], + "created_at": "2023-01-01T00:00:00Z", + "skills": [ + { + "category_name": "Natural Language Processing", + "category_uid": 1, + "class_name": "Text Completion", + "class_uid": 10201 + } + ], + "locators": [ + { + "type": "docker-image", + "url": "https://example.com/test", + "size": 1000, + "digest": "sha256:abc123" + } + ], + "extensions": [ + { + "name": "schema.oasf.agntcy.org/features/runtime/model", + "version": "v0.0.0", + "data": {} + } + ] + }` + + record, err := corev1.UnmarshalRecord([]byte(recordJSON)) + require.NoError(t, err) + + adapter := adapters.NewRecordAdapter(record) + labels := types.GetLabelsFromRecord(adapter) + require.NotNil(t, labels) + + // Should have at least skill, locator, and module labels + assert.GreaterOrEqual(t, len(labels), 3) + + // Convert to strings for easier assertion + labelStrings := make([]string, len(labels)) + for i, label := range labels { + labelStrings[i] = label.String() + } + + // Check expected labels are present + assert.Contains(t, labelStrings, "/skills/Natural Language Processing/Text Completion") + assert.Contains(t, labelStrings, "/locators/docker-image") + assert.Contains(t, labelStrings, "/modules/runtime/model") // Schema prefix stripped + }) + + t.Run("valid_v1alpha1_record", func(t *testing.T) { + // Create a valid v1alpha1 record JSON + recordJSON := `{ + 
"name": "test-agent-v2", + "version": "2.0.0", + "schema_version": "0.7.0", + "authors": ["test"], + "created_at": "2023-01-01T00:00:00Z", + "skills": [ + { + "name": "Machine Learning/Classification", + "id": 20301 + } + ], + "domains": [ + { + "name": "healthcare/medical_technology", + "id": 905 + } + ], + "locators": [ + { + "type": "http", + "url": "https://example.com/v2", + "size": 2000, + "digest": "sha256:def456" + } + ], + "modules": [ + { + "name": "security/authentication", + "data": {} + } + ] + }` + + record, err := corev1.UnmarshalRecord([]byte(recordJSON)) + require.NoError(t, err) + + adapter := adapters.NewRecordAdapter(record) + labels := types.GetLabelsFromRecord(adapter) + require.NotNil(t, labels) + + // Should have skill, domain, locator, and module labels + assert.GreaterOrEqual(t, len(labels), 4) + + // Convert to strings for easier assertion + labelStrings := make([]string, len(labels)) + for i, label := range labels { + labelStrings[i] = label.String() + } + + // Check expected labels are present + assert.Contains(t, labelStrings, "/skills/Machine Learning/Classification") + assert.Contains(t, labelStrings, "/domains/healthcare/medical_technology") + assert.Contains(t, labelStrings, "/locators/http") + assert.Contains(t, labelStrings, "/modules/security/authentication") // Direct module name + }) + + t.Run("invalid_record", func(t *testing.T) { + // Create invalid JSON that will fail to unmarshal + invalidJSON := `{"invalid": json}` + + record, err := corev1.UnmarshalRecord([]byte(invalidJSON)) + if err != nil { + // If unmarshaling fails, we can't test GetLabelsFromRecord + t.Skip("Invalid JSON test skipped - unmarshal failed as expected") + + return + } + + adapter := adapters.NewRecordAdapter(record) + labels := types.GetLabelsFromRecord(adapter) + // Should handle gracefully and return nil or empty slice + assert.Empty(t, labels) + }) + + t.Run("nil_record", func(t *testing.T) { + labels := types.GetLabelsFromRecord(nil) + assert.Nil(t, labels) + }) +} diff --git a/server/types/publication.go b/server/types/publication.go index 443199a4a..7844087b0 100644 --- a/server/types/publication.go +++ b/server/types/publication.go @@ -1,16 +1,16 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - routingv1 "github.com/agntcy/dir/api/routing/v1" -) - -type PublicationObject interface { - GetID() string - GetRequest() *routingv1.PublishRequest - GetStatus() routingv1.PublicationStatus - GetCreatedTime() string - GetLastUpdateTime() string -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + routingv1 "github.com/agntcy/dir/api/routing/v1" +) + +type PublicationObject interface { + GetID() string + GetRequest() *routingv1.PublishRequest + GetStatus() routingv1.PublicationStatus + GetCreatedTime() string + GetLastUpdateTime() string +} diff --git a/server/types/record.go b/server/types/record.go index 8b10493d0..9876f1b36 100644 --- a/server/types/record.go +++ b/server/types/record.go @@ -1,87 +1,87 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -type Record interface { - GetCid() string - GetRecordData() (RecordData, error) -} - -type RecordMeta interface { - GetCid() string - GetAnnotations() map[string]string - GetSchemaVersion() string - GetCreatedAt() string -} - -type RecordRef interface { - GetCid() string -} - -// Core 
abstraction interfaces. -// -//nolint:interfacebloat // RecordData is a cohesive interface for all record data operations -type RecordData interface { - GetAnnotations() map[string]string - GetSchemaVersion() string - GetName() string - GetVersion() string - GetDescription() string - GetAuthors() []string - GetCreatedAt() string - GetSkills() []Skill - GetLocators() []Locator - GetDomains() []Domain - GetModules() []Module - GetSignature() Signature - GetPreviousRecordCid() string -} - -type Signature interface { - GetAnnotations() map[string]string - GetSignedAt() string - GetAlgorithm() string - GetSignature() string - GetCertificate() string - GetContentType() string - GetContentBundle() string -} - -type Module interface { - GetName() string - GetID() uint64 - GetData() map[string]any -} - -//nolint:iface -type Skill interface { - GetAnnotations() map[string]string - GetName() string - GetID() uint64 -} - -//nolint:iface -type Domain interface { - GetAnnotations() map[string]string - GetName() string - GetID() uint64 -} - -type Locator interface { - GetAnnotations() map[string]string - GetType() string - GetURL() string - GetSize() uint64 - GetDigest() string -} - -// LabelProvider provides routing labels for different record components. -// Implementations should handle version-specific label generation logic. -type LabelProvider interface { - GetSkillLabels() []Label - GetDomainLabels() []Label - GetModuleLabels() []Label - GetLocatorLabels() []Label - GetAllLabels() []Label -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +type Record interface { + GetCid() string + GetRecordData() (RecordData, error) +} + +type RecordMeta interface { + GetCid() string + GetAnnotations() map[string]string + GetSchemaVersion() string + GetCreatedAt() string +} + +type RecordRef interface { + GetCid() string +} + +// Core abstraction interfaces. +// +//nolint:interfacebloat // RecordData is a cohesive interface for all record data operations +type RecordData interface { + GetAnnotations() map[string]string + GetSchemaVersion() string + GetName() string + GetVersion() string + GetDescription() string + GetAuthors() []string + GetCreatedAt() string + GetSkills() []Skill + GetLocators() []Locator + GetDomains() []Domain + GetModules() []Module + GetSignature() Signature + GetPreviousRecordCid() string +} + +type Signature interface { + GetAnnotations() map[string]string + GetSignedAt() string + GetAlgorithm() string + GetSignature() string + GetCertificate() string + GetContentType() string + GetContentBundle() string +} + +type Module interface { + GetName() string + GetID() uint64 + GetData() map[string]any +} + +//nolint:iface +type Skill interface { + GetAnnotations() map[string]string + GetName() string + GetID() uint64 +} + +//nolint:iface +type Domain interface { + GetAnnotations() map[string]string + GetName() string + GetID() uint64 +} + +type Locator interface { + GetAnnotations() map[string]string + GetType() string + GetURL() string + GetSize() uint64 + GetDigest() string +} + +// LabelProvider provides routing labels for different record components. +// Implementations should handle version-specific label generation logic. 
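// Editor's note: a hedged sketch showing how a concrete record is wrapped
// to satisfy the Record interface above before label extraction with
// GetLabelsFromRecord (defined earlier in this change); the adapters
// package is the one exercised by the tests in this diff.
package example

import (
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	"github.com/agntcy/dir/server/types"
	"github.com/agntcy/dir/server/types/adapters"
)

func printLabels(record *corev1.Record) {
	// Wrap the concrete record at the boundary, then extract typed labels.
	adapter := adapters.NewRecordAdapter(record)
	for _, label := range types.GetLabelsFromRecord(adapter) {
		fmt.Printf("%-10s %s\n", label.Type(), label.Value())
	}
}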
+type LabelProvider interface { + GetSkillLabels() []Label + GetDomainLabels() []Label + GetModuleLabels() []Label + GetLocatorLabels() []Label + GetAllLabels() []Label +} diff --git a/server/types/routing.go b/server/types/routing.go index 61cdb94af..ccf7bcacb 100644 --- a/server/types/routing.go +++ b/server/types/routing.go @@ -1,43 +1,43 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "context" - - routingv1 "github.com/agntcy/dir/api/routing/v1" - "github.com/libp2p/go-libp2p/core/peer" -) - -type Peer = peer.AddrInfo - -// RoutingAPI handles management of the routing layer. -type RoutingAPI interface { - // Publish record to the network - // The caller must wrap concrete record types (e.g. *corev1.Record) with adapters.NewRecordAdapter() - Publish(context.Context, Record) error - - // List all records that this peer is currently providing (local-only operation) - List(context.Context, *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) - - // Search for records across the network using cached remote announcements - Search(context.Context, *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) - - // Unpublish record from the network - // The caller must wrap concrete record types (e.g. *corev1.Record) with adapters.NewRecordAdapter() - Unpublish(context.Context, Record) error - - // Stop stops the routing services and releases resources - // Should be called during server shutdown for graceful cleanup - Stop() error - - // IsReady checks if the routing subsystem is ready to serve traffic. - IsReady(context.Context) bool -} - -// PublicationAPI handles management of publication tasks. -type PublicationAPI interface { - // CreatePublication creates a new publication task to be processed. - CreatePublication(context.Context, *routingv1.PublishRequest) (string, error) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "context" + + routingv1 "github.com/agntcy/dir/api/routing/v1" + "github.com/libp2p/go-libp2p/core/peer" +) + +type Peer = peer.AddrInfo + +// RoutingAPI handles management of the routing layer. +type RoutingAPI interface { + // Publish record to the network + // The caller must wrap concrete record types (e.g. *corev1.Record) with adapters.NewRecordAdapter() + Publish(context.Context, Record) error + + // List all records that this peer is currently providing (local-only operation) + List(context.Context, *routingv1.ListRequest) (<-chan *routingv1.ListResponse, error) + + // Search for records across the network using cached remote announcements + Search(context.Context, *routingv1.SearchRequest) (<-chan *routingv1.SearchResponse, error) + + // Unpublish record from the network + // The caller must wrap concrete record types (e.g. *corev1.Record) with adapters.NewRecordAdapter() + Unpublish(context.Context, Record) error + + // Stop stops the routing services and releases resources + // Should be called during server shutdown for graceful cleanup + Stop() error + + // IsReady checks if the routing subsystem is ready to serve traffic. + IsReady(context.Context) bool +} + +// PublicationAPI handles management of publication tasks. +type PublicationAPI interface { + // CreatePublication creates a new publication task to be processed. 
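// Editor's note: a usage sketch (hypothetical caller) for the channel-based
// RoutingAPI above; draining the returned channel until it closes is how
// callers consume List and Search results. An empty ListRequest is assumed
// here to mean "list everything this peer provides".
package example

import (
	"context"
	"fmt"

	routingv1 "github.com/agntcy/dir/api/routing/v1"
	"github.com/agntcy/dir/server/types"
)

func listAll(ctx context.Context, r types.RoutingAPI) error {
	ch, err := r.List(ctx, &routingv1.ListRequest{})
	if err != nil {
		return err
	}

	for resp := range ch { // channel closes when the listing completes
		fmt.Println(resp)
	}

	return nil
}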
+ CreatePublication(context.Context, *routingv1.PublishRequest) (string, error) +} diff --git a/server/types/search.go b/server/types/search.go index 4ad6ae49d..dabaa081b 100644 --- a/server/types/search.go +++ b/server/types/search.go @@ -1,129 +1,129 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -type RecordFilters struct { - Limit int - Offset int - Names []string - Versions []string - SkillIDs []uint64 - SkillNames []string - LocatorTypes []string - LocatorURLs []string - ModuleNames []string - ModuleIDs []uint64 - DomainIDs []uint64 - DomainNames []string - CreatedAts []string - Authors []string - SchemaVersions []string -} - -type FilterOption func(*RecordFilters) - -// WithLimit sets the maximum number of records to return. -func WithLimit(limit int) FilterOption { - return func(sc *RecordFilters) { - sc.Limit = limit - } -} - -// WithOffset sets pagination offset. -func WithOffset(offset int) FilterOption { - return func(sc *RecordFilters) { - sc.Offset = offset - } -} - -// WithNames filters records by name patterns. -func WithNames(names ...string) FilterOption { - return func(sc *RecordFilters) { - sc.Names = append(sc.Names, names...) - } -} - -// WithVersions filters records by version patterns. -func WithVersions(versions ...string) FilterOption { - return func(sc *RecordFilters) { - sc.Versions = append(sc.Versions, versions...) - } -} - -// WithSkillIDs RecordFilters records by skill IDs. -func WithSkillIDs(ids ...uint64) FilterOption { - return func(sc *RecordFilters) { - sc.SkillIDs = append(sc.SkillIDs, ids...) - } -} - -// WithSkillNames RecordFilters records by skill names. -func WithSkillNames(names ...string) FilterOption { - return func(sc *RecordFilters) { - sc.SkillNames = append(sc.SkillNames, names...) - } -} - -// WithLocatorTypes RecordFilters records by locator types. -func WithLocatorTypes(types ...string) FilterOption { - return func(sc *RecordFilters) { - sc.LocatorTypes = append(sc.LocatorTypes, types...) - } -} - -// WithLocatorURLs RecordFilters records by locator URLs. -func WithLocatorURLs(urls ...string) FilterOption { - return func(sc *RecordFilters) { - sc.LocatorURLs = append(sc.LocatorURLs, urls...) - } -} - -// WithModuleNames RecordFilters records by module names. -func WithModuleNames(names ...string) FilterOption { - return func(sc *RecordFilters) { - sc.ModuleNames = append(sc.ModuleNames, names...) - } -} - -// WithDomainIDs filters records by domain IDs. -func WithDomainIDs(ids ...uint64) FilterOption { - return func(sc *RecordFilters) { - sc.DomainIDs = append(sc.DomainIDs, ids...) - } -} - -// WithDomainNames filters records by domain names. -func WithDomainNames(names ...string) FilterOption { - return func(sc *RecordFilters) { - sc.DomainNames = append(sc.DomainNames, names...) - } -} - -// WithCreatedAts filters records by created_at timestamp patterns. -func WithCreatedAts(createdAts ...string) FilterOption { - return func(sc *RecordFilters) { - sc.CreatedAts = append(sc.CreatedAts, createdAts...) - } -} - -// WithAuthors filters records by author names. -func WithAuthors(names ...string) FilterOption { - return func(sc *RecordFilters) { - sc.Authors = append(sc.Authors, names...) - } -} - -// WithSchemaVersions filters records by schema version patterns. -func WithSchemaVersions(versions ...string) FilterOption { - return func(sc *RecordFilters) { - sc.SchemaVersions = append(sc.SchemaVersions, versions...) 
- } -} - -// WithModuleIDs filters records by module IDs. -func WithModuleIDs(ids ...uint64) FilterOption { - return func(sc *RecordFilters) { - sc.ModuleIDs = append(sc.ModuleIDs, ids...) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +type RecordFilters struct { + Limit int + Offset int + Names []string + Versions []string + SkillIDs []uint64 + SkillNames []string + LocatorTypes []string + LocatorURLs []string + ModuleNames []string + ModuleIDs []uint64 + DomainIDs []uint64 + DomainNames []string + CreatedAts []string + Authors []string + SchemaVersions []string +} + +type FilterOption func(*RecordFilters) + +// WithLimit sets the maximum number of records to return. +func WithLimit(limit int) FilterOption { + return func(sc *RecordFilters) { + sc.Limit = limit + } +} + +// WithOffset sets pagination offset. +func WithOffset(offset int) FilterOption { + return func(sc *RecordFilters) { + sc.Offset = offset + } +} + +// WithNames filters records by name patterns. +func WithNames(names ...string) FilterOption { + return func(sc *RecordFilters) { + sc.Names = append(sc.Names, names...) + } +} + +// WithVersions filters records by version patterns. +func WithVersions(versions ...string) FilterOption { + return func(sc *RecordFilters) { + sc.Versions = append(sc.Versions, versions...) + } +} + +// WithSkillIDs RecordFilters records by skill IDs. +func WithSkillIDs(ids ...uint64) FilterOption { + return func(sc *RecordFilters) { + sc.SkillIDs = append(sc.SkillIDs, ids...) + } +} + +// WithSkillNames RecordFilters records by skill names. +func WithSkillNames(names ...string) FilterOption { + return func(sc *RecordFilters) { + sc.SkillNames = append(sc.SkillNames, names...) + } +} + +// WithLocatorTypes RecordFilters records by locator types. +func WithLocatorTypes(types ...string) FilterOption { + return func(sc *RecordFilters) { + sc.LocatorTypes = append(sc.LocatorTypes, types...) + } +} + +// WithLocatorURLs RecordFilters records by locator URLs. +func WithLocatorURLs(urls ...string) FilterOption { + return func(sc *RecordFilters) { + sc.LocatorURLs = append(sc.LocatorURLs, urls...) + } +} + +// WithModuleNames RecordFilters records by module names. +func WithModuleNames(names ...string) FilterOption { + return func(sc *RecordFilters) { + sc.ModuleNames = append(sc.ModuleNames, names...) + } +} + +// WithDomainIDs filters records by domain IDs. +func WithDomainIDs(ids ...uint64) FilterOption { + return func(sc *RecordFilters) { + sc.DomainIDs = append(sc.DomainIDs, ids...) + } +} + +// WithDomainNames filters records by domain names. +func WithDomainNames(names ...string) FilterOption { + return func(sc *RecordFilters) { + sc.DomainNames = append(sc.DomainNames, names...) + } +} + +// WithCreatedAts filters records by created_at timestamp patterns. +func WithCreatedAts(createdAts ...string) FilterOption { + return func(sc *RecordFilters) { + sc.CreatedAts = append(sc.CreatedAts, createdAts...) + } +} + +// WithAuthors filters records by author names. +func WithAuthors(names ...string) FilterOption { + return func(sc *RecordFilters) { + sc.Authors = append(sc.Authors, names...) + } +} + +// WithSchemaVersions filters records by schema version patterns. +func WithSchemaVersions(versions ...string) FilterOption { + return func(sc *RecordFilters) { + sc.SchemaVersions = append(sc.SchemaVersions, versions...) + } +} + +// WithModuleIDs filters records by module IDs. 
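// Editor's note: a sketch of how the functional options above compose; a
// database implementation would apply each FilterOption to an empty
// RecordFilters before building its query. buildFilters and the filter
// values are illustrative.
package main

import (
	"fmt"

	"github.com/agntcy/dir/server/types"
)

func buildFilters(opts ...types.FilterOption) *types.RecordFilters {
	f := &types.RecordFilters{}
	for _, opt := range opts {
		opt(f)
	}

	return f
}

func main() {
	f := buildFilters(
		types.WithNames("test-agent"),
		types.WithSkillNames("Natural Language Processing/Text Completion"),
		types.WithLimit(10),
	)

	fmt.Printf("%+v\n", f)
}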
+func WithModuleIDs(ids ...uint64) FilterOption { + return func(sc *RecordFilters) { + sc.ModuleIDs = append(sc.ModuleIDs, ids...) + } +} diff --git a/server/types/store.go b/server/types/store.go index 4c053d32a..a7536ce5a 100644 --- a/server/types/store.go +++ b/server/types/store.go @@ -1,64 +1,64 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import ( - "context" - - corev1 "github.com/agntcy/dir/api/core/v1" -) - -// StoreAPI handles management of content-addressable object storage. -type StoreAPI interface { - // Push record to content store - Push(context.Context, *corev1.Record) (*corev1.RecordRef, error) - - // Pull record from content store - Pull(context.Context, *corev1.RecordRef) (*corev1.Record, error) - - // Lookup metadata about the record from reference - Lookup(context.Context, *corev1.RecordRef) (*corev1.RecordMeta, error) - - // Delete the record - Delete(context.Context, *corev1.RecordRef) error - - // List all available records - // Needed for bootstrapping - // List(context.Context, func(*corev1.RecordRef) error) error - - // IsReady checks if the storage backend is ready to serve traffic. - IsReady(context.Context) bool -} - -// ReferrerStoreAPI handles management of generic record referrers. -// This implements the OCI Referrers API for attaching artifacts to records. -// -// Implementations: oci.Store -// Used by: store.Controller, sync.Monitor. -type ReferrerStoreAPI interface { - // PushReferrer pushes a referrer to content store - PushReferrer(context.Context, string, *corev1.RecordReferrer) error - - // WalkReferrers walks referrers individually for a given record CID and optional type filter - WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error -} - -// VerifierStore provides signature verification using Zot registry. -// This is implemented by OCI-backed stores that have access to a Zot registry -// with cosign/notation signature support. -// -// Implementations: oci.Store (when using Zot registry) -// Used by: sign.Controller. -type VerifierStore interface { - // VerifyWithZot verifies a record signature using Zot registry GraphQL API - VerifyWithZot(ctx context.Context, recordCID string) (bool, error) -} - -// FullStore is the complete store interface with all optional capabilities. -// This is what the OCI store implementation provides. -type FullStore interface { - StoreAPI - ReferrerStoreAPI - VerifierStore -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import ( + "context" + + corev1 "github.com/agntcy/dir/api/core/v1" +) + +// StoreAPI handles management of content-addressable object storage. +type StoreAPI interface { + // Push record to content store + Push(context.Context, *corev1.Record) (*corev1.RecordRef, error) + + // Pull record from content store + Pull(context.Context, *corev1.RecordRef) (*corev1.Record, error) + + // Lookup metadata about the record from reference + Lookup(context.Context, *corev1.RecordRef) (*corev1.RecordMeta, error) + + // Delete the record + Delete(context.Context, *corev1.RecordRef) error + + // List all available records + // Needed for bootstrapping + // List(context.Context, func(*corev1.RecordRef) error) error + + // IsReady checks if the storage backend is ready to serve traffic. + IsReady(context.Context) bool +} + +// ReferrerStoreAPI handles management of generic record referrers. 
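// Editor's note: a usage sketch (hypothetical caller) of the StoreAPI
// round trip defined above: push a record, look up its metadata, pull it
// back by reference. The GetCid accessor on corev1.RecordMeta is assumed
// from the RecordMeta interface shown earlier in this change.
package example

import (
	"context"
	"fmt"

	corev1 "github.com/agntcy/dir/api/core/v1"
	"github.com/agntcy/dir/server/types"
)

func roundTrip(ctx context.Context, store types.StoreAPI, record *corev1.Record) error {
	ref, err := store.Push(ctx, record)
	if err != nil {
		return err
	}

	meta, err := store.Lookup(ctx, ref) // metadata only, no payload transfer
	if err != nil {
		return err
	}

	fmt.Println(meta.GetCid())

	if _, err := store.Pull(ctx, ref); err != nil {
		return err
	}

	return nil
}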
+// This implements the OCI Referrers API for attaching artifacts to records. +// +// Implementations: oci.Store +// Used by: store.Controller, sync.Monitor. +type ReferrerStoreAPI interface { + // PushReferrer pushes a referrer to content store + PushReferrer(context.Context, string, *corev1.RecordReferrer) error + + // WalkReferrers walks referrers individually for a given record CID and optional type filter + WalkReferrers(ctx context.Context, recordCID string, referrerType string, walkFn func(*corev1.RecordReferrer) error) error +} + +// VerifierStore provides signature verification using Zot registry. +// This is implemented by OCI-backed stores that have access to a Zot registry +// with cosign/notation signature support. +// +// Implementations: oci.Store (when using Zot registry) +// Used by: sign.Controller. +type VerifierStore interface { + // VerifyWithZot verifies a record signature using Zot registry GraphQL API + VerifyWithZot(ctx context.Context, recordCID string) (bool, error) +} + +// FullStore is the complete store interface with all optional capabilities. +// This is what the OCI store implementation provides. +type FullStore interface { + StoreAPI + ReferrerStoreAPI + VerifierStore +} diff --git a/server/types/sync.go b/server/types/sync.go index eaad70608..8e47c5ab6 100644 --- a/server/types/sync.go +++ b/server/types/sync.go @@ -1,13 +1,13 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package types - -import storev1 "github.com/agntcy/dir/api/store/v1" - -type SyncObject interface { - GetID() string - GetRemoteDirectoryURL() string - GetCIDs() []string - GetStatus() storev1.SyncStatus -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package types + +import storev1 "github.com/agntcy/dir/api/store/v1" + +type SyncObject interface { + GetID() string + GetRemoteDirectoryURL() string + GetCIDs() []string + GetStatus() storev1.SyncStatus +} diff --git a/utils/cosign/keys.go b/utils/cosign/keys.go index eb9f4b577..338d032dc 100644 --- a/utils/cosign/keys.go +++ b/utils/cosign/keys.go @@ -1,159 +1,159 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cosign - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - _ "crypto/sha512" // if user chooses SHA2-384 or SHA2-512 for hash - "encoding/base64" - "errors" - "fmt" - "io" - "os" - - "github.com/sigstore/cosign/v3/pkg/cosign" - "github.com/sigstore/cosign/v3/pkg/cosign/env" - protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -type KeypairOptions struct { - Hint []byte -} - -type Keypair struct { - options *KeypairOptions - privateKey crypto.Signer - hashAlgorithm protocommon.HashAlgorithm -} - -func LoadKeypair(privateKeyBytes []byte, pw []byte) (*Keypair, error) { - if len(privateKeyBytes) == 0 { - return nil, errors.New("private key bytes cannot be empty") - } - - privateKey, err := cryptoutils.UnmarshalPEMToPrivateKey( - privateKeyBytes, - cryptoutils.StaticPasswordFunc(pw), - ) - if err != nil { - return nil, fmt.Errorf("unmarshal PEM to private key: %w", err) - } - - // Get public key from the private key - v, err := cosign.LoadPrivateKey(privateKeyBytes, pw, nil) - if err != nil { - return nil, fmt.Errorf("failed to load private key: %w", err) - } - - pubKey, err := v.PublicKey() - if err != nil { - 
return nil, fmt.Errorf("failed to get public key: %w", err) - } - - // Derive the hint from the public key - pubKeyBytes, err := cryptoutils.MarshalPublicKeyToDER(pubKey) - if err != nil { - return nil, fmt.Errorf("failed to marshal public key: %w", err) - } - - opts := &KeypairOptions{ - Hint: GenerateHintFromPublicKey(pubKeyBytes), - } - - signer, ok := privateKey.(crypto.Signer) - if !ok { - return nil, errors.New("private key does not implement crypto.Signer") - } - - return &Keypair{ - options: opts, - privateKey: signer, - hashAlgorithm: protocommon.HashAlgorithm_SHA2_256, - }, nil -} - -func (e *Keypair) GetHashAlgorithm() protocommon.HashAlgorithm { - return e.hashAlgorithm -} - -func (e *Keypair) GetHint() []byte { - return e.options.Hint -} - -func (e *Keypair) GetKeyAlgorithm() string { - switch pubKey := e.privateKey.Public().(type) { - case *rsa.PublicKey: - return "RSA" - case *ecdsa.PublicKey: - switch pubKey.Curve.Params().Name { - case "P-256": - return "ECDSA-P256" - case "P-384": - return "ECDSA-P384" - case "P-521": - return "ECDSA-P521" - default: - return "ECDSA" - } - case ed25519.PublicKey: - return "Ed25519" - default: - return "Unknown" - } -} - -func (e *Keypair) GetPublicKeyPem() (string, error) { - pubKeyBytes, err := cryptoutils.MarshalPublicKeyToPEM(e.privateKey.Public()) - if err != nil { - return "", fmt.Errorf("failed to marshal public key to PEM: %w", err) - } - - return string(pubKeyBytes), nil -} - -func (e *Keypair) SignData(_ context.Context, data []byte) ([]byte, []byte, error) { - hasher := crypto.SHA256.New() - hasher.Write(data) - digest := hasher.Sum(nil) - - signature, err := e.privateKey.Sign(rand.Reader, digest, crypto.SHA256) - if err != nil { - return nil, nil, fmt.Errorf("failed to sign data: %w", err) - } - - return signature, digest, nil -} - -func GenerateHintFromPublicKey(pubKey []byte) []byte { - hashedBytes := sha256.Sum256(pubKey) - - return []byte(base64.StdEncoding.EncodeToString(hashedBytes[:])) -} - -func ReadPrivateKeyPassword() func() ([]byte, error) { - pw, ok := env.LookupEnv(env.VariablePassword) - - switch { - case ok: - return func() ([]byte, error) { - return []byte(pw), nil - } - case cosign.IsTerminal(): - return func() ([]byte, error) { - return cosign.GetPassFromTerm(true) - } - // Handle piped in passwords. 
- default: - return func() ([]byte, error) { - return io.ReadAll(os.Stdin) - } - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cosign + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + _ "crypto/sha512" // if user chooses SHA2-384 or SHA2-512 for hash + "encoding/base64" + "errors" + "fmt" + "io" + "os" + + "github.com/sigstore/cosign/v3/pkg/cosign" + "github.com/sigstore/cosign/v3/pkg/cosign/env" + protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +type KeypairOptions struct { + Hint []byte +} + +type Keypair struct { + options *KeypairOptions + privateKey crypto.Signer + hashAlgorithm protocommon.HashAlgorithm +} + +func LoadKeypair(privateKeyBytes []byte, pw []byte) (*Keypair, error) { + if len(privateKeyBytes) == 0 { + return nil, errors.New("private key bytes cannot be empty") + } + + privateKey, err := cryptoutils.UnmarshalPEMToPrivateKey( + privateKeyBytes, + cryptoutils.StaticPasswordFunc(pw), + ) + if err != nil { + return nil, fmt.Errorf("unmarshal PEM to private key: %w", err) + } + + // Get public key from the private key + v, err := cosign.LoadPrivateKey(privateKeyBytes, pw, nil) + if err != nil { + return nil, fmt.Errorf("failed to load private key: %w", err) + } + + pubKey, err := v.PublicKey() + if err != nil { + return nil, fmt.Errorf("failed to get public key: %w", err) + } + + // Derive the hint from the public key + pubKeyBytes, err := cryptoutils.MarshalPublicKeyToDER(pubKey) + if err != nil { + return nil, fmt.Errorf("failed to marshal public key: %w", err) + } + + opts := &KeypairOptions{ + Hint: GenerateHintFromPublicKey(pubKeyBytes), + } + + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errors.New("private key does not implement crypto.Signer") + } + + return &Keypair{ + options: opts, + privateKey: signer, + hashAlgorithm: protocommon.HashAlgorithm_SHA2_256, + }, nil +} + +func (e *Keypair) GetHashAlgorithm() protocommon.HashAlgorithm { + return e.hashAlgorithm +} + +func (e *Keypair) GetHint() []byte { + return e.options.Hint +} + +func (e *Keypair) GetKeyAlgorithm() string { + switch pubKey := e.privateKey.Public().(type) { + case *rsa.PublicKey: + return "RSA" + case *ecdsa.PublicKey: + switch pubKey.Curve.Params().Name { + case "P-256": + return "ECDSA-P256" + case "P-384": + return "ECDSA-P384" + case "P-521": + return "ECDSA-P521" + default: + return "ECDSA" + } + case ed25519.PublicKey: + return "Ed25519" + default: + return "Unknown" + } +} + +func (e *Keypair) GetPublicKeyPem() (string, error) { + pubKeyBytes, err := cryptoutils.MarshalPublicKeyToPEM(e.privateKey.Public()) + if err != nil { + return "", fmt.Errorf("failed to marshal public key to PEM: %w", err) + } + + return string(pubKeyBytes), nil +} + +func (e *Keypair) SignData(_ context.Context, data []byte) ([]byte, []byte, error) { + hasher := crypto.SHA256.New() + hasher.Write(data) + digest := hasher.Sum(nil) + + signature, err := e.privateKey.Sign(rand.Reader, digest, crypto.SHA256) + if err != nil { + return nil, nil, fmt.Errorf("failed to sign data: %w", err) + } + + return signature, digest, nil +} + +func GenerateHintFromPublicKey(pubKey []byte) []byte { + hashedBytes := sha256.Sum256(pubKey) + + return []byte(base64.StdEncoding.EncodeToString(hashedBytes[:])) +} + +func ReadPrivateKeyPassword() func() ([]byte, error) { + pw, ok := 
env.LookupEnv(env.VariablePassword) + + switch { + case ok: + return func() ([]byte, error) { + return []byte(pw), nil + } + case cosign.IsTerminal(): + return func() ([]byte, error) { + return cosign.GetPassFromTerm(true) + } + // Handle piped in passwords. + default: + return func() ([]byte, error) { + return io.ReadAll(os.Stdin) + } + } +} diff --git a/utils/cosign/payload.go b/utils/cosign/payload.go index 9ccb427c9..3480161b6 100644 --- a/utils/cosign/payload.go +++ b/utils/cosign/payload.go @@ -1,38 +1,38 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cosign - -import ( - "encoding/json" - "fmt" -) - -type Payload struct { - Critical Critical `json:"critical"` -} - -type Critical struct { - Image Image `json:"image"` -} - -type Image struct { - DockerManifestDigest string `json:"docker-manifest-digest"` -} - -func GeneratePayload(digest string) ([]byte, error) { - payload := &Payload{ - Critical: Critical{ - Image: Image{ - DockerManifestDigest: digest, - }, - }, - } - - payloadBytes, err := json.Marshal(payload) - if err != nil { - return nil, fmt.Errorf("failed to marshal payload: %w", err) - } - - return payloadBytes, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cosign + +import ( + "encoding/json" + "fmt" +) + +type Payload struct { + Critical Critical `json:"critical"` +} + +type Critical struct { + Image Image `json:"image"` +} + +type Image struct { + DockerManifestDigest string `json:"docker-manifest-digest"` +} + +func GeneratePayload(digest string) ([]byte, error) { + payload := &Payload{ + Critical: Critical{ + Image: Image{ + DockerManifestDigest: digest, + }, + }, + } + + payloadBytes, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("failed to marshal payload: %w", err) + } + + return payloadBytes, nil +} diff --git a/utils/cosign/sign.go b/utils/cosign/sign.go index a9c143ca9..dfbdb7177 100644 --- a/utils/cosign/sign.go +++ b/utils/cosign/sign.go @@ -1,325 +1,325 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package cosign - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "os" - "os/exec" - "time" - - "github.com/google/go-containerregistry/pkg/authn" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/sigstore/cosign/v3/pkg/cosign" - "github.com/sigstore/cosign/v3/pkg/oci/mutate" - ociremote "github.com/sigstore/cosign/v3/pkg/oci/remote" - "github.com/sigstore/cosign/v3/pkg/oci/static" - v1 "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" - "github.com/sigstore/sigstore-go/pkg/root" - "github.com/sigstore/sigstore-go/pkg/sign" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -const ( - DefaultFulcioURL = "https://fulcio.sigstage.dev" - DefaultRekorURL = "https://rekor.sigstage.dev" - DefaultTimestampURL = "https://timestamp.sigstage.dev/api/v1/timestamp" - DefaultOIDCProviderURL = "https://oauth2.sigstage.dev/auth" - DefaultOIDCClientID = "sigstore" - - DefaultFulcioTimeout = 30 * time.Second - DefaultTimestampAuthorityTimeout = 30 * time.Second - DefaultRekorTimeout = 90 * time.Second -) - -// SetOrDefault returns the value if it's not empty, otherwise returns the default value. 
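// Editor's note: a quick illustration of the cosign "simple signing"
// payload produced by GeneratePayload above; the digest value is made up.
package main

import (
	"fmt"

	"github.com/agntcy/dir/utils/cosign"
)

func main() {
	payload, err := cosign.GeneratePayload("sha256:abc123")
	if err != nil {
		panic(err)
	}

	fmt.Println(string(payload))
	// {"critical":{"image":{"docker-manifest-digest":"sha256:abc123"}}}
}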
-func SetOrDefault(value string, defaultValue string) string { - if value == "" { - value = defaultValue - } - - return value -} - -// SignBlobOIDCOptions contains options for OIDC-based blob signing. -type SignBlobOIDCOptions struct { - Payload []byte - IDToken string - FulcioURL string - RekorURL string - TimestampURL string - OIDCProviderURL string -} - -// SignBlobOIDCResult contains the result of OIDC blob signing. -type SignBlobOIDCResult struct { - Signature string - PublicKey string -} - -// SignBlobWithOIDC signs a blob using OIDC authentication. -func SignBlobWithOIDC(_ context.Context, opts *SignBlobOIDCOptions) (*SignBlobOIDCResult, error) { - // Load signing options. - var signOpts sign.BundleOptions - { - // Define config to use for signing. - signingConfig, err := root.NewSigningConfig( - root.SigningConfigMediaType02, - // Fulcio URLs - []root.Service{ - { - URL: setOrDefault(opts.FulcioURL, DefaultFulcioURL), - MajorAPIVersion: 1, - ValidityPeriodStart: time.Now().Add(-time.Hour), - ValidityPeriodEnd: time.Now().Add(time.Hour), - }, - }, - // OIDC Provider URLs - // Usage and requirements: https://docs.sigstore.dev/certificate_authority/oidc-in-fulcio/ - []root.Service{ - { - URL: setOrDefault(opts.OIDCProviderURL, DefaultOIDCProviderURL), - MajorAPIVersion: 1, - ValidityPeriodStart: time.Now().Add(-time.Hour), - ValidityPeriodEnd: time.Now().Add(time.Hour), - }, - }, - // Rekor URLs - []root.Service{ - { - URL: setOrDefault(opts.RekorURL, DefaultRekorURL), - MajorAPIVersion: 1, - ValidityPeriodStart: time.Now().Add(-time.Hour), - ValidityPeriodEnd: time.Now().Add(time.Hour), - }, - }, - root.ServiceConfiguration{ - Selector: v1.ServiceSelector_ANY, - }, - []root.Service{ - { - URL: setOrDefault(opts.TimestampURL, DefaultTimestampURL), - MajorAPIVersion: 1, - ValidityPeriodStart: time.Now().Add(-time.Hour), - ValidityPeriodEnd: time.Now().Add(time.Hour), - }, - }, - root.ServiceConfiguration{ - Selector: v1.ServiceSelector_ANY, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create signing config: %w", err) - } - - // Use fulcio to sign. - fulcioURL, err := root.SelectService(signingConfig.FulcioCertificateAuthorityURLs(), []uint32{1}, time.Now()) - if err != nil { - return nil, fmt.Errorf("failed to select fulcio URL: %w", err) - } - - fulcioOpts := &sign.FulcioOptions{ - BaseURL: fulcioURL.URL, - Timeout: DefaultFulcioTimeout, - Retries: 1, - } - signOpts.CertificateProvider = sign.NewFulcio(fulcioOpts) - signOpts.CertificateProviderOptions = &sign.CertificateProviderOptions{ - IDToken: opts.IDToken, - } - - // Use timestamp authortiy to sign. - tsaURLs, err := root.SelectServices(signingConfig.TimestampAuthorityURLs(), - signingConfig.TimestampAuthorityURLsConfig(), []uint32{1}, time.Now()) - if err != nil { - return nil, fmt.Errorf("failed to select timestamp authority URL: %w", err) - } - - for _, tsaURL := range tsaURLs { - tsaOpts := &sign.TimestampAuthorityOptions{ - URL: tsaURL.URL, - Timeout: DefaultTimestampAuthorityTimeout, - Retries: 1, - } - signOpts.TimestampAuthorities = append(signOpts.TimestampAuthorities, sign.NewTimestampAuthority(tsaOpts)) - } - - // Use rekor to sign. 
- rekorURLs, err := root.SelectServices(signingConfig.RekorLogURLs(), - signingConfig.RekorLogURLsConfig(), []uint32{1}, time.Now()) - if err != nil { - return nil, fmt.Errorf("failed to select rekor URL: %w", err) - } - - for _, rekorURL := range rekorURLs { - rekorOpts := &sign.RekorOptions{ - BaseURL: rekorURL.URL, - Timeout: DefaultRekorTimeout, - Retries: 1, - } - signOpts.TransparencyLogs = append(signOpts.TransparencyLogs, sign.NewRekor(rekorOpts)) - } - } - - // Generate an ephemeral keypair for signing. - signKeypair, err := sign.NewEphemeralKeypair(nil) - if err != nil { - return nil, fmt.Errorf("failed to create ephemeral keypair: %w", err) - } - - // Sign the record JSON data. - sigBundle, err := sign.Bundle(&sign.PlainData{Data: opts.Payload}, signKeypair, signOpts) - if err != nil { - return nil, fmt.Errorf("failed to sign record: %w", err) - } - - publicKeyPEM, err := signKeypair.GetPublicKeyPem() - if err != nil { - return nil, fmt.Errorf("failed to get public key: %w", err) - } - - return &SignBlobOIDCResult{ - Signature: base64.StdEncoding.EncodeToString(sigBundle.GetMessageSignature().GetSignature()), - PublicKey: publicKeyPEM, - }, nil -} - -// SignBlobKeyOptions contains options for key-based blob signing. -type SignBlobKeyOptions struct { - Payload []byte - PrivateKey []byte - Password []byte -} - -// SignBlobKeyResult contains the result of key-based blob signing. -type SignBlobKeyResult struct { - Signature string - PublicKey string -} - -// SignBlobWithKey signs a blob using a private key. -func SignBlobWithKey(_ context.Context, opts *SignBlobKeyOptions) (*SignBlobKeyResult, error) { - payload := bytes.NewReader(opts.Payload) - - sv, err := cosign.LoadPrivateKey(opts.PrivateKey, opts.Password, nil) - if err != nil { - return nil, fmt.Errorf("loading private key: %w", err) - } - - sig, err := sv.SignMessage(payload) - if err != nil { - return nil, fmt.Errorf("signing blob: %w", err) - } - - pubKey, err := sv.PublicKey() - if err != nil { - return nil, fmt.Errorf("getting public key: %w", err) - } - - publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(pubKey) - if err != nil { - return nil, fmt.Errorf("getting public key: %w", err) - } - - return &SignBlobKeyResult{ - Signature: base64.StdEncoding.EncodeToString(sig), - PublicKey: string(publicKeyPEM), - }, nil -} - -// AttachSignatureOptions contains options for attaching signatures to OCI images. -type AttachSignatureOptions struct { - ImageRef string - Signature string - Payload string - Username string - Password string -} - -// AttachSignature attaches a signature to an OCI image using cosign. -func AttachSignature(_ context.Context, opts *AttachSignatureOptions) error { - ref, err := name.ParseReference(opts.ImageRef) - if err != nil { - return fmt.Errorf("failed to parse image reference: %w", err) - } - - sig, err := static.NewSignature([]byte(opts.Payload), opts.Signature) - if err != nil { - return fmt.Errorf("failed to create static signature: %w", err) - } - - // Remote options for authentication - remoteOpts := []ociremote.Option{} - if opts.Username != "" && opts.Password != "" { - remoteOpts = append(remoteOpts, ociremote.WithRemoteOptions( - remote.WithAuth( - &authn.Basic{ - Username: opts.Username, - Password: opts.Password, - }, - ), - )) - } - - se, err := ociremote.SignedEntity(ref, remoteOpts...) - if err != nil { - return fmt.Errorf("failed to create signed entity: %w", err) - } - - // Attach the signature to the entity. 
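// --- Editor's note (hedged sketch; not part of the patch) --------------------
// Key-based counterpart to the OIDC flow: SignBlobWithKey with an encrypted
// cosign private key. The key path is a hypothetical placeholder, and
// COSIGN_PASSWORD mirrors the env var GenerateKeyPair sets later in this
// file. Assumes:
//   import ("context"; "os"; cosign "github.com/agntcy/dir/utils/cosign")
func keySignExample(ctx context.Context, record []byte) (string, error) {
	keyPEM, err := os.ReadFile("cosign.key") // hypothetical path
	if err != nil {
		return "", err
	}
	res, err := cosign.SignBlobWithKey(ctx, &cosign.SignBlobKeyOptions{
		Payload:    record,
		PrivateKey: keyPEM,
		Password:   []byte(os.Getenv("COSIGN_PASSWORD")),
	})
	if err != nil {
		return "", err
	}
	return res.Signature, nil // base64-encoded signature
}
// ------------------------------------------------------------------------------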
- newSE, err := mutate.AttachSignatureToEntity(se, sig) - if err != nil { - return fmt.Errorf("failed to attach signature to entity: %w", err) - } - - digest, err := ociremote.ResolveDigest(ref, remoteOpts...) - if err != nil { - return fmt.Errorf("resolving digest: %w", err) - } - - err = ociremote.WriteSignaturesExperimentalOCI(digest, newSE, remoteOpts...) - if err != nil { - return fmt.Errorf("failed to write signatures: %w", err) - } - - return nil -} - -// GenerateKeyPairOptions contains options for generating cosign key pairs. -type GenerateKeyPairOptions struct { - Directory string - Password string -} - -// GenerateKeyPair generates a cosign key pair in the specified directory. -func GenerateKeyPair(ctx context.Context, opts *GenerateKeyPairOptions) error { - cmd := exec.CommandContext(ctx, "cosign", "generate-key-pair") - - if opts.Directory != "" { - cmd.Dir = opts.Directory - } - - if opts.Password != "" { - cmd.Env = append(os.Environ(), "COSIGN_PASSWORD="+opts.Password) - } - - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("cosign generate-key-pair failed: %w\nOutput: %s", err, string(output)) - } - - return nil -} - -func setOrDefault(value string, defaultValue string) string { - if value == "" { - value = defaultValue - } - - return value -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package cosign + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "os" + "os/exec" + "time" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/sigstore/cosign/v3/pkg/cosign" + "github.com/sigstore/cosign/v3/pkg/oci/mutate" + ociremote "github.com/sigstore/cosign/v3/pkg/oci/remote" + "github.com/sigstore/cosign/v3/pkg/oci/static" + v1 "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/sign" + "github.com/sigstore/sigstore/pkg/cryptoutils" +) + +const ( + DefaultFulcioURL = "https://fulcio.sigstage.dev" + DefaultRekorURL = "https://rekor.sigstage.dev" + DefaultTimestampURL = "https://timestamp.sigstage.dev/api/v1/timestamp" + DefaultOIDCProviderURL = "https://oauth2.sigstage.dev/auth" + DefaultOIDCClientID = "sigstore" + + DefaultFulcioTimeout = 30 * time.Second + DefaultTimestampAuthorityTimeout = 30 * time.Second + DefaultRekorTimeout = 90 * time.Second +) + +// SetOrDefault returns the value if it's not empty, otherwise returns the default value. +func SetOrDefault(value string, defaultValue string) string { + if value == "" { + value = defaultValue + } + + return value +} + +// SignBlobOIDCOptions contains options for OIDC-based blob signing. +type SignBlobOIDCOptions struct { + Payload []byte + IDToken string + FulcioURL string + RekorURL string + TimestampURL string + OIDCProviderURL string +} + +// SignBlobOIDCResult contains the result of OIDC blob signing. +type SignBlobOIDCResult struct { + Signature string + PublicKey string +} + +// SignBlobWithOIDC signs a blob using OIDC authentication. +func SignBlobWithOIDC(_ context.Context, opts *SignBlobOIDCOptions) (*SignBlobOIDCResult, error) { + // Load signing options. + var signOpts sign.BundleOptions + { + // Define config to use for signing. 
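// --- Editor's note (hedged sketch; not part of the patch) --------------------
// End-to-end use of AttachSignature: pair a payload produced by
// GeneratePayload with its base64 signature (e.g. from SignBlobWithKey) and
// push it next to the image. The registry reference and credential env vars
// are hypothetical placeholders. Assumes:
//   import ("context"; "os"; cosign "github.com/agntcy/dir/utils/cosign")
func attachExample(ctx context.Context, digest, payloadJSON, sigB64 string) error {
	return cosign.AttachSignature(ctx, &cosign.AttachSignatureOptions{
		ImageRef:  "registry.example.com/org/image@" + digest, // hypothetical ref
		Signature: sigB64,      // base64 signature over payloadJSON
		Payload:   payloadJSON, // output of GeneratePayload(digest)
		Username:  os.Getenv("REGISTRY_USER"), // optional basic auth
		Password:  os.Getenv("REGISTRY_PASS"),
	})
}
// ------------------------------------------------------------------------------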
+ signingConfig, err := root.NewSigningConfig( + root.SigningConfigMediaType02, + // Fulcio URLs + []root.Service{ + { + URL: setOrDefault(opts.FulcioURL, DefaultFulcioURL), + MajorAPIVersion: 1, + ValidityPeriodStart: time.Now().Add(-time.Hour), + ValidityPeriodEnd: time.Now().Add(time.Hour), + }, + }, + // OIDC Provider URLs + // Usage and requirements: https://docs.sigstore.dev/certificate_authority/oidc-in-fulcio/ + []root.Service{ + { + URL: setOrDefault(opts.OIDCProviderURL, DefaultOIDCProviderURL), + MajorAPIVersion: 1, + ValidityPeriodStart: time.Now().Add(-time.Hour), + ValidityPeriodEnd: time.Now().Add(time.Hour), + }, + }, + // Rekor URLs + []root.Service{ + { + URL: setOrDefault(opts.RekorURL, DefaultRekorURL), + MajorAPIVersion: 1, + ValidityPeriodStart: time.Now().Add(-time.Hour), + ValidityPeriodEnd: time.Now().Add(time.Hour), + }, + }, + root.ServiceConfiguration{ + Selector: v1.ServiceSelector_ANY, + }, + []root.Service{ + { + URL: setOrDefault(opts.TimestampURL, DefaultTimestampURL), + MajorAPIVersion: 1, + ValidityPeriodStart: time.Now().Add(-time.Hour), + ValidityPeriodEnd: time.Now().Add(time.Hour), + }, + }, + root.ServiceConfiguration{ + Selector: v1.ServiceSelector_ANY, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create signing config: %w", err) + } + + // Use fulcio to sign. + fulcioURL, err := root.SelectService(signingConfig.FulcioCertificateAuthorityURLs(), []uint32{1}, time.Now()) + if err != nil { + return nil, fmt.Errorf("failed to select fulcio URL: %w", err) + } + + fulcioOpts := &sign.FulcioOptions{ + BaseURL: fulcioURL.URL, + Timeout: DefaultFulcioTimeout, + Retries: 1, + } + signOpts.CertificateProvider = sign.NewFulcio(fulcioOpts) + signOpts.CertificateProviderOptions = &sign.CertificateProviderOptions{ + IDToken: opts.IDToken, + } + + // Use timestamp authortiy to sign. + tsaURLs, err := root.SelectServices(signingConfig.TimestampAuthorityURLs(), + signingConfig.TimestampAuthorityURLsConfig(), []uint32{1}, time.Now()) + if err != nil { + return nil, fmt.Errorf("failed to select timestamp authority URL: %w", err) + } + + for _, tsaURL := range tsaURLs { + tsaOpts := &sign.TimestampAuthorityOptions{ + URL: tsaURL.URL, + Timeout: DefaultTimestampAuthorityTimeout, + Retries: 1, + } + signOpts.TimestampAuthorities = append(signOpts.TimestampAuthorities, sign.NewTimestampAuthority(tsaOpts)) + } + + // Use rekor to sign. + rekorURLs, err := root.SelectServices(signingConfig.RekorLogURLs(), + signingConfig.RekorLogURLsConfig(), []uint32{1}, time.Now()) + if err != nil { + return nil, fmt.Errorf("failed to select rekor URL: %w", err) + } + + for _, rekorURL := range rekorURLs { + rekorOpts := &sign.RekorOptions{ + BaseURL: rekorURL.URL, + Timeout: DefaultRekorTimeout, + Retries: 1, + } + signOpts.TransparencyLogs = append(signOpts.TransparencyLogs, sign.NewRekor(rekorOpts)) + } + } + + // Generate an ephemeral keypair for signing. + signKeypair, err := sign.NewEphemeralKeypair(nil) + if err != nil { + return nil, fmt.Errorf("failed to create ephemeral keypair: %w", err) + } + + // Sign the record JSON data. 
+ sigBundle, err := sign.Bundle(&sign.PlainData{Data: opts.Payload}, signKeypair, signOpts) + if err != nil { + return nil, fmt.Errorf("failed to sign record: %w", err) + } + + publicKeyPEM, err := signKeypair.GetPublicKeyPem() + if err != nil { + return nil, fmt.Errorf("failed to get public key: %w", err) + } + + return &SignBlobOIDCResult{ + Signature: base64.StdEncoding.EncodeToString(sigBundle.GetMessageSignature().GetSignature()), + PublicKey: publicKeyPEM, + }, nil +} + +// SignBlobKeyOptions contains options for key-based blob signing. +type SignBlobKeyOptions struct { + Payload []byte + PrivateKey []byte + Password []byte +} + +// SignBlobKeyResult contains the result of key-based blob signing. +type SignBlobKeyResult struct { + Signature string + PublicKey string +} + +// SignBlobWithKey signs a blob using a private key. +func SignBlobWithKey(_ context.Context, opts *SignBlobKeyOptions) (*SignBlobKeyResult, error) { + payload := bytes.NewReader(opts.Payload) + + sv, err := cosign.LoadPrivateKey(opts.PrivateKey, opts.Password, nil) + if err != nil { + return nil, fmt.Errorf("loading private key: %w", err) + } + + sig, err := sv.SignMessage(payload) + if err != nil { + return nil, fmt.Errorf("signing blob: %w", err) + } + + pubKey, err := sv.PublicKey() + if err != nil { + return nil, fmt.Errorf("getting public key: %w", err) + } + + publicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(pubKey) + if err != nil { + return nil, fmt.Errorf("getting public key: %w", err) + } + + return &SignBlobKeyResult{ + Signature: base64.StdEncoding.EncodeToString(sig), + PublicKey: string(publicKeyPEM), + }, nil +} + +// AttachSignatureOptions contains options for attaching signatures to OCI images. +type AttachSignatureOptions struct { + ImageRef string + Signature string + Payload string + Username string + Password string +} + +// AttachSignature attaches a signature to an OCI image using cosign. +func AttachSignature(_ context.Context, opts *AttachSignatureOptions) error { + ref, err := name.ParseReference(opts.ImageRef) + if err != nil { + return fmt.Errorf("failed to parse image reference: %w", err) + } + + sig, err := static.NewSignature([]byte(opts.Payload), opts.Signature) + if err != nil { + return fmt.Errorf("failed to create static signature: %w", err) + } + + // Remote options for authentication + remoteOpts := []ociremote.Option{} + if opts.Username != "" && opts.Password != "" { + remoteOpts = append(remoteOpts, ociremote.WithRemoteOptions( + remote.WithAuth( + &authn.Basic{ + Username: opts.Username, + Password: opts.Password, + }, + ), + )) + } + + se, err := ociremote.SignedEntity(ref, remoteOpts...) + if err != nil { + return fmt.Errorf("failed to create signed entity: %w", err) + } + + // Attach the signature to the entity. + newSE, err := mutate.AttachSignatureToEntity(se, sig) + if err != nil { + return fmt.Errorf("failed to attach signature to entity: %w", err) + } + + digest, err := ociremote.ResolveDigest(ref, remoteOpts...) + if err != nil { + return fmt.Errorf("resolving digest: %w", err) + } + + err = ociremote.WriteSignaturesExperimentalOCI(digest, newSE, remoteOpts...) + if err != nil { + return fmt.Errorf("failed to write signatures: %w", err) + } + + return nil +} + +// GenerateKeyPairOptions contains options for generating cosign key pairs. +type GenerateKeyPairOptions struct { + Directory string + Password string +} + +// GenerateKeyPair generates a cosign key pair in the specified directory. 
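// --- Editor's note (hedged sketch; not part of the patch) --------------------
// GenerateKeyPair below shells out to the cosign CLI, so the `cosign` binary
// must be on PATH; the password travels via COSIGN_PASSWORD exactly as in the
// code. By cosign's defaults this writes cosign.key / cosign.pub into
// Directory. The directory and password values are hypothetical. Assumes:
//   import ("context"; cosign "github.com/agntcy/dir/utils/cosign")
func keygenExample(ctx context.Context) error {
	return cosign.GenerateKeyPair(ctx, &cosign.GenerateKeyPairOptions{
		Directory: "/tmp/dir-keys",      // hypothetical output directory
		Password:  "example-passphrase", // sketch only; never hard-code secrets
	})
}
// ------------------------------------------------------------------------------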
+func GenerateKeyPair(ctx context.Context, opts *GenerateKeyPairOptions) error { + cmd := exec.CommandContext(ctx, "cosign", "generate-key-pair") + + if opts.Directory != "" { + cmd.Dir = opts.Directory + } + + if opts.Password != "" { + cmd.Env = append(os.Environ(), "COSIGN_PASSWORD="+opts.Password) + } + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("cosign generate-key-pair failed: %w\nOutput: %s", err, string(output)) + } + + return nil +} + +func setOrDefault(value string, defaultValue string) string { + if value == "" { + value = defaultValue + } + + return value +} diff --git a/utils/go.mod b/utils/go.mod index ec1b5a6f3..ddc6ecf84 100644 --- a/utils/go.mod +++ b/utils/go.mod @@ -1,129 +1,129 @@ -module github.com/agntcy/dir/utils - -go 1.25.2 - -require ( - github.com/google/go-containerregistry v0.20.7 - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c - github.com/sigstore/cosign/v3 v3.0.3 - github.com/sigstore/protobuf-specs v0.5.0 - github.com/sigstore/sigstore v1.10.0 - github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 - github.com/spf13/viper v1.21.0 - github.com/spiffe/go-spiffe/v2 v2.6.0 - github.com/stretchr/testify v1.11.1 - zotregistry.dev/zot/v2 v2.1.11 -) - -require ( - github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v5 v5.0.3 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect - github.com/coreos/go-oidc/v3 v3.17.0 // indirect - github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect - github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/distribution/distribution/v3 v3.0.0 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect - github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/go-chi/chi/v5 v5.2.3 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect - github.com/go-logr/logr v1.4.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.24.1 // indirect - github.com/go-openapi/errors v0.22.4 // indirect - github.com/go-openapi/jsonpointer v0.22.1 // indirect - github.com/go-openapi/jsonreference v0.21.3 // indirect - github.com/go-openapi/loads v0.23.2 // indirect - github.com/go-openapi/runtime v0.29.2 // indirect - github.com/go-openapi/spec v0.22.1 // indirect - github.com/go-openapi/strfmt v0.25.0 // indirect - github.com/go-openapi/swag v0.25.4 // indirect - github.com/go-openapi/swag/cmdutils v0.25.4 // indirect - github.com/go-openapi/swag/conv v0.25.4 // indirect - github.com/go-openapi/swag/fileutils v0.25.4 // indirect - github.com/go-openapi/swag/jsonname v0.25.4 // indirect - github.com/go-openapi/swag/jsonutils v0.25.4 // indirect - github.com/go-openapi/swag/loading v0.25.4 // indirect - github.com/go-openapi/swag/mangling v0.25.4 // indirect - github.com/go-openapi/swag/netutils v0.25.4 // indirect - 
github.com/go-openapi/swag/stringutils v0.25.4 // indirect - github.com/go-openapi/swag/typeutils v0.25.4 // indirect - github.com/go-openapi/swag/yamlutils v0.25.4 // indirect - github.com/go-openapi/validate v0.25.1 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.3.2 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.8 // indirect - github.com/in-toto/attestation v1.1.2 // indirect - github.com/in-toto/in-toto-golang v0.9.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect - github.com/klauspost/compress v1.18.1 // indirect - github.com/letsencrypt/boulder v0.20251110.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/moby/term v0.5.2 // indirect - github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/sassoftware/relic v7.2.1+incompatible // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect - github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/rekor v1.4.3 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect - github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect - github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect - github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect - github.com/spf13/afero v1.15.0 // indirect - github.com/spf13/cast v1.10.0 // indirect - github.com/spf13/cobra v1.10.2 // indirect - github.com/spf13/pflag v1.0.10 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect - github.com/theupdateframework/go-tuf v0.7.0 // indirect - github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect - github.com/tiendc/go-deepcopy v1.7.1 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect - github.com/transparency-dev/merkle v0.0.2 // indirect - github.com/vbatts/tar-split v0.12.2 // indirect - go.mongodb.org/mongo-driver v1.17.6 // indirect - go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect - go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sync v0.18.0 // indirect - 
golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // indirect - google.golang.org/grpc v1.77.0 // indirect - google.golang.org/protobuf v1.36.10 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.5.2 // indirect -) +module github.com/agntcy/dir/utils + +go 1.25.2 + +require ( + github.com/google/go-containerregistry v0.20.7 + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c + github.com/sigstore/cosign/v3 v3.0.3 + github.com/sigstore/protobuf-specs v0.5.0 + github.com/sigstore/sigstore v1.10.0 + github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 + github.com/spf13/viper v1.21.0 + github.com/spiffe/go-spiffe/v2 v2.6.0 + github.com/stretchr/testify v1.11.1 + zotregistry.dev/zot/v2 v2.1.11 +) + +require ( + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/coreos/go-oidc/v3 v3.17.0 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect + github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect + github.com/distribution/distribution/v3 v3.0.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.4 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-chi/chi/v5 v5.2.3 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.24.1 // indirect + github.com/go-openapi/errors v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.22.1 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/loads v0.23.2 // indirect + github.com/go-openapi/runtime v0.29.2 // indirect + github.com/go-openapi/spec v0.22.1 // indirect + github.com/go-openapi/strfmt v0.25.0 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-openapi/validate v0.25.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + 
github.com/golang/snappy v0.0.4 // indirect + github.com/google/certificate-transparency-go v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect + github.com/in-toto/attestation v1.1.2 // indirect + github.com/in-toto/in-toto-golang v0.9.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect + github.com/klauspost/compress v1.18.1 // indirect + github.com/letsencrypt/boulder v0.20251110.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sassoftware/relic v7.2.1+incompatible // indirect + github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/sigstore/rekor v1.4.3 // indirect + github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/timestamp-authority/v2 v2.0.3 // indirect + github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/theupdateframework/go-tuf v0.7.0 // indirect + github.com/theupdateframework/go-tuf/v2 v2.3.0 // indirect + github.com/tiendc/go-deepcopy v1.7.1 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect + github.com/transparency-dev/merkle v0.0.2 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect + go.mongodb.org/mongo-driver v1.17.6 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 // 
indirect + google.golang.org/grpc v1.77.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.2 // indirect +) diff --git a/utils/go.sum b/utils/go.sum index 490fc91eb..f0c4eb0f3 100644 --- a/utils/go.sum +++ b/utils/go.sum @@ -1,605 +1,605 @@ -cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= -cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= -cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= -cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= -cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= -cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= -cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= -cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= -cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= -github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= -github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= -github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 
h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= -github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= -github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= -github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= -github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= -github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= -github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= -github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= -github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= -github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= -github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= -github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= -github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= -github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 
h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= -github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= -github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= -github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= -github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= -github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= -github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= -github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= -github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= -github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= -github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= -github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= -github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= -github.com/go-openapi/errors v0.22.4 
h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= -github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= -github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= -github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= -github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= -github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= -github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= -github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= -github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= -github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= -github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= -github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= -github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= -github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= -github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= -github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= -github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= -github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= -github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= -github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= -github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= -github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= -github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= -github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= -github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= -github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= -github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= -github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= -github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= -github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= -github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= -github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= -github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= -github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= -github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= -github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= -github.com/go-openapi/swag/typeutils v0.25.4/go.mod 
h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= -github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= -github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= -github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= -github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= -github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= -github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= -github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= -github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= -github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= -github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= -github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= -github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= -github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= -github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= -github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= -github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= -github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= -github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= -github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= -github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= 
-github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= -github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= -github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= -github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= -github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= -github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= -github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= -github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= -github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= -github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= -github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmhodges/clock v1.2.0 
h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= -github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= -github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= -github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= -github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= -github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= -github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= -github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 h1:4fMydcL7sQjWQPMmzTLpRtsKl5KQdZVNcvPoYwpr4G4= -github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= -github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= -github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= -github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= -github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= -github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= -github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= -github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 
-github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= -github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= -github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= -github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= -github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= -github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= -github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= -github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= -github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= -github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= -github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= -github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= -github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= -github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= -github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= -github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= -github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= -github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= -github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= -github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= -github.com/sirupsen/logrus 
v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= -github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= -github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= -github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= -github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= -github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= -github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= -github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= -github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= -github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= -github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= -github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= -github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= -github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= -github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= -github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= -github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= -github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= -github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= -github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= -github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= -github.com/tiendc/go-deepcopy v1.7.1 h1:LnubftI6nYaaMOcaz0LphzwraqN8jiWTwm416sitff4= -github.com/tiendc/go-deepcopy v1.7.1/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= -github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= -github.com/tink-crypto/tink-go-gcpkms/v2 
v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= -github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= -github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= -github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= -github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= -github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= -github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= -github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= -github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= -github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= -github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= -github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= -github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= -go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= -go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= -go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= -go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= 
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= -go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= -go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= -go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= -go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= -go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.47.0 
h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= -golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= -golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= -google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= -google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= -google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= -google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= -google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= -google.golang.org/protobuf v1.36.10/go.mod 
h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= -sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -zotregistry.dev/zot/v2 v2.1.11 h1:hxeE4ilLcmCeF035hs1lRtoyFXm6rJ0rearKgTXPbq8= -zotregistry.dev/zot/v2 v2.1.11/go.mod h1:EYqgYSnmOBPQ9OwD5ntuYoLY/qbuzVfpDllomKCa3NI= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d 
h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0 h1:E4MgwLBGeVB5f2MdcIVD3ELVAWpr+WD6MUe1i+tM/PA= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.4.0/go.mod h1:Y2b/1clN4zsAoUd/pgNAQHjLDnTis/6ROkUfyob6psM= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.40.0 h1:/WMUA0kjhZExjOQN2z3oLALDREea1A7TobfuiBrKlwc= +github.com/aws/aws-sdk-go-v2 v1.40.0/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/config v1.32.2 h1:4liUsdEpUUPZs5WVapsJLx5NPmQhQdez7nYFcovrytk= +github.com/aws/aws-sdk-go-v2/config v1.32.2/go.mod h1:l0hs06IFz1eCT+jTacU/qZtC33nvcnLADAPL/XyrkZI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2 h1:qZry8VUyTK4VIo5aEdUcBjPZHL2v4FyQ3QEOaWcFLu4= +github.com/aws/aws-sdk-go-v2/credentials v1.19.2/go.mod h1:YUqm5a1/kBnoK+/NY5WEiMocZihKSo15/tJdmdXnM5g= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14 h1:WZVR5DbDgxzA0BJeudId89Kmgy6DIU4ORpxwsVHz0qA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.14/go.mod h1:Dadl9QO0kHgbrH1GRqGiZdYtW5w+IXXaBNCHTIaheM4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14 h1:PZHqQACxYb8mYgms4RZbhZG0a7dPW06xOjmaH0EJC/I= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.14/go.mod h1:VymhrMJUWs69D8u0/lZ7jSB6WgaG/NqHi3gX0aYf6U0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.7.14 h1:bOS19y6zlJwagBfHxs0ESzr1XCOU2KXJCWcq3E2vfjY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.14/go.mod h1:1ipeGBMAxZ0xcTm6y6paC2C/J6f6OO7LBODV9afuAyM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14 h1:FIouAnCE46kyYqyhs0XEBDFFSREtdnr8HQuLPQPLCrY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.14/go.mod h1:UTwDc5COa5+guonQU8qBikJo1ZJ4ln2r1MkF7Dqag1E= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1 h1:U0asSZ3ifpuIehDPkRI2rxHbmFUMplDA2VeR9Uogrmw= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.1/go.mod h1:NZo9WJqQ0sxQ1Yqu1IwCHQFQunTms2MlVgejg16S1rY= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2 h1:MxMBdKTYBjPQChlJhi4qlEueqB1p1KcbTEa7tD5aqPs= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.2/go.mod h1:iS6EPmNeqCsGo+xQmXv0jIMjyYtQfnwg36zl2FwEouk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5 h1:ksUT5KtgpZd3SAiFJNJ0AFEJVva3gjBmN7eXUZjzUwQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.5/go.mod h1:av+ArJpoYf3pgyrj6tcehSFW+y9/QvAY8kMooR9bZCw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10 h1:GtsxyiF3Nd3JahRBJbxLCCdYW9ltGQYrFWg8XdkGDd8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.10/go.mod h1:/j67Z5XBVDx8nZVp9EuFM9/BS5dvBznbqILGuu73hug= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2 h1:a5UTtD4mHBU3t0o6aHQZFJTNKVfxFWfPX7J0Lr7G+uY= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.2/go.mod h1:6TxbXoDSgBQ225Qd8Q+MbxUxUh6TtNKwbRt/EPS9xso= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= 
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc= +github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= +github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= +github.com/danieljoos/wincred v1.2.3 h1:v7dZC2x32Ut3nEfRH+vhoZGvN72+dQ/snVXo/vMFLdQ= +github.com/danieljoos/wincred v1.2.3/go.mod h1:6qqX0WNrS4RzPZ1tnroDzq9kY3fu1KwE7MRLQK4X0bs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= +github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop 
v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/analysis v0.24.1 h1:Xp+7Yn/KOnVWYG8d+hPksOYnCYImE3TieBa7rBOesYM= +github.com/go-openapi/analysis v0.24.1/go.mod h1:dU+qxX7QGU1rl7IYhBC8bIfmWQdX4Buoea4TGtxXY84= +github.com/go-openapi/errors v0.22.4 h1:oi2K9mHTOb5DPW2Zjdzs/NIvwi2N3fARKaTJLdNabaM= +github.com/go-openapi/errors v0.22.4/go.mod h1:z9S8ASTUqx7+CP1Q8dD8ewGH/1JWFFLX/2PmAYNQLgk= +github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk= +github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= +github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/loads v0.23.2 h1:rJXAcP7g1+lWyBHC7iTY+WAF0rprtM+pm8Jxv1uQJp4= +github.com/go-openapi/loads v0.23.2/go.mod h1:IEVw1GfRt/P2Pplkelxzj9BYFajiWOtY2nHZNj4UnWY= +github.com/go-openapi/runtime v0.29.2 h1:UmwSGWNmWQqKm1c2MGgXVpC2FTGwPDQeUsBMufc5Yj0= +github.com/go-openapi/runtime v0.29.2/go.mod h1:biq5kJXRJKBJxTDJXAa00DOTa/anflQPhT0/wmjuy+0= +github.com/go-openapi/spec v0.22.1 h1:beZMa5AVQzRspNjvhe5aG1/XyBSMeX1eEOs7dMoXh/k= +github.com/go-openapi/spec v0.22.1/go.mod h1:c7aeIQT175dVowfp7FeCvXXnjN/MrpaONStibD2WtDA= +github.com/go-openapi/strfmt v0.25.0 h1:7R0RX7mbKLa9EYCTHRcCuIPcaqlyQiWNPTXwClK0saQ= +github.com/go-openapi/strfmt v0.25.0/go.mod h1:nNXct7OzbwrMY9+5tLX4I21pzcmE6ccMGXl3jFdPfn8= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 
h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZnmAgBw= +github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc= +github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA= +github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 
v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/certificate-transparency-go v1.3.2 h1:9ahSNZF2o7SYMaKaXhAumVEzXB2QaayzII9C8rv7v+A= +github.com/google/certificate-transparency-go v1.3.2/go.mod h1:H5FpMUaGa5Ab2+KCYsxg6sELw3Flkl7pGZzWdBoYLXs= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/trillian v1.7.2 h1:EPBxc4YWY4Ak8tcuhyFleY+zYlbCDCa4Sn24e1Ka8Js= +github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81zxNQw05s= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod 
h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM= +github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= +github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= +github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E= +github.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= 
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= +github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= +github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= +github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY= +github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/boulder v0.20251110.0 h1:J8MnKICeilO91dyQ2n5eBbab24neHzUpYMUIOdOtbjc= +github.com/letsencrypt/boulder v0.20251110.0/go.mod h1:ogKCJQwll82m7OVHWyTuf8eeFCjuzdRQlgnZcCl0V+8= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure 
v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/natefinch/atomic v1.0.1 h1:ZPYKxkqQOx3KZ+RsbnP/YsgvxWQPGxjC0oBt2AhwV0A= +github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= +github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184 h1:4fMydcL7sQjWQPMmzTLpRtsKl5KQdZVNcvPoYwpr4G4= +github.com/opencontainers/distribution-spec/specs-go v0.0.0-20250123160558-a139cc423184/go.mod h1:Va0IMqkjv62YSEytL4sgxrkiD9IzU0T0bX/ZZEtMnSQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= +github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= +github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= +github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/sigstore/cosign/v3 v3.0.3 h1:IknuTUYM+tZ/ToghM7mvg9V0O31NG3rev97u1IJIuYA= +github.com/sigstore/cosign/v3 v3.0.3/go.mod h1:poeQqwvpDNIDyim7a2ljUhonVKpCys+fx3SY0Lkmi/4= +github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= +github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.4.3 h1:2+aw4Gbgumv8vYM/QVg6b+hvr4x4Cukur8stJrVPKU0= +github.com/sigstore/rekor v1.4.3/go.mod h1:o0zgY087Q21YwohVvGwV9vK1/tliat5mfnPiVI3i75o= +github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= +github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= 
+github.com/sigstore/sigstore v1.10.0 h1:lQrmdzqlR8p9SCfWIpFoGUqdXEzJSZT2X+lTXOMPaQI= +github.com/sigstore/sigstore v1.10.0/go.mod h1:Ygq+L/y9Bm3YnjpJTlQrOk/gXyrjkpn3/AEJpmk1n9Y= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894 h1:K8hnZhun6XacjxAdCdxkowSi7+FpmfYnAcMhTXZQyPg= +github.com/sigstore/sigstore-go v1.1.4-0.20251201121426-2cdedea80894/go.mod h1:uuR+Edo6P+iwi0HKscycUm8mxXL748nAureqSg6jFLA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0 h1:UOHpiyezCj5RuixgIvCV3QyuxIGQT+N6nGZEXA7OTTY= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.0/go.mod h1:U0CZmA2psabDa8DdiV7yXab0AHODzfKqvD2isH7Hrvw= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0 h1:fq4+8Y4YadxeF8mzhoMRPZ1mVvDYXmI3BfS0vlkPT7M= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.0/go.mod h1:u05nqPWY05lmcdHhv2lPaWTH3FGUhJzO7iW2hbboK3Q= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0 h1:iUEf5MZYOuXGnXxdF/WrarJrk0DTVHqeIOjYdtpVXtc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.0/go.mod h1:i6vg5JfEQix46R1rhQlrKmUtJoeH91drltyYOJEk1T4= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0 h1:dUvPv/MP23ZPIXZUW45kvCIgC0ZRfYxEof57AB6bAtU= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.0/go.mod h1:fR/gDdPvJWGWL70/NgBBIL1O0/3Wma6JHs3tSSYg3s4= +github.com/sigstore/timestamp-authority/v2 v2.0.3 h1:sRyYNtdED/ttLCMdaYnwpf0zre1A9chvjTnCmWWxN8Y= +github.com/sigstore/timestamp-authority/v2 v2.0.3/go.mod h1:mDaHxkt3HmZYoIlwYj4QWo0RUr7VjYU52aVO5f5Qb3I= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af h1:Sp5TG9f7K39yfB+If0vjp97vuT74F72r8hfRpP8jLU0= +github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.16.0 h1:EvHNkdRA4QHMrn75NZSoUQ/mAUXAYWfatfB01yTCzfY= +github.com/smarty/assertions v1.16.0/go.mod h1:duaaFdCS0K9dnoM50iyek/eYINOZ64gbh1Xlf6LG7AI= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.3.0 h1:gt3X8xT8qu/HT4w+n1jgv+p7koi5ad8XEkLXXZqG9AA= +github.com/theupdateframework/go-tuf/v2 v2.3.0/go.mod h1:xW8yNvgXRncmovMLvBxKwrKpsOwJZu/8x+aB0KtFcdw= +github.com/tiendc/go-deepcopy v1.7.1 h1:LnubftI6nYaaMOcaz0LphzwraqN8jiWTwm416sitff4= +github.com/tiendc/go-deepcopy v1.7.1/go.mod h1:4bKjNC2r7boYOkD2IOuZpYjmlDdzjbpTRyCx+goBCJQ= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI= +github.com/tink-crypto/tink-go-awskms/v2 v2.1.0/go.mod h1:PxSp9GlOkKL9rlybW804uspnHuO9nbD98V/fDX4uSis= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 h1:3B9i6XBXNTRspfkTC0asN5W0K6GhOSgcujNiECNRNb0= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0/go.mod h1:jY5YN2BqD/KSCHM9SqZPIpJNG/u3zwfLXHgws4x2IRw= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0 h1:6nAX1aRGnkg2SEUMwO5toB2tQkP0Jd6cbmZ/K5Le1V0= +github.com/tink-crypto/tink-go-hcvault/v2 v2.3.0/go.mod h1:HOC5NWW1wBI2Vke1FGcRBvDATkEYE7AUDiYbXqi2sBw= +github.com/tink-crypto/tink-go/v2 v2.5.0 h1:B8KLF6AofxdBIE4UJIaFbmoj5/1ehEtt7/MmzfI4Zpw= +github.com/tink-crypto/tink-go/v2 v2.5.0/go.mod h1:2WbBA6pfNsAfBwDCggboaHeB2X29wkU8XHtGwh2YIk8= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c h1:5a2XDQ2LiAUV+/RjckMyq9sXudfrPSuCY4FuPC1NyAw= +github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c/go.mod h1:g85IafeFJZLxlzZCDRu4JLpfS7HKzR+Hw9qRh3bVzDI= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= +github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= +github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= +github.com/ysmood/got 
v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= +github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= +github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU= +github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= +go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.step.sm/crypto v0.74.0 h1:/APBEv45yYR4qQFg47HA8w1nesIGcxh44pGyQNw6JRA= +go.step.sm/crypto v0.74.0/go.mod h1:UoXqCAJjjRgzPte0Llaqen7O9P7XjPmgjgTHQGkKCDk= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9 h1:LvZVVaPE0JSqL+ZWb6ErZfnEOKIqqFWUJE2D0fObSmc= +google.golang.org/genproto v0.0.0-20250922171735-9219d122eba9/go.mod 
h1:QFOrLhdAe2PsTp3vQY4quuLKTi9j3XG3r6JPPaw7MSc= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4= +google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod 
h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +zotregistry.dev/zot/v2 v2.1.11 h1:hxeE4ilLcmCeF035hs1lRtoyFXm6rJ0rearKgTXPbq8= +zotregistry.dev/zot/v2 v2.1.11/go.mod h1:EYqgYSnmOBPQ9OwD5ntuYoLY/qbuzVfpDllomKCa3NI= diff --git a/utils/logging/config.go b/utils/logging/config.go index 20e346367..8e5793a7c 100644 --- a/utils/logging/config.go +++ b/utils/logging/config.go @@ -1,57 +1,57 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "fmt" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" -) - -const ( - DefaultEnvPrefix = "DIRECTORY_LOGGER" - DefaultLogLevel = "INFO" - DefaultLogFormat = "text" -) - -type Config struct { - LogFile string `json:"log_file,omitempty" mapstructure:"log_file"` - LogLevel string `json:"log_level,omitempty" mapstructure:"log_level"` - LogFormat string `json:"log_format,omitempty" mapstructure:"log_format"` -} - -func LoadConfig() (*Config, error) { - v := viper.NewWithOptions( - viper.KeyDelimiter("."), - viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), - ) - - v.SetEnvPrefix(DefaultEnvPrefix) - v.AllowEmptyEnv(true) - v.AutomaticEnv() - - _ = v.BindEnv("log_file") - - _ = v.BindEnv("log_level") - v.SetDefault("log_level", DefaultLogLevel) - - _ = v.BindEnv("log_format") - v.SetDefault("log_format", DefaultLogFormat) - - // Load configuration into struct - decodeHooks := mapstructure.ComposeDecodeHookFunc( - mapstructure.TextUnmarshallerHookFunc(), - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ) - - config := &Config{} - if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { - return nil, fmt.Errorf("failed to load configuration: %w", err) - } - - return config, nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "fmt" + "strings" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" +) + +const ( + DefaultEnvPrefix = "DIRECTORY_LOGGER" + DefaultLogLevel = "INFO" + DefaultLogFormat = "text" +) + +type Config struct { + LogFile string `json:"log_file,omitempty" mapstructure:"log_file"` + LogLevel string `json:"log_level,omitempty" mapstructure:"log_level"` + LogFormat string `json:"log_format,omitempty" mapstructure:"log_format"` +} + +func LoadConfig() (*Config, error) { + v := viper.NewWithOptions( + viper.KeyDelimiter("."), + viper.EnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")), + ) + + v.SetEnvPrefix(DefaultEnvPrefix) + v.AllowEmptyEnv(true) + v.AutomaticEnv() + + _ = v.BindEnv("log_file") + + _ = v.BindEnv("log_level") + v.SetDefault("log_level", DefaultLogLevel) + + _ = v.BindEnv("log_format") + v.SetDefault("log_format", DefaultLogFormat) + + // Load configuration into struct + decodeHooks := mapstructure.ComposeDecodeHookFunc( + mapstructure.TextUnmarshallerHookFunc(), + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ) + + config := &Config{} + if err := v.Unmarshal(config, viper.DecodeHook(decodeHooks)); err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + + return config, nil +} diff --git a/utils/logging/config_test.go b/utils/logging/config_test.go index 87364436a..660026ae4 100644 --- a/utils/logging/config_test.go +++ b/utils/logging/config_test.go @@ -1,195 +1,195 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - 
-package logging - -import ( - "os" - "testing" -) - -// TestLoadConfigWithDefaults verifies default configuration values. -func TestLoadConfigWithDefaults(t *testing.T) { - // Clear any environment variables - os.Unsetenv("DIRECTORY_LOGGER_LOG_FILE") - os.Unsetenv("DIRECTORY_LOGGER_LOG_LEVEL") - os.Unsetenv("DIRECTORY_LOGGER_LOG_FORMAT") - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed: %v", err) - } - - // Verify defaults - if cfg.LogLevel != DefaultLogLevel { - t.Errorf("Expected LogLevel=%s, got: %s", DefaultLogLevel, cfg.LogLevel) - } - - if cfg.LogFormat != DefaultLogFormat { - t.Errorf("Expected LogFormat=%s, got: %s", DefaultLogFormat, cfg.LogFormat) - } - - if cfg.LogFile != "" { - t.Errorf("Expected LogFile='', got: %s", cfg.LogFile) - } -} - -// TestLoadConfigWithEnvVars verifies environment variable configuration. -func TestLoadConfigWithEnvVars(t *testing.T) { - // Set environment variables - t.Setenv("DIRECTORY_LOGGER_LOG_FILE", "/tmp/test.log") - t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "DEBUG") - t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", "json") - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed: %v", err) - } - - // Verify environment variables are loaded - if cfg.LogFile != "/tmp/test.log" { - t.Errorf("Expected LogFile='/tmp/test.log', got: %s", cfg.LogFile) - } - - if cfg.LogLevel != "DEBUG" { - t.Errorf("Expected LogLevel='DEBUG', got: %s", cfg.LogLevel) - } - - if cfg.LogFormat != "json" { - t.Errorf("Expected LogFormat='json', got: %s", cfg.LogFormat) - } -} - -// TestLoadConfigWithPartialEnvVars verifies partial environment variable configuration. -func TestLoadConfigWithPartialEnvVars(t *testing.T) { - // Set only some environment variables - t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "ERROR") - - // Unset others to ensure clean state - os.Unsetenv("DIRECTORY_LOGGER_LOG_FILE") - os.Unsetenv("DIRECTORY_LOGGER_LOG_FORMAT") - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed: %v", err) - } - - // Verify mix of env vars and defaults - if cfg.LogLevel != "ERROR" { - t.Errorf("Expected LogLevel='ERROR', got: %s", cfg.LogLevel) - } - - if cfg.LogFormat != DefaultLogFormat { - t.Errorf("Expected LogFormat=%s (default), got: %s", DefaultLogFormat, cfg.LogFormat) - } -} - -// TestLoadConfigEmptyEnvVars verifies empty environment variables are handled. -func TestLoadConfigEmptyEnvVars(t *testing.T) { - // Set empty environment variables - t.Setenv("DIRECTORY_LOGGER_LOG_FILE", "") - t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "") - t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", "") - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed: %v", err) - } - - // When env vars are set to empty string, Viper uses the empty value - // (not the default). This is expected behavior - empty string is valid. - if cfg.LogLevel != "" { - t.Errorf("Expected LogLevel='' (empty), got: %s", cfg.LogLevel) - } - - if cfg.LogFormat != "" { - t.Errorf("Expected LogFormat='' (empty), got: %s", cfg.LogFormat) - } - - if cfg.LogFile != "" { - t.Errorf("Expected LogFile='' (empty), got: %s", cfg.LogFile) - } -} - -// TestLoadConfigAllFormats verifies all supported log formats. 
-func TestLoadConfigAllFormats(t *testing.T) { - formats := []string{"text", "json", "TEXT", "JSON", "Text", "Json"} - - for _, format := range formats { - t.Run(format, func(t *testing.T) { - t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", format) - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed for format %s: %v", format, err) - } - - if cfg.LogFormat != format { - t.Errorf("Expected LogFormat=%s, got: %s", format, cfg.LogFormat) - } - }) - } -} - -// TestLoadConfigAllLogLevels verifies all supported log levels. -func TestLoadConfigAllLogLevels(t *testing.T) { - levels := []string{"DEBUG", "INFO", "WARN", "ERROR", "debug", "info", "warn", "error"} - - for _, level := range levels { - t.Run(level, func(t *testing.T) { - t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", level) - - cfg, err := LoadConfig() - if err != nil { - t.Fatalf("LoadConfig() failed for level %s: %v", level, err) - } - - if cfg.LogLevel != level { - t.Errorf("Expected LogLevel=%s, got: %s", level, cfg.LogLevel) - } - }) - } -} - -// TestConfigJSONMarshaling verifies Config can be marshaled to JSON. -func TestConfigJSONMarshaling(t *testing.T) { - cfg := &Config{ - LogFile: "/var/log/app.log", - LogLevel: "INFO", - LogFormat: "json", - } - - // Just verify the struct is valid and fields are accessible - if cfg.LogFile != "/var/log/app.log" { - t.Errorf("LogFile mismatch") - } - - if cfg.LogLevel != "INFO" { - t.Errorf("LogLevel mismatch") - } - - if cfg.LogFormat != "json" { - t.Errorf("LogFormat mismatch") - } -} - -// TestConfigConstants verifies all constants are correctly defined. -func TestConfigConstants(t *testing.T) { - tests := []struct { - name string - value string - expected string - }{ - {"DefaultEnvPrefix", DefaultEnvPrefix, "DIRECTORY_LOGGER"}, - {"DefaultLogLevel", DefaultLogLevel, "INFO"}, - {"DefaultLogFormat", DefaultLogFormat, "text"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.value != tt.expected { - t.Errorf("Expected %s=%s, got: %s", tt.name, tt.expected, tt.value) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "os" + "testing" +) + +// TestLoadConfigWithDefaults verifies default configuration values. +func TestLoadConfigWithDefaults(t *testing.T) { + // Clear any environment variables + os.Unsetenv("DIRECTORY_LOGGER_LOG_FILE") + os.Unsetenv("DIRECTORY_LOGGER_LOG_LEVEL") + os.Unsetenv("DIRECTORY_LOGGER_LOG_FORMAT") + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed: %v", err) + } + + // Verify defaults + if cfg.LogLevel != DefaultLogLevel { + t.Errorf("Expected LogLevel=%s, got: %s", DefaultLogLevel, cfg.LogLevel) + } + + if cfg.LogFormat != DefaultLogFormat { + t.Errorf("Expected LogFormat=%s, got: %s", DefaultLogFormat, cfg.LogFormat) + } + + if cfg.LogFile != "" { + t.Errorf("Expected LogFile='', got: %s", cfg.LogFile) + } +} + +// TestLoadConfigWithEnvVars verifies environment variable configuration. 
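+// t.Setenv registers a cleanup that restores each variable's previous value
+// when the test finishes, so no environment state leaks into other tests.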
+func TestLoadConfigWithEnvVars(t *testing.T) { + // Set environment variables + t.Setenv("DIRECTORY_LOGGER_LOG_FILE", "/tmp/test.log") + t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "DEBUG") + t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", "json") + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed: %v", err) + } + + // Verify environment variables are loaded + if cfg.LogFile != "/tmp/test.log" { + t.Errorf("Expected LogFile='/tmp/test.log', got: %s", cfg.LogFile) + } + + if cfg.LogLevel != "DEBUG" { + t.Errorf("Expected LogLevel='DEBUG', got: %s", cfg.LogLevel) + } + + if cfg.LogFormat != "json" { + t.Errorf("Expected LogFormat='json', got: %s", cfg.LogFormat) + } +} + +// TestLoadConfigWithPartialEnvVars verifies partial environment variable configuration. +func TestLoadConfigWithPartialEnvVars(t *testing.T) { + // Set only some environment variables + t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "ERROR") + + // Unset others to ensure clean state + os.Unsetenv("DIRECTORY_LOGGER_LOG_FILE") + os.Unsetenv("DIRECTORY_LOGGER_LOG_FORMAT") + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed: %v", err) + } + + // Verify mix of env vars and defaults + if cfg.LogLevel != "ERROR" { + t.Errorf("Expected LogLevel='ERROR', got: %s", cfg.LogLevel) + } + + if cfg.LogFormat != DefaultLogFormat { + t.Errorf("Expected LogFormat=%s (default), got: %s", DefaultLogFormat, cfg.LogFormat) + } +} + +// TestLoadConfigEmptyEnvVars verifies empty environment variables are handled. +func TestLoadConfigEmptyEnvVars(t *testing.T) { + // Set empty environment variables + t.Setenv("DIRECTORY_LOGGER_LOG_FILE", "") + t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", "") + t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", "") + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed: %v", err) + } + + // When env vars are set to empty string, Viper uses the empty value + // (not the default). This is expected behavior - empty string is valid. + if cfg.LogLevel != "" { + t.Errorf("Expected LogLevel='' (empty), got: %s", cfg.LogLevel) + } + + if cfg.LogFormat != "" { + t.Errorf("Expected LogFormat='' (empty), got: %s", cfg.LogFormat) + } + + if cfg.LogFile != "" { + t.Errorf("Expected LogFile='' (empty), got: %s", cfg.LogFile) + } +} + +// TestLoadConfigAllFormats verifies all supported log formats. +func TestLoadConfigAllFormats(t *testing.T) { + formats := []string{"text", "json", "TEXT", "JSON", "Text", "Json"} + + for _, format := range formats { + t.Run(format, func(t *testing.T) { + t.Setenv("DIRECTORY_LOGGER_LOG_FORMAT", format) + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed for format %s: %v", format, err) + } + + if cfg.LogFormat != format { + t.Errorf("Expected LogFormat=%s, got: %s", format, cfg.LogFormat) + } + }) + } +} + +// TestLoadConfigAllLogLevels verifies all supported log levels. +func TestLoadConfigAllLogLevels(t *testing.T) { + levels := []string{"DEBUG", "INFO", "WARN", "ERROR", "debug", "info", "warn", "error"} + + for _, level := range levels { + t.Run(level, func(t *testing.T) { + t.Setenv("DIRECTORY_LOGGER_LOG_LEVEL", level) + + cfg, err := LoadConfig() + if err != nil { + t.Fatalf("LoadConfig() failed for level %s: %v", level, err) + } + + if cfg.LogLevel != level { + t.Errorf("Expected LogLevel=%s, got: %s", level, cfg.LogLevel) + } + }) + } +} + +// TestConfigJSONMarshaling verifies Config can be marshaled to JSON. 
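+// Note: the body only asserts field assignment; an encoding/json round trip
+// is not exercised here.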
+func TestConfigJSONMarshaling(t *testing.T) { + cfg := &Config{ + LogFile: "/var/log/app.log", + LogLevel: "INFO", + LogFormat: "json", + } + + // Just verify the struct is valid and fields are accessible + if cfg.LogFile != "/var/log/app.log" { + t.Errorf("LogFile mismatch") + } + + if cfg.LogLevel != "INFO" { + t.Errorf("LogLevel mismatch") + } + + if cfg.LogFormat != "json" { + t.Errorf("LogFormat mismatch") + } +} + +// TestConfigConstants verifies all constants are correctly defined. +func TestConfigConstants(t *testing.T) { + tests := []struct { + name string + value string + expected string + }{ + {"DefaultEnvPrefix", DefaultEnvPrefix, "DIRECTORY_LOGGER"}, + {"DefaultLogLevel", DefaultLogLevel, "INFO"}, + {"DefaultLogFormat", DefaultLogFormat, "text"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.value != tt.expected { + t.Errorf("Expected %s=%s, got: %s", tt.name, tt.expected, tt.value) + } + }) + } +} diff --git a/utils/logging/logging.go b/utils/logging/logging.go index fef98f6fe..3df2d46fa 100644 --- a/utils/logging/logging.go +++ b/utils/logging/logging.go @@ -1,85 +1,85 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "log/slog" - "os" - "strings" - "sync" -) - -const ( - filePermission = 0o644 - - // Log format types. - formatJSON = "json" - formatText = "text" -) - -var once sync.Once - -// getLogOutput determines where logs should be written. -func getLogOutput(logFilePath string) *os.File { - if logFilePath != "" { - // Try to open or create the log file. - file, err := os.OpenFile(logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, filePermission) - if err == nil { - return file - } - - slog.Error("Failed to open log file, defaulting to stdout", "error", err) - } - - return os.Stdout -} - -// InitLogger initializes the global logger with the provided configuration. -// It supports multiple output formats: text, json. -// This function is idempotent and thread-safe - it will only initialize once. -func InitLogger(cfg *Config) { - once.Do(func() { - var logLevel slog.Level - - logOutput := getLogOutput(cfg.LogFile) - - // Parse log level; default to INFO if invalid. - if err := logLevel.UnmarshalText([]byte(strings.ToLower(cfg.LogLevel))); err != nil { - slog.Warn("Invalid log level, defaulting to INFO", "error", err) - logLevel = slog.LevelInfo - } - - // Create handler based on format - var handler slog.Handler - - opts := &slog.HandlerOptions{Level: logLevel} - - switch strings.ToLower(cfg.LogFormat) { - case formatJSON: - handler = slog.NewJSONHandler(logOutput, opts) - case formatText: - handler = slog.NewTextHandler(logOutput, opts) - default: - slog.Warn("Invalid log format, defaulting to text", "format", cfg.LogFormat) - handler = slog.NewTextHandler(logOutput, opts) - } - - // Set global logger before other packages initialize. - slog.SetDefault(slog.New(handler)) - }) -} - -func Logger(component string) *slog.Logger { - return slog.Default().With("component", component) -} - -func init() { - cfg, err := LoadConfig() - if err != nil { - slog.Error("Failed to load config", "error", err) - os.Exit(1) - } - - InitLogger(cfg) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "log/slog" + "os" + "strings" + "sync" +) + +const ( + filePermission = 0o644 + + // Log format types. 
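+ // InitLogger matches these case-insensitively: it lower-cases
+ // cfg.LogFormat before comparing.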
+ formatJSON = "json" + formatText = "text" +) + +var once sync.Once + +// getLogOutput determines where logs should be written. +func getLogOutput(logFilePath string) *os.File { + if logFilePath != "" { + // Try to open or create the log file. + file, err := os.OpenFile(logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, filePermission) + if err == nil { + return file + } + + slog.Error("Failed to open log file, defaulting to stdout", "error", err) + } + + return os.Stdout +} + +// InitLogger initializes the global logger with the provided configuration. +// It supports multiple output formats: text, json. +// This function is idempotent and thread-safe - it will only initialize once. +func InitLogger(cfg *Config) { + once.Do(func() { + var logLevel slog.Level + + logOutput := getLogOutput(cfg.LogFile) + + // Parse log level; default to INFO if invalid. + if err := logLevel.UnmarshalText([]byte(strings.ToLower(cfg.LogLevel))); err != nil { + slog.Warn("Invalid log level, defaulting to INFO", "error", err) + logLevel = slog.LevelInfo + } + + // Create handler based on format + var handler slog.Handler + + opts := &slog.HandlerOptions{Level: logLevel} + + switch strings.ToLower(cfg.LogFormat) { + case formatJSON: + handler = slog.NewJSONHandler(logOutput, opts) + case formatText: + handler = slog.NewTextHandler(logOutput, opts) + default: + slog.Warn("Invalid log format, defaulting to text", "format", cfg.LogFormat) + handler = slog.NewTextHandler(logOutput, opts) + } + + // Set global logger before other packages initialize. + slog.SetDefault(slog.New(handler)) + }) +} + +func Logger(component string) *slog.Logger { + return slog.Default().With("component", component) +} + +func init() { + cfg, err := LoadConfig() + if err != nil { + slog.Error("Failed to load config", "error", err) + os.Exit(1) + } + + InitLogger(cfg) +} diff --git a/utils/logging/logging_test.go b/utils/logging/logging_test.go index 4251c44fd..61927f7e4 100644 --- a/utils/logging/logging_test.go +++ b/utils/logging/logging_test.go @@ -1,495 +1,495 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package logging - -import ( - "bytes" - "encoding/json" - "log/slog" - "os" - "strings" - "testing" -) - -// TestJSONHandler verifies JSON output format. -func TestJSONHandler(t *testing.T) { - var buf bytes.Buffer - - // Create JSON handler - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - logger.Info("test message", "key", "value", "number", 42) - - output := buf.String() - - // Verify it's valid JSON - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Output is not valid JSON: %v\nOutput: %s", err, output) - } - - // Verify expected fields - if parsed["msg"] != "test message" { - t.Errorf("Expected msg='test message', got: %v", parsed["msg"]) - } - - if parsed["key"] != "value" { - t.Errorf("Expected key='value', got: %v", parsed["key"]) - } - - if parsed["number"] != float64(42) { // JSON numbers are float64 - t.Errorf("Expected number=42, got: %v", parsed["number"]) - } - - if parsed["level"] != DefaultLogLevel { - t.Errorf("Expected level=%s, got: %v", DefaultLogLevel, parsed["level"]) - } -} - -// TestTextHandler verifies text output format. 
-func TestTextHandler(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - logger.Info("test message", "key", "value") - - output := buf.String() - - // Verify key-value format - if !strings.Contains(output, "msg=\"test message\"") && !strings.Contains(output, "msg=test message") { - t.Errorf("Expected text format with msg, got: %s", output) - } - - if !strings.Contains(output, "key=value") { - t.Errorf("Expected text format with key=value, got: %s", output) - } - - if !strings.Contains(output, "level=INFO") { - t.Errorf("Expected text format with level=INFO, got: %s", output) - } -} - -// TestJSONHandlerMultipleFields verifies JSON with multiple fields. -func TestJSONHandlerMultipleFields(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) - logger.Debug("debug message", - "string_field", "test", - "int_field", 123, - "bool_field", true, - "float_field", 3.14, - ) - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v\nOutput: %s", err, output) - } - - // Verify different data types are preserved - if parsed["string_field"] != "test" { - t.Errorf("Expected string_field='test', got: %v", parsed["string_field"]) - } - - if parsed["int_field"] != float64(123) { - t.Errorf("Expected int_field=123, got: %v", parsed["int_field"]) - } - - if parsed["bool_field"] != true { - t.Errorf("Expected bool_field=true, got: %v", parsed["bool_field"]) - } - - if parsed["float_field"] != 3.14 { - t.Errorf("Expected float_field=3.14, got: %v", parsed["float_field"]) - } -} - -// TestLogLevels verifies different log levels work correctly. -func TestLogLevels(t *testing.T) { - tests := []struct { - name string - level slog.Level - logFunc func(*slog.Logger, string) - expected string - }{ - {"DEBUG", slog.LevelDebug, func(l *slog.Logger, msg string) { l.Debug(msg) }, "DEBUG"}, - {"INFO", slog.LevelInfo, func(l *slog.Logger, msg string) { l.Info(msg) }, "INFO"}, - {"WARN", slog.LevelWarn, func(l *slog.Logger, msg string) { l.Warn(msg) }, "WARN"}, - {"ERROR", slog.LevelError, func(l *slog.Logger, msg string) { l.Error(msg) }, "ERROR"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) - - tt.logFunc(logger, "test") - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(buf.String())), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - if parsed["level"] != tt.expected { - t.Errorf("Expected level=%s, got: %v", tt.expected, parsed["level"]) - } - }) - } -} - -// TestComponentLogger verifies component-specific loggers. 
-func TestComponentLogger(t *testing.T) { - var buf bytes.Buffer - - // Create base logger - baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - slog.SetDefault(baseLogger) - - // Create component logger (simulating Logger function) - componentLogger := slog.Default().With("component", "test-component") - componentLogger.Info("component message", "extra", "data") - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - // Verify component field is present - if parsed["component"] != "test-component" { - t.Errorf("Expected component='test-component', got: %v", parsed["component"]) - } - - if parsed["extra"] != "data" { - t.Errorf("Expected extra='data', got: %v", parsed["extra"]) - } -} - -// TestJSONHandlerNilSafety verifies handler works with nil/empty values. -func TestJSONHandlerNilSafety(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - logger.Info("test", "empty_string", "", "zero", 0) - - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - // Verify empty values are handled correctly - if parsed["empty_string"] != "" { - t.Errorf("Expected empty_string='', got: %v", parsed["empty_string"]) - } - - if parsed["zero"] != float64(0) { - t.Errorf("Expected zero=0, got: %v", parsed["zero"]) - } -} - -// TestDefaultConfig verifies default configuration values. -func TestDefaultConfig(t *testing.T) { - const ( - expectedLogLevel = "INFO" - expectedLogFormat = "text" - expectedEnvPrefix = "DIRECTORY_LOGGER" - ) - - if DefaultLogLevel != expectedLogLevel { - t.Errorf("Expected DefaultLogLevel='%s', got: %s", expectedLogLevel, DefaultLogLevel) - } - - if DefaultLogFormat != expectedLogFormat { - t.Errorf("Expected DefaultLogFormat='%s', got: %s", expectedLogFormat, DefaultLogFormat) - } - - if DefaultEnvPrefix != expectedEnvPrefix { - t.Errorf("Expected DefaultEnvPrefix='%s', got: %s", expectedEnvPrefix, DefaultEnvPrefix) - } -} - -// TestConfigStruct verifies Config struct can be marshaled. -func TestConfigStruct(t *testing.T) { - const testLogFormat = "json" - - cfg := Config{ - LogFile: "/tmp/test.log", - LogLevel: "DEBUG", - LogFormat: testLogFormat, - } - - if cfg.LogFile != "/tmp/test.log" { - t.Errorf("Expected LogFile='/tmp/test.log', got: %s", cfg.LogFile) - } - - if cfg.LogLevel != "DEBUG" { - t.Errorf("Expected LogLevel='DEBUG', got: %s", cfg.LogLevel) - } - - if cfg.LogFormat != testLogFormat { - t.Errorf("Expected LogFormat='%s', got: %s", testLogFormat, cfg.LogFormat) - } -} - -// TestTextHandlerMultipleMessages verifies multiple log messages don't interfere. 
-func TestTextHandlerMultipleMessages(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - logger.Info("first message", "id", 1) - logger.Info("second message", "id", 2) - - output := buf.String() - lines := strings.Split(strings.TrimSpace(output), "\n") - - if len(lines) != 2 { - t.Errorf("Expected 2 log lines, got: %d", len(lines)) - } - - // Verify both messages are present - if !strings.Contains(output, "first message") { - t.Error("Expected 'first message' in output") - } - - if !strings.Contains(output, "second message") { - t.Error("Expected 'second message' in output") - } -} - -// TestJSONHandlerMultipleMessages verifies multiple JSON log entries. -func TestJSONHandlerMultipleMessages(t *testing.T) { - var buf bytes.Buffer - - logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - logger.Info("first", "id", 1) - logger.Info("second", "id", 2) - - output := buf.String() - lines := strings.Split(strings.TrimSpace(output), "\n") - - if len(lines) != 2 { - t.Errorf("Expected 2 JSON lines, got: %d", len(lines)) - } - - // Verify each line is valid JSON - for i, line := range lines { - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(line), &parsed); err != nil { - t.Errorf("Line %d is not valid JSON: %v\nLine: %s", i+1, err, line) - } - } -} - -// TestGetLogOutputStdout verifies getLogOutput returns stdout for empty path. -func TestGetLogOutputStdout(t *testing.T) { - output := getLogOutput("") - if output != os.Stdout { - t.Error("Expected stdout for empty path") - } -} - -// TestGetLogOutputInvalidPath verifies getLogOutput falls back to stdout for invalid path. -func TestGetLogOutputInvalidPath(t *testing.T) { - // Use an invalid path (directory that doesn't exist) - output := getLogOutput("/invalid/directory/that/does/not/exist/test.log") - if output != os.Stdout { - t.Error("Expected stdout fallback for invalid path") - } -} - -// TestGetLogOutputValidPath verifies getLogOutput can create a log file. -func TestGetLogOutputValidPath(t *testing.T) { - // Create a temporary file - tmpFile := t.TempDir() + "/test.log" - - output := getLogOutput(tmpFile) - if output == os.Stdout { - t.Error("Expected file handle, got stdout") - } - - // Verify it's a file we can write to - if output != nil { - defer output.Close() - - _, err := output.WriteString("test") - if err != nil { - t.Errorf("Failed to write to log file: %v", err) - } - } -} - -// TestLoggerFunction verifies Logger() creates component-specific loggers. -func TestLoggerFunction(t *testing.T) { - // Set up a JSON handler so we can verify the output - var buf bytes.Buffer - - baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - slog.SetDefault(baseLogger) - - // Create component logger - componentLogger := Logger("test-component") - if componentLogger == nil { - t.Fatal("Logger() returned nil") - } - - // Log a message - componentLogger.Info("test message") - - // Verify component field is present - output := buf.String() - - var parsed map[string]interface{} - if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { - t.Fatalf("Failed to parse JSON: %v", err) - } - - if parsed["component"] != "test-component" { - t.Errorf("Expected component='test-component', got: %v", parsed["component"]) - } -} - -// TestLoggerMultipleComponents verifies multiple component loggers work independently. 
-func TestLoggerMultipleComponents(t *testing.T) { - var buf bytes.Buffer - - baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) - slog.SetDefault(baseLogger) - - // Create multiple component loggers - logger1 := Logger("component1") - logger2 := Logger("component2") - - logger1.Info("message from component1") - logger2.Info("message from component2") - - output := buf.String() - lines := strings.Split(strings.TrimSpace(output), "\n") - - if len(lines) != 2 { - t.Fatalf("Expected 2 log lines, got: %d", len(lines)) - } - - // Verify first line has component1 - var parsed1 map[string]interface{} - if err := json.Unmarshal([]byte(lines[0]), &parsed1); err != nil { - t.Fatalf("Failed to parse first line: %v", err) - } - - if parsed1["component"] != "component1" { - t.Errorf("Expected component='component1', got: %v", parsed1["component"]) - } - - // Verify second line has component2 - var parsed2 map[string]interface{} - if err := json.Unmarshal([]byte(lines[1]), &parsed2); err != nil { - t.Fatalf("Failed to parse second line: %v", err) - } - - if parsed2["component"] != "component2" { - t.Errorf("Expected component='component2', got: %v", parsed2["component"]) - } -} - -// TestInitLoggerWithTextFormat verifies InitLogger with text format. -func TestInitLoggerWithTextFormat(t *testing.T) { - // Note: InitLogger uses sync.Once, so we can't easily reset it. - // This test verifies the logic would work by testing the handler creation directly. - cfg := &Config{ - LogLevel: "INFO", - LogFormat: "text", - LogFile: "", - } - - // Verify config values are valid - if cfg.LogFormat != "text" { - t.Errorf("Expected LogFormat='text', got: %s", cfg.LogFormat) - } -} - -// TestInitLoggerWithJSONFormat verifies InitLogger with JSON format. -func TestInitLoggerWithJSONFormat(t *testing.T) { - const jsonFormat = "json" - - cfg := &Config{ - LogLevel: "DEBUG", - LogFormat: jsonFormat, - LogFile: "", - } - - // Verify config values are valid - if cfg.LogFormat != jsonFormat { - t.Errorf("Expected LogFormat='%s', got: %s", jsonFormat, cfg.LogFormat) - } -} - -// TestInitLoggerWithInvalidFormat verifies InitLogger with invalid format. -func TestInitLoggerWithInvalidFormat(t *testing.T) { - cfg := &Config{ - LogLevel: "INFO", - LogFormat: "invalid", - LogFile: "", - } - - // The actual InitLogger would fall back to text - // We verify the config can hold invalid values - if cfg.LogFormat != "invalid" { - t.Errorf("Config should preserve invalid format for InitLogger to handle") - } -} - -// TestInitLoggerWithFile verifies InitLogger with log file. -func TestInitLoggerWithFile(t *testing.T) { - const jsonFormat = "json" - - tmpFile := t.TempDir() + "/test.log" - - cfg := &Config{ - LogLevel: "INFO", - LogFormat: jsonFormat, - LogFile: tmpFile, - } - - // Verify config values - if cfg.LogFile != tmpFile { - t.Errorf("Expected LogFile=%s, got: %s", tmpFile, cfg.LogFile) - } -} - -// TestLogLevelParsing verifies different log level strings. 
-func TestLogLevelParsing(t *testing.T) { - tests := []struct { - input string - expected slog.Level - shouldOK bool - }{ - {"DEBUG", slog.LevelDebug, true}, - {"INFO", slog.LevelInfo, true}, - {"WARN", slog.LevelWarn, true}, - {"ERROR", slog.LevelError, true}, - {"debug", slog.LevelDebug, true}, - {"info", slog.LevelInfo, true}, - {"warn", slog.LevelWarn, true}, - {"error", slog.LevelError, true}, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - var level slog.Level - - err := level.UnmarshalText([]byte(strings.ToLower(tt.input))) - if tt.shouldOK && err != nil { - t.Errorf("Expected successful parse for %s, got error: %v", tt.input, err) - } - - if tt.shouldOK && level != tt.expected { - t.Errorf("Expected level=%v, got: %v", tt.expected, level) - } - }) - } -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package logging + +import ( + "bytes" + "encoding/json" + "log/slog" + "os" + "strings" + "testing" +) + +// TestJSONHandler verifies JSON output format. +func TestJSONHandler(t *testing.T) { + var buf bytes.Buffer + + // Create JSON handler + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + logger.Info("test message", "key", "value", "number", 42) + + output := buf.String() + + // Verify it's valid JSON + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Output is not valid JSON: %v\nOutput: %s", err, output) + } + + // Verify expected fields + if parsed["msg"] != "test message" { + t.Errorf("Expected msg='test message', got: %v", parsed["msg"]) + } + + if parsed["key"] != "value" { + t.Errorf("Expected key='value', got: %v", parsed["key"]) + } + + if parsed["number"] != float64(42) { // JSON numbers are float64 + t.Errorf("Expected number=42, got: %v", parsed["number"]) + } + + if parsed["level"] != DefaultLogLevel { + t.Errorf("Expected level=%s, got: %v", DefaultLogLevel, parsed["level"]) + } +} + +// TestTextHandler verifies text output format. +func TestTextHandler(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + logger.Info("test message", "key", "value") + + output := buf.String() + + // Verify key-value format + if !strings.Contains(output, "msg=\"test message\"") && !strings.Contains(output, "msg=test message") { + t.Errorf("Expected text format with msg, got: %s", output) + } + + if !strings.Contains(output, "key=value") { + t.Errorf("Expected text format with key=value, got: %s", output) + } + + if !strings.Contains(output, "level=INFO") { + t.Errorf("Expected text format with level=INFO, got: %s", output) + } +} + +// TestJSONHandlerMultipleFields verifies JSON with multiple fields. 
+func TestJSONHandlerMultipleFields(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + logger.Debug("debug message", + "string_field", "test", + "int_field", 123, + "bool_field", true, + "float_field", 3.14, + ) + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v\nOutput: %s", err, output) + } + + // Verify different data types are preserved + if parsed["string_field"] != "test" { + t.Errorf("Expected string_field='test', got: %v", parsed["string_field"]) + } + + if parsed["int_field"] != float64(123) { + t.Errorf("Expected int_field=123, got: %v", parsed["int_field"]) + } + + if parsed["bool_field"] != true { + t.Errorf("Expected bool_field=true, got: %v", parsed["bool_field"]) + } + + if parsed["float_field"] != 3.14 { + t.Errorf("Expected float_field=3.14, got: %v", parsed["float_field"]) + } +} + +// TestLogLevels verifies different log levels work correctly. +func TestLogLevels(t *testing.T) { + tests := []struct { + name string + level slog.Level + logFunc func(*slog.Logger, string) + expected string + }{ + {"DEBUG", slog.LevelDebug, func(l *slog.Logger, msg string) { l.Debug(msg) }, "DEBUG"}, + {"INFO", slog.LevelInfo, func(l *slog.Logger, msg string) { l.Info(msg) }, "INFO"}, + {"WARN", slog.LevelWarn, func(l *slog.Logger, msg string) { l.Warn(msg) }, "WARN"}, + {"ERROR", slog.LevelError, func(l *slog.Logger, msg string) { l.Error(msg) }, "ERROR"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelDebug})) + + tt.logFunc(logger, "test") + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(buf.String())), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + if parsed["level"] != tt.expected { + t.Errorf("Expected level=%s, got: %v", tt.expected, parsed["level"]) + } + }) + } +} + +// TestComponentLogger verifies component-specific loggers. +func TestComponentLogger(t *testing.T) { + var buf bytes.Buffer + + // Create base logger + baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + slog.SetDefault(baseLogger) + + // Create component logger (simulating Logger function) + componentLogger := slog.Default().With("component", "test-component") + componentLogger.Info("component message", "extra", "data") + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + // Verify component field is present + if parsed["component"] != "test-component" { + t.Errorf("Expected component='test-component', got: %v", parsed["component"]) + } + + if parsed["extra"] != "data" { + t.Errorf("Expected extra='data', got: %v", parsed["extra"]) + } +} + +// TestJSONHandlerNilSafety verifies handler works with nil/empty values. 
+func TestJSONHandlerNilSafety(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + logger.Info("test", "empty_string", "", "zero", 0) + + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + // Verify empty values are handled correctly + if parsed["empty_string"] != "" { + t.Errorf("Expected empty_string='', got: %v", parsed["empty_string"]) + } + + if parsed["zero"] != float64(0) { + t.Errorf("Expected zero=0, got: %v", parsed["zero"]) + } +} + +// TestDefaultConfig verifies default configuration values. +func TestDefaultConfig(t *testing.T) { + const ( + expectedLogLevel = "INFO" + expectedLogFormat = "text" + expectedEnvPrefix = "DIRECTORY_LOGGER" + ) + + if DefaultLogLevel != expectedLogLevel { + t.Errorf("Expected DefaultLogLevel='%s', got: %s", expectedLogLevel, DefaultLogLevel) + } + + if DefaultLogFormat != expectedLogFormat { + t.Errorf("Expected DefaultLogFormat='%s', got: %s", expectedLogFormat, DefaultLogFormat) + } + + if DefaultEnvPrefix != expectedEnvPrefix { + t.Errorf("Expected DefaultEnvPrefix='%s', got: %s", expectedEnvPrefix, DefaultEnvPrefix) + } +} + +// TestConfigStruct verifies Config struct can be marshaled. +func TestConfigStruct(t *testing.T) { + const testLogFormat = "json" + + cfg := Config{ + LogFile: "/tmp/test.log", + LogLevel: "DEBUG", + LogFormat: testLogFormat, + } + + if cfg.LogFile != "/tmp/test.log" { + t.Errorf("Expected LogFile='/tmp/test.log', got: %s", cfg.LogFile) + } + + if cfg.LogLevel != "DEBUG" { + t.Errorf("Expected LogLevel='DEBUG', got: %s", cfg.LogLevel) + } + + if cfg.LogFormat != testLogFormat { + t.Errorf("Expected LogFormat='%s', got: %s", testLogFormat, cfg.LogFormat) + } +} + +// TestTextHandlerMultipleMessages verifies multiple log messages don't interfere. +func TestTextHandlerMultipleMessages(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + logger.Info("first message", "id", 1) + logger.Info("second message", "id", 2) + + output := buf.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + + if len(lines) != 2 { + t.Errorf("Expected 2 log lines, got: %d", len(lines)) + } + + // Verify both messages are present + if !strings.Contains(output, "first message") { + t.Error("Expected 'first message' in output") + } + + if !strings.Contains(output, "second message") { + t.Error("Expected 'second message' in output") + } +} + +// TestJSONHandlerMultipleMessages verifies multiple JSON log entries. +func TestJSONHandlerMultipleMessages(t *testing.T) { + var buf bytes.Buffer + + logger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + logger.Info("first", "id", 1) + logger.Info("second", "id", 2) + + output := buf.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + + if len(lines) != 2 { + t.Errorf("Expected 2 JSON lines, got: %d", len(lines)) + } + + // Verify each line is valid JSON + for i, line := range lines { + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(line), &parsed); err != nil { + t.Errorf("Line %d is not valid JSON: %v\nLine: %s", i+1, err, line) + } + } +} + +// TestGetLogOutputStdout verifies getLogOutput returns stdout for empty path. 
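+// An empty path (and, below, an unwritable path) falls back to os.Stdout, so
+// constructing a logger never fails outright for lack of a log file.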
+func TestGetLogOutputStdout(t *testing.T) { + output := getLogOutput("") + if output != os.Stdout { + t.Error("Expected stdout for empty path") + } +} + +// TestGetLogOutputInvalidPath verifies getLogOutput falls back to stdout for invalid path. +func TestGetLogOutputInvalidPath(t *testing.T) { + // Use an invalid path (directory that doesn't exist) + output := getLogOutput("/invalid/directory/that/does/not/exist/test.log") + if output != os.Stdout { + t.Error("Expected stdout fallback for invalid path") + } +} + +// TestGetLogOutputValidPath verifies getLogOutput can create a log file. +func TestGetLogOutputValidPath(t *testing.T) { + // Create a temporary file + tmpFile := t.TempDir() + "/test.log" + + output := getLogOutput(tmpFile) + if output == os.Stdout { + t.Error("Expected file handle, got stdout") + } + + // Verify it's a file we can write to + if output != nil { + defer output.Close() + + _, err := output.WriteString("test") + if err != nil { + t.Errorf("Failed to write to log file: %v", err) + } + } +} + +// TestLoggerFunction verifies Logger() creates component-specific loggers. +func TestLoggerFunction(t *testing.T) { + // Set up a JSON handler so we can verify the output + var buf bytes.Buffer + + baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + slog.SetDefault(baseLogger) + + // Create component logger + componentLogger := Logger("test-component") + if componentLogger == nil { + t.Fatal("Logger() returned nil") + } + + // Log a message + componentLogger.Info("test message") + + // Verify component field is present + output := buf.String() + + var parsed map[string]interface{} + if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &parsed); err != nil { + t.Fatalf("Failed to parse JSON: %v", err) + } + + if parsed["component"] != "test-component" { + t.Errorf("Expected component='test-component', got: %v", parsed["component"]) + } +} + +// TestLoggerMultipleComponents verifies multiple component loggers work independently. +func TestLoggerMultipleComponents(t *testing.T) { + var buf bytes.Buffer + + baseLogger := slog.New(slog.NewJSONHandler(&buf, &slog.HandlerOptions{Level: slog.LevelInfo})) + slog.SetDefault(baseLogger) + + // Create multiple component loggers + logger1 := Logger("component1") + logger2 := Logger("component2") + + logger1.Info("message from component1") + logger2.Info("message from component2") + + output := buf.String() + lines := strings.Split(strings.TrimSpace(output), "\n") + + if len(lines) != 2 { + t.Fatalf("Expected 2 log lines, got: %d", len(lines)) + } + + // Verify first line has component1 + var parsed1 map[string]interface{} + if err := json.Unmarshal([]byte(lines[0]), &parsed1); err != nil { + t.Fatalf("Failed to parse first line: %v", err) + } + + if parsed1["component"] != "component1" { + t.Errorf("Expected component='component1', got: %v", parsed1["component"]) + } + + // Verify second line has component2 + var parsed2 map[string]interface{} + if err := json.Unmarshal([]byte(lines[1]), &parsed2); err != nil { + t.Fatalf("Failed to parse second line: %v", err) + } + + if parsed2["component"] != "component2" { + t.Errorf("Expected component='component2', got: %v", parsed2["component"]) + } +} + +// TestInitLoggerWithTextFormat verifies InitLogger with text format. +func TestInitLoggerWithTextFormat(t *testing.T) { + // Note: InitLogger uses sync.Once, so we can't easily reset it. + // This test verifies the logic would work by testing the handler creation directly. 
+ cfg := &Config{ + LogLevel: "INFO", + LogFormat: "text", + LogFile: "", + } + + // Verify config values are valid + if cfg.LogFormat != "text" { + t.Errorf("Expected LogFormat='text', got: %s", cfg.LogFormat) + } +} + +// TestInitLoggerWithJSONFormat verifies InitLogger with JSON format. +func TestInitLoggerWithJSONFormat(t *testing.T) { + const jsonFormat = "json" + + cfg := &Config{ + LogLevel: "DEBUG", + LogFormat: jsonFormat, + LogFile: "", + } + + // Verify config values are valid + if cfg.LogFormat != jsonFormat { + t.Errorf("Expected LogFormat='%s', got: %s", jsonFormat, cfg.LogFormat) + } +} + +// TestInitLoggerWithInvalidFormat verifies InitLogger with invalid format. +func TestInitLoggerWithInvalidFormat(t *testing.T) { + cfg := &Config{ + LogLevel: "INFO", + LogFormat: "invalid", + LogFile: "", + } + + // The actual InitLogger would fall back to text + // We verify the config can hold invalid values + if cfg.LogFormat != "invalid" { + t.Errorf("Config should preserve invalid format for InitLogger to handle") + } +} + +// TestInitLoggerWithFile verifies InitLogger with log file. +func TestInitLoggerWithFile(t *testing.T) { + const jsonFormat = "json" + + tmpFile := t.TempDir() + "/test.log" + + cfg := &Config{ + LogLevel: "INFO", + LogFormat: jsonFormat, + LogFile: tmpFile, + } + + // Verify config values + if cfg.LogFile != tmpFile { + t.Errorf("Expected LogFile=%s, got: %s", tmpFile, cfg.LogFile) + } +} + +// TestLogLevelParsing verifies different log level strings. +func TestLogLevelParsing(t *testing.T) { + tests := []struct { + input string + expected slog.Level + shouldOK bool + }{ + {"DEBUG", slog.LevelDebug, true}, + {"INFO", slog.LevelInfo, true}, + {"WARN", slog.LevelWarn, true}, + {"ERROR", slog.LevelError, true}, + {"debug", slog.LevelDebug, true}, + {"info", slog.LevelInfo, true}, + {"warn", slog.LevelWarn, true}, + {"error", slog.LevelError, true}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + var level slog.Level + + err := level.UnmarshalText([]byte(strings.ToLower(tt.input))) + if tt.shouldOK && err != nil { + t.Errorf("Expected successful parse for %s, got error: %v", tt.input, err) + } + + if tt.shouldOK && level != tt.expected { + t.Errorf("Expected level=%v, got: %v", tt.expected, level) + } + }) + } +} diff --git a/utils/spiffe/retry.go b/utils/spiffe/retry.go index 662b57792..9bb8a365b 100644 --- a/utils/spiffe/retry.go +++ b/utils/spiffe/retry.go @@ -1,173 +1,173 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package spiffe - -import ( - "errors" - "fmt" - "io" - "log/slog" - "time" - - "github.com/spiffe/go-spiffe/v2/svid/x509svid" -) - -// Default retry configuration constants. -const ( - DefaultMaxRetries = 10 - DefaultInitialBackoff = 500 * time.Millisecond - DefaultMaxBackoff = 10 * time.Second -) - -// x509SourceGetter defines the interface for getting X509-SVIDs. -// This interface allows us to work with any X509 source that implements GetX509SVID(). -type x509SourceGetter interface { - GetX509SVID() (*x509svid.SVID, error) -} - -// X509SourceWithRetry wraps an x509svid.Source and adds retry logic to GetX509SVID(). -// This ensures that retry logic is applied not just during setup, but also during -// TLS handshakes when grpccredentials.MTLSClientCredentials or MTLSServerCredentials -// calls GetX509SVID(). -// The wrapper implements both x509svid.Source and io.Closer interfaces. 
-type X509SourceWithRetry struct { - src x509svid.Source // The Source interface (workloadapi.X509Source implements this) - closer io.Closer // The Closer interface for Close() - maxRetries int - initialBackoff time.Duration - maxBackoff time.Duration - logger *slog.Logger -} - -// NewX509SourceWithRetry creates a new X509SourceWithRetry wrapper with configurable retry parameters. -// -// Parameters: -// - src: The X509 source to wrap (must implement x509svid.Source) -// - closer: The closer for cleanup (typically the same as src) -// - logger: Logger instance for retry logic logging -// - maxRetries: Maximum number of retry attempts (use DefaultMaxRetries for default) -// - initialBackoff: Initial backoff duration between retries (use DefaultInitialBackoff for default) -// - maxBackoff: Maximum backoff duration (exponential backoff is capped at this value, use DefaultMaxBackoff for default) -func NewX509SourceWithRetry( - src x509svid.Source, - closer io.Closer, - logger *slog.Logger, - maxRetries int, - initialBackoff time.Duration, - maxBackoff time.Duration, -) *X509SourceWithRetry { - return &X509SourceWithRetry{ - src: src, - closer: closer, - maxRetries: maxRetries, - initialBackoff: initialBackoff, - maxBackoff: maxBackoff, - logger: logger, - } -} - -// GetX509SVID implements x509svid.Source interface with retry logic. -// This method is called by grpccredentials.MTLSClientCredentials/MTLSServerCredentials during TLS handshake. -func (w *X509SourceWithRetry) GetX509SVID() (*x509svid.SVID, error) { - w.logger.Info("X509SourceWithRetry.GetX509SVID() called (likely during TLS handshake)", - "max_retries", w.maxRetries, - "initial_backoff", w.initialBackoff, - "max_backoff", w.maxBackoff) - - svid, err := GetX509SVIDWithRetry(w.src, w.maxRetries, w.initialBackoff, w.maxBackoff, w.logger) - switch { - case err != nil: - w.logger.Error("X509SourceWithRetry.GetX509SVID() failed after retries", "error", err, "max_retries", w.maxRetries) - case svid == nil: - w.logger.Warn("X509SourceWithRetry.GetX509SVID() returned nil SVID") - case svid.ID.IsZero(): - w.logger.Warn("X509SourceWithRetry.GetX509SVID() returned SVID with zero ID (no URI SAN)", "has_certificate", svid != nil) - default: - w.logger.Info("X509SourceWithRetry.GetX509SVID() succeeded", "spiffe_id", svid.ID.String(), "has_certificate", svid != nil) - } - - return svid, err -} - -// Close implements io.Closer interface by delegating to the wrapped source. -func (w *X509SourceWithRetry) Close() error { - if err := w.closer.Close(); err != nil { - return fmt.Errorf("failed to close X509 source: %w", err) - } - - return nil -} - -// GetX509SVIDWithRetry attempts to get a valid X509-SVID with retry logic. -// This handles timing issues where the SPIRE entry hasn't synced to the agent yet -// (common with CronJobs and other short-lived workloads or pod restarts). -// The agent may return a certificate without a URI SAN (SPIFFE ID) if the entry hasn't synced, -// so we must validate that the certificate actually contains a valid SPIFFE ID. 
-// -// Parameters: -// - src: The X509 source to get SVIDs from -// - maxRetries: Maximum number of retry attempts -// - initialBackoff: Initial backoff duration between retries -// - maxBackoff: Maximum backoff duration (exponential backoff is capped at this value) -// - logger: Logger instance for retry logic logging -func GetX509SVIDWithRetry( - src x509SourceGetter, - maxRetries int, - initialBackoff, maxBackoff time.Duration, - logger *slog.Logger, -) (*x509svid.SVID, error) { - var ( - svidErr error - svid *x509svid.SVID - ) - - logger.Debug("Starting X509-SVID retry logic", "max_retries", maxRetries, "initial_backoff", initialBackoff, "max_backoff", maxBackoff) - - backoff := initialBackoff - - for attempt := range maxRetries { - logger.Debug("Attempting to get X509-SVID", "attempt", attempt+1) - - svid, svidErr = src.GetX509SVID() - switch { - case svidErr == nil && svid != nil && !svid.ID.IsZero(): - // Valid SVID with SPIFFE ID, proceed - logger.Debug("SVID obtained", "spiffe_id", svid.ID.String(), "is_zero", svid.ID.IsZero()) - logger.Info("Successfully obtained valid X509-SVID with SPIFFE ID", "spiffe_id", svid.ID.String(), "attempt", attempt+1) - - return svid, nil - case svidErr == nil && svid != nil: - // Certificate exists but lacks SPIFFE ID - treat as error and retry - logger.Debug("SVID obtained", "spiffe_id", svid.ID.String(), "is_zero", svid.ID.IsZero()) - - svidErr = errors.New("certificate contains no URI SAN (SPIFFE ID)") - logger.Warn("SVID obtained but lacks URI SAN, retrying", "attempt", attempt+1, "error", svidErr) - case svidErr != nil: - logger.Warn("Failed to get X509-SVID", "attempt", attempt+1, "error", svidErr) - default: - logger.Warn("GetX509SVID returned nil SVID with no error, retrying", "attempt", attempt+1) - - svidErr = errors.New("nil SVID returned") // Force retry - } - - if attempt < maxRetries-1 { - logger.Debug("Backing off before next retry", "duration", backoff, "attempt", attempt+1) - // Exponential backoff: initialBackoff, initialBackoff*2, initialBackoff*4, ... (capped at maxBackoff) - time.Sleep(backoff) - - backoff *= 2 - if backoff > maxBackoff { - backoff = maxBackoff - } - } - } - - if svidErr == nil { - svidErr = errors.New("certificate contains no URI SAN (SPIFFE ID)") - } - - logger.Error("Failed to get valid X509-SVID after retries", "max_retries", maxRetries, "error", svidErr, "final_svid", svid) - - return nil, fmt.Errorf("failed to get valid X509-SVID after %d retries: %w", maxRetries, svidErr) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package spiffe + +import ( + "errors" + "fmt" + "io" + "log/slog" + "time" + + "github.com/spiffe/go-spiffe/v2/svid/x509svid" +) + +// Default retry configuration constants. +const ( + DefaultMaxRetries = 10 + DefaultInitialBackoff = 500 * time.Millisecond + DefaultMaxBackoff = 10 * time.Second +) + +// x509SourceGetter defines the interface for getting X509-SVIDs. +// This interface allows us to work with any X509 source that implements GetX509SVID(). +type x509SourceGetter interface { + GetX509SVID() (*x509svid.SVID, error) +} + +// X509SourceWithRetry wraps an x509svid.Source and adds retry logic to GetX509SVID(). +// This ensures that retry logic is applied not just during setup, but also during +// TLS handshakes when grpccredentials.MTLSClientCredentials or MTLSServerCredentials +// calls GetX509SVID(). +// The wrapper implements both x509svid.Source and io.Closer interfaces. 
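+//
+// A minimal construction sketch (assumes an existing workloadapi.X509Source
+// named src, which implements both interfaces; values are illustrative, not
+// part of this change):
+//
+//	retrySrc := NewX509SourceWithRetry(src, src, slog.Default(),
+//		DefaultMaxRetries, DefaultInitialBackoff, DefaultMaxBackoff)
+//	defer retrySrc.Close()
+//	creds := grpccredentials.MTLSClientCredentials(retrySrc, src, tlsconfig.AuthorizeAny())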
+type X509SourceWithRetry struct { + src x509svid.Source // The Source interface (workloadapi.X509Source implements this) + closer io.Closer // The Closer interface for Close() + maxRetries int + initialBackoff time.Duration + maxBackoff time.Duration + logger *slog.Logger +} + +// NewX509SourceWithRetry creates a new X509SourceWithRetry wrapper with configurable retry parameters. +// +// Parameters: +// - src: The X509 source to wrap (must implement x509svid.Source) +// - closer: The closer for cleanup (typically the same as src) +// - logger: Logger instance for retry logic logging +// - maxRetries: Maximum number of retry attempts (use DefaultMaxRetries for default) +// - initialBackoff: Initial backoff duration between retries (use DefaultInitialBackoff for default) +// - maxBackoff: Maximum backoff duration (exponential backoff is capped at this value, use DefaultMaxBackoff for default) +func NewX509SourceWithRetry( + src x509svid.Source, + closer io.Closer, + logger *slog.Logger, + maxRetries int, + initialBackoff time.Duration, + maxBackoff time.Duration, +) *X509SourceWithRetry { + return &X509SourceWithRetry{ + src: src, + closer: closer, + maxRetries: maxRetries, + initialBackoff: initialBackoff, + maxBackoff: maxBackoff, + logger: logger, + } +} + +// GetX509SVID implements x509svid.Source interface with retry logic. +// This method is called by grpccredentials.MTLSClientCredentials/MTLSServerCredentials during TLS handshake. +func (w *X509SourceWithRetry) GetX509SVID() (*x509svid.SVID, error) { + w.logger.Info("X509SourceWithRetry.GetX509SVID() called (likely during TLS handshake)", + "max_retries", w.maxRetries, + "initial_backoff", w.initialBackoff, + "max_backoff", w.maxBackoff) + + svid, err := GetX509SVIDWithRetry(w.src, w.maxRetries, w.initialBackoff, w.maxBackoff, w.logger) + switch { + case err != nil: + w.logger.Error("X509SourceWithRetry.GetX509SVID() failed after retries", "error", err, "max_retries", w.maxRetries) + case svid == nil: + w.logger.Warn("X509SourceWithRetry.GetX509SVID() returned nil SVID") + case svid.ID.IsZero(): + w.logger.Warn("X509SourceWithRetry.GetX509SVID() returned SVID with zero ID (no URI SAN)", "has_certificate", svid != nil) + default: + w.logger.Info("X509SourceWithRetry.GetX509SVID() succeeded", "spiffe_id", svid.ID.String(), "has_certificate", svid != nil) + } + + return svid, err +} + +// Close implements io.Closer interface by delegating to the wrapped source. +func (w *X509SourceWithRetry) Close() error { + if err := w.closer.Close(); err != nil { + return fmt.Errorf("failed to close X509 source: %w", err) + } + + return nil +} + +// GetX509SVIDWithRetry attempts to get a valid X509-SVID with retry logic. +// This handles timing issues where the SPIRE entry hasn't synced to the agent yet +// (common with CronJobs and other short-lived workloads or pod restarts). +// The agent may return a certificate without a URI SAN (SPIFFE ID) if the entry hasn't synced, +// so we must validate that the certificate actually contains a valid SPIFFE ID. 
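+//
+// With the package defaults (10 attempts, 500ms initial backoff, 10s cap) the
+// sleeps between attempts are roughly 0.5s, 1s, 2s, 4s, 8s, then 10s each,
+// about 55s of waiting in the worst case.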
+// +// Parameters: +// - src: The X509 source to get SVIDs from +// - maxRetries: Maximum number of retry attempts +// - initialBackoff: Initial backoff duration between retries +// - maxBackoff: Maximum backoff duration (exponential backoff is capped at this value) +// - logger: Logger instance for retry logic logging +func GetX509SVIDWithRetry( + src x509SourceGetter, + maxRetries int, + initialBackoff, maxBackoff time.Duration, + logger *slog.Logger, +) (*x509svid.SVID, error) { + var ( + svidErr error + svid *x509svid.SVID + ) + + logger.Debug("Starting X509-SVID retry logic", "max_retries", maxRetries, "initial_backoff", initialBackoff, "max_backoff", maxBackoff) + + backoff := initialBackoff + + for attempt := range maxRetries { + logger.Debug("Attempting to get X509-SVID", "attempt", attempt+1) + + svid, svidErr = src.GetX509SVID() + switch { + case svidErr == nil && svid != nil && !svid.ID.IsZero(): + // Valid SVID with SPIFFE ID, proceed + logger.Debug("SVID obtained", "spiffe_id", svid.ID.String(), "is_zero", svid.ID.IsZero()) + logger.Info("Successfully obtained valid X509-SVID with SPIFFE ID", "spiffe_id", svid.ID.String(), "attempt", attempt+1) + + return svid, nil + case svidErr == nil && svid != nil: + // Certificate exists but lacks SPIFFE ID - treat as error and retry + logger.Debug("SVID obtained", "spiffe_id", svid.ID.String(), "is_zero", svid.ID.IsZero()) + + svidErr = errors.New("certificate contains no URI SAN (SPIFFE ID)") + logger.Warn("SVID obtained but lacks URI SAN, retrying", "attempt", attempt+1, "error", svidErr) + case svidErr != nil: + logger.Warn("Failed to get X509-SVID", "attempt", attempt+1, "error", svidErr) + default: + logger.Warn("GetX509SVID returned nil SVID with no error, retrying", "attempt", attempt+1) + + svidErr = errors.New("nil SVID returned") // Force retry + } + + if attempt < maxRetries-1 { + logger.Debug("Backing off before next retry", "duration", backoff, "attempt", attempt+1) + // Exponential backoff: initialBackoff, initialBackoff*2, initialBackoff*4, ... (capped at maxBackoff) + time.Sleep(backoff) + + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + } + + if svidErr == nil { + svidErr = errors.New("certificate contains no URI SAN (SPIFFE ID)") + } + + logger.Error("Failed to get valid X509-SVID after retries", "max_retries", maxRetries, "error", svidErr, "final_svid", svid) + + return nil, fmt.Errorf("failed to get valid X509-SVID after %d retries: %w", maxRetries, svidErr) +} diff --git a/utils/spiffe/retry_test.go b/utils/spiffe/retry_test.go index 2af8b3cd6..8aad7747b 100644 --- a/utils/spiffe/retry_test.go +++ b/utils/spiffe/retry_test.go @@ -1,216 +1,216 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package spiffe - -import ( - "errors" - "log/slog" - "testing" - "time" - - "github.com/spiffe/go-spiffe/v2/spiffeid" - "github.com/spiffe/go-spiffe/v2/svid/x509svid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// mockX509Source is a mock implementation of x509SourceGetter for testing. 
-type mockX509Source struct { - calls []int - responses []mockResponse - callCount int - closeFunc func() error -} - -type mockResponse struct { - svid *x509svid.SVID - err error -} - -func newMockX509Source(responses ...mockResponse) *mockX509Source { - return &mockX509Source{ - responses: responses, - calls: make([]int, 0), - } -} - -func (m *mockX509Source) GetX509SVID() (*x509svid.SVID, error) { - m.callCount++ - m.calls = append(m.calls, m.callCount) - - if len(m.responses) == 0 { - return nil, errors.New("no mock response configured") - } - - // Use modulo to cycle through responses if we have fewer responses than calls - idx := (m.callCount - 1) % len(m.responses) - - return m.responses[idx].svid, m.responses[idx].err -} - -func (m *mockX509Source) Close() error { - if m.closeFunc != nil { - return m.closeFunc() - } - - return nil -} - -// Test retry parameters - use smaller values for faster tests. -const ( - testMaxRetries = 3 - testInitialBackoff = 10 * time.Millisecond - testMaxBackoff = 100 * time.Millisecond -) - -// testLogger creates a logger for testing. -func testLogger() *slog.Logger { - return slog.Default() -} - -// createValidSVID creates a mock SVID with a valid SPIFFE ID. -func createValidSVID(t *testing.T) *x509svid.SVID { - t.Helper() - // Create a minimal valid SPIFFE ID - id, err := spiffeid.FromString("spiffe://example.org/test/workload") - require.NoError(t, err) - - // Create a minimal SVID with the ID - // Note: In a real scenario, this would have certificates, but for testing - // we only need to validate the ID field - return &x509svid.SVID{ - ID: id, - } -} - -// createZeroIDSVID creates a mock SVID with a zero SPIFFE ID (no URI SAN). -func createZeroIDSVID(t *testing.T) *x509svid.SVID { - t.Helper() - // Create an SVID with zero ID (no URI SAN) - this is the zero value - return &x509svid.SVID{ - ID: spiffeid.ID{}, // Zero value - } -} - -func TestGetX509SVIDWithRetry_ImmediateSuccess(t *testing.T) { - t.Run("should return valid SVID on first attempt", func(t *testing.T) { - validSVID := createValidSVID(t) - mockSrc := newMockX509Source(mockResponse{svid: validSVID, err: nil}) - - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.NoError(t, err) - assert.NotNil(t, svid) - assert.Equal(t, validSVID.ID, svid.ID) - assert.Equal(t, 1, mockSrc.callCount, "should only call GetX509SVID once") - }) -} - -func TestGetX509SVIDWithRetry_RetryAfterError(t *testing.T) { - t.Run("should retry after errors and eventually succeed", func(t *testing.T) { - validSVID := createValidSVID(t) - mockSrc := newMockX509Source( - mockResponse{svid: nil, err: errors.New("temporary error")}, - mockResponse{svid: nil, err: errors.New("temporary error")}, - mockResponse{svid: validSVID, err: nil}, - ) - - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.NoError(t, err) - assert.NotNil(t, svid) - assert.Equal(t, validSVID.ID, svid.ID) - assert.Equal(t, 3, mockSrc.callCount, "should retry 3 times") - }) -} - -func TestGetX509SVIDWithRetry_RetryAfterZeroID(t *testing.T) { - t.Run("should retry when SVID has zero SPIFFE ID", func(t *testing.T) { - zeroIDSVID := createZeroIDSVID(t) - validSVID := createValidSVID(t) - mockSrc := newMockX509Source( - mockResponse{svid: zeroIDSVID, err: nil}, // Certificate but no URI SAN - mockResponse{svid: zeroIDSVID, err: nil}, // Still no URI SAN - mockResponse{svid: validSVID, err: nil}, // Finally valid - ) - - 
svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.NoError(t, err) - assert.NotNil(t, svid) - assert.Equal(t, validSVID.ID, svid.ID) - assert.Equal(t, 3, mockSrc.callCount, "should retry until valid SVID") - }) -} - -func TestGetX509SVIDWithRetry_MaxRetriesExceeded(t *testing.T) { - t.Run("should return error after max retries with errors", func(t *testing.T) { - mockSrc := newMockX509Source( - mockResponse{svid: nil, err: errors.New("persistent error")}, - ) - - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.Error(t, err) - assert.Nil(t, svid) - assert.Contains(t, err.Error(), "failed to get valid X509-SVID after 3 retries") - assert.Contains(t, err.Error(), "persistent error") - assert.Equal(t, testMaxRetries, mockSrc.callCount, "should retry max 3 times") - }) - - t.Run("should return error after max retries with zero ID", func(t *testing.T) { - zeroIDSVID := createZeroIDSVID(t) - mockSrc := newMockX509Source( - mockResponse{svid: zeroIDSVID, err: nil}, // Certificate but no URI SAN - ) - - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.Error(t, err) - assert.Nil(t, svid) - assert.Contains(t, err.Error(), "failed to get valid X509-SVID after 3 retries") - assert.Contains(t, err.Error(), "certificate contains no URI SAN") - assert.Equal(t, testMaxRetries, mockSrc.callCount, "should retry max 3 times") - }) -} - -func TestGetX509SVIDWithRetry_NilSVID(t *testing.T) { - t.Run("should retry when GetX509SVID returns nil SVID", func(t *testing.T) { - validSVID := createValidSVID(t) - mockSrc := newMockX509Source( - mockResponse{svid: nil, err: nil}, // Nil SVID, no error - mockResponse{svid: validSVID, err: nil}, - ) - - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - - require.NoError(t, err) - assert.NotNil(t, svid) - assert.Equal(t, validSVID.ID, svid.ID) - assert.Equal(t, 2, mockSrc.callCount, "should retry after nil SVID") - }) -} - -func TestGetX509SVIDWithRetry_ExponentialBackoff(t *testing.T) { - t.Run("should use exponential backoff between retries", func(t *testing.T) { - // This test verifies that backoff is applied with test parameters - validSVID := createValidSVID(t) - mockSrc := newMockX509Source( - mockResponse{svid: nil, err: errors.New("error 1")}, - mockResponse{svid: nil, err: errors.New("error 2")}, - mockResponse{svid: validSVID, err: nil}, - ) - - start := time.Now() - svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) - duration := time.Since(start) - - require.NoError(t, err) - assert.NotNil(t, svid) - // Verify that some backoff was applied (at least 10ms + 20ms = 30ms) - // But allow some tolerance for test execution time - assert.GreaterOrEqual(t, duration, 20*time.Millisecond, "should have applied exponential backoff") - assert.Equal(t, 3, mockSrc.callCount, "should retry 3 times") - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package spiffe + +import ( + "errors" + "log/slog" + "testing" + "time" + + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockX509Source is a mock implementation of x509SourceGetter for testing. 
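+// Responses are returned in order and cycle via modulo once exhausted, so a
+// single configured response effectively repeats on every call.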
+type mockX509Source struct { + calls []int + responses []mockResponse + callCount int + closeFunc func() error +} + +type mockResponse struct { + svid *x509svid.SVID + err error +} + +func newMockX509Source(responses ...mockResponse) *mockX509Source { + return &mockX509Source{ + responses: responses, + calls: make([]int, 0), + } +} + +func (m *mockX509Source) GetX509SVID() (*x509svid.SVID, error) { + m.callCount++ + m.calls = append(m.calls, m.callCount) + + if len(m.responses) == 0 { + return nil, errors.New("no mock response configured") + } + + // Use modulo to cycle through responses if we have fewer responses than calls + idx := (m.callCount - 1) % len(m.responses) + + return m.responses[idx].svid, m.responses[idx].err +} + +func (m *mockX509Source) Close() error { + if m.closeFunc != nil { + return m.closeFunc() + } + + return nil +} + +// Test retry parameters - use smaller values for faster tests. +const ( + testMaxRetries = 3 + testInitialBackoff = 10 * time.Millisecond + testMaxBackoff = 100 * time.Millisecond +) + +// testLogger creates a logger for testing. +func testLogger() *slog.Logger { + return slog.Default() +} + +// createValidSVID creates a mock SVID with a valid SPIFFE ID. +func createValidSVID(t *testing.T) *x509svid.SVID { + t.Helper() + // Create a minimal valid SPIFFE ID + id, err := spiffeid.FromString("spiffe://example.org/test/workload") + require.NoError(t, err) + + // Create a minimal SVID with the ID + // Note: In a real scenario, this would have certificates, but for testing + // we only need to validate the ID field + return &x509svid.SVID{ + ID: id, + } +} + +// createZeroIDSVID creates a mock SVID with a zero SPIFFE ID (no URI SAN). +func createZeroIDSVID(t *testing.T) *x509svid.SVID { + t.Helper() + // Create an SVID with zero ID (no URI SAN) - this is the zero value + return &x509svid.SVID{ + ID: spiffeid.ID{}, // Zero value + } +} + +func TestGetX509SVIDWithRetry_ImmediateSuccess(t *testing.T) { + t.Run("should return valid SVID on first attempt", func(t *testing.T) { + validSVID := createValidSVID(t) + mockSrc := newMockX509Source(mockResponse{svid: validSVID, err: nil}) + + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.NoError(t, err) + assert.NotNil(t, svid) + assert.Equal(t, validSVID.ID, svid.ID) + assert.Equal(t, 1, mockSrc.callCount, "should only call GetX509SVID once") + }) +} + +func TestGetX509SVIDWithRetry_RetryAfterError(t *testing.T) { + t.Run("should retry after errors and eventually succeed", func(t *testing.T) { + validSVID := createValidSVID(t) + mockSrc := newMockX509Source( + mockResponse{svid: nil, err: errors.New("temporary error")}, + mockResponse{svid: nil, err: errors.New("temporary error")}, + mockResponse{svid: validSVID, err: nil}, + ) + + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.NoError(t, err) + assert.NotNil(t, svid) + assert.Equal(t, validSVID.ID, svid.ID) + assert.Equal(t, 3, mockSrc.callCount, "should retry 3 times") + }) +} + +func TestGetX509SVIDWithRetry_RetryAfterZeroID(t *testing.T) { + t.Run("should retry when SVID has zero SPIFFE ID", func(t *testing.T) { + zeroIDSVID := createZeroIDSVID(t) + validSVID := createValidSVID(t) + mockSrc := newMockX509Source( + mockResponse{svid: zeroIDSVID, err: nil}, // Certificate but no URI SAN + mockResponse{svid: zeroIDSVID, err: nil}, // Still no URI SAN + mockResponse{svid: validSVID, err: nil}, // Finally valid + ) + + 
svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.NoError(t, err) + assert.NotNil(t, svid) + assert.Equal(t, validSVID.ID, svid.ID) + assert.Equal(t, 3, mockSrc.callCount, "should retry until valid SVID") + }) +} + +func TestGetX509SVIDWithRetry_MaxRetriesExceeded(t *testing.T) { + t.Run("should return error after max retries with errors", func(t *testing.T) { + mockSrc := newMockX509Source( + mockResponse{svid: nil, err: errors.New("persistent error")}, + ) + + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.Error(t, err) + assert.Nil(t, svid) + assert.Contains(t, err.Error(), "failed to get valid X509-SVID after 3 retries") + assert.Contains(t, err.Error(), "persistent error") + assert.Equal(t, testMaxRetries, mockSrc.callCount, "should retry max 3 times") + }) + + t.Run("should return error after max retries with zero ID", func(t *testing.T) { + zeroIDSVID := createZeroIDSVID(t) + mockSrc := newMockX509Source( + mockResponse{svid: zeroIDSVID, err: nil}, // Certificate but no URI SAN + ) + + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.Error(t, err) + assert.Nil(t, svid) + assert.Contains(t, err.Error(), "failed to get valid X509-SVID after 3 retries") + assert.Contains(t, err.Error(), "certificate contains no URI SAN") + assert.Equal(t, testMaxRetries, mockSrc.callCount, "should retry max 3 times") + }) +} + +func TestGetX509SVIDWithRetry_NilSVID(t *testing.T) { + t.Run("should retry when GetX509SVID returns nil SVID", func(t *testing.T) { + validSVID := createValidSVID(t) + mockSrc := newMockX509Source( + mockResponse{svid: nil, err: nil}, // Nil SVID, no error + mockResponse{svid: validSVID, err: nil}, + ) + + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + + require.NoError(t, err) + assert.NotNil(t, svid) + assert.Equal(t, validSVID.ID, svid.ID) + assert.Equal(t, 2, mockSrc.callCount, "should retry after nil SVID") + }) +} + +func TestGetX509SVIDWithRetry_ExponentialBackoff(t *testing.T) { + t.Run("should use exponential backoff between retries", func(t *testing.T) { + // This test verifies that backoff is applied with test parameters + validSVID := createValidSVID(t) + mockSrc := newMockX509Source( + mockResponse{svid: nil, err: errors.New("error 1")}, + mockResponse{svid: nil, err: errors.New("error 2")}, + mockResponse{svid: validSVID, err: nil}, + ) + + start := time.Now() + svid, err := GetX509SVIDWithRetry(mockSrc, testMaxRetries, testInitialBackoff, testMaxBackoff, testLogger()) + duration := time.Since(start) + + require.NoError(t, err) + assert.NotNil(t, svid) + // Verify that some backoff was applied (at least 10ms + 20ms = 30ms) + // But allow some tolerance for test execution time + assert.GreaterOrEqual(t, duration, 20*time.Millisecond, "should have applied exponential backoff") + assert.Equal(t, 3, mockSrc.callCount, "should retry 3 times") + }) +} diff --git a/utils/zot/config.go b/utils/zot/config.go index 0e4869dfc..7ef5a60e5 100644 --- a/utils/zot/config.go +++ b/utils/zot/config.go @@ -1,248 +1,248 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package zot - -import ( - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "strings" - - zotconfig "zotregistry.dev/zot/v2/pkg/api/config" - zotextensionsconfig 
"zotregistry.dev/zot/v2/pkg/extensions/config" - zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" -) - -// readConfigFile reads and parses the zot configuration file. -func readConfigFile(filePath string) (*zotconfig.Config, error) { - config, err := os.ReadFile(filePath) - if err != nil { - return nil, fmt.Errorf("failed to read zot config file %s: %w", filePath, err) - } - - logger.Debug("Read zot config file", "file", string(config)) - - var zotConfig zotconfig.Config - if err := json.Unmarshal(config, &zotConfig); err != nil { - return nil, fmt.Errorf("failed to unmarshal zot config: %w", err) - } - - return &zotConfig, nil -} - -// writeConfigFile marshals and writes the zot configuration file. -func writeConfigFile(filePath string, zotConfig *zotconfig.Config) error { - updatedConfig, err := json.MarshalIndent(zotConfig, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal updated zot config: %w", err) - } - - if err := os.WriteFile(filePath, updatedConfig, 0o644); err != nil { //nolint:gosec,mnd - return fmt.Errorf("failed to write updated zot config: %w", err) - } - - return nil -} - -// addRegistryToSyncConfig adds a registry to the zot sync configuration. -func AddRegistryToSyncConfig(filePath string, remoteRegistryURL string, remoteRepositoryName string, credentials zotsyncconfig.Credentials, cids []string) error { - logger.Debug("Adding registry to zot sync", "remote_url", remoteRegistryURL) - - // Validate input - if remoteRegistryURL == "" { - return errors.New("remote registry URL cannot be empty") - } - - // Read current zot config - zotConfig, err := readConfigFile(filePath) - if err != nil { - return err - } - - // Initialize extensions if nil - if zotConfig.Extensions == nil { - zotConfig.Extensions = &zotextensionsconfig.ExtensionConfig{} - } - - // Initialize sync config if nil - syncConfig := zotConfig.Extensions.Sync - if syncConfig == nil { - syncConfig = &zotsyncconfig.Config{} - zotConfig.Extensions.Sync = syncConfig - } - - syncConfig.Enable = toPtr(true) - - // Create credentials file if credentials are provided - credentialsFile := DefaultCredentialsPath - if credentials.Username != "" && credentials.Password != "" { - if err := updateCredentialsFile(credentialsFile, remoteRegistryURL, zotsyncconfig.Credentials{ - Username: credentials.Username, - Password: credentials.Password, - }); err != nil { - return fmt.Errorf("failed to create credentials file: %w", err) - } - - // Set credentials file path in sync config - syncConfig.CredentialsFile = credentialsFile - } else { - logger.Info("No credentials provided, using default credentials file", "remote_url", remoteRegistryURL) - } - - // Create registry configuration with credentials if provided - // Add http:// scheme if not present for zot sync - registryURL, err := normalizeRegistryURL(remoteRegistryURL) - if err != nil { - return fmt.Errorf("failed to normalize registry URL: %w", err) - } - - // Check if registry already exists - for _, existingRegistry := range syncConfig.Registries { - for _, existingURL := range existingRegistry.URLs { - if existingURL == registryURL { - logger.Debug("Registry already exists in zot config", "registry_url", registryURL) - - return nil - } - } - } - - var syncContent []zotsyncconfig.Content - - if len(cids) > 0 { - // Create a regex to match the CIDs - cidsRegex := strings.Join(cids, "|") - regex := fmt.Sprintf("^(%s)$", cidsRegex) - - // Add the regex to the sync content - syncContent = []zotsyncconfig.Content{ - { - Prefix: 
remoteRepositoryName, - Tags: &zotsyncconfig.Tags{ - Regex: ®ex, - }, - }, - } - } else { - syncContent = []zotsyncconfig.Content{ - { - Prefix: remoteRepositoryName, - }, - } - } - - registry := zotsyncconfig.RegistryConfig{ - URLs: []string{registryURL}, - OnDemand: false, // Disable OnDemand for proactive sync - PollInterval: DefaultPollInterval, - MaxRetries: toPtr(DefaultMaxRetries), - RetryDelay: toPtr(DefaultRetryDelay), - TLSVerify: toPtr(false), - Content: syncContent, - } - syncConfig.Registries = append(syncConfig.Registries, registry) - - logger.Debug("Registry added to zot sync", "remote_url", remoteRegistryURL, "registry_url", registryURL) - - // Write the updated config back to the file - if err := writeConfigFile(filePath, zotConfig); err != nil { - return err - } - - logger.Info("Successfully added registry to zot sync", "remote_url", remoteRegistryURL) - - return nil -} - -// removeRegistryFromSyncConfig removes a registry from the zot sync configuration. -func RemoveRegistryFromSyncConfig(filePath string, remoteRegistryURL string) error { - logger.Debug("Removing registry from zot sync", "remote_registry_url", remoteRegistryURL) - - // Validate input - if remoteRegistryURL == "" { - return errors.New("remote directory URL cannot be empty") - } - - // Read current zot config - zotConfig, err := readConfigFile(filePath) - if err != nil { - return err - } - - // Check if sync config exists - if zotConfig.Extensions == nil || zotConfig.Extensions.Sync == nil { - logger.Debug("No sync configuration found") - - return nil - } - - syncConfig := zotConfig.Extensions.Sync - - // Normalize the URL to match what would be stored - registryURL, err := normalizeRegistryURL(remoteRegistryURL) - if err != nil { - return fmt.Errorf("failed to normalize registry URL: %w", err) - } - - // Find and remove the registry - var filteredRegistries []zotsyncconfig.RegistryConfig - - for _, registry := range syncConfig.Registries { - found := false - - for _, url := range registry.URLs { - if url == registryURL { - found = true - - break - } - } - - if !found { - filteredRegistries = append(filteredRegistries, registry) - } - } - - if len(filteredRegistries) == len(syncConfig.Registries) { - logger.Debug("Registry not found in zot config", "registry_url", registryURL) - - return nil - } - - syncConfig.Registries = filteredRegistries - - // Write the updated config back to the file - if err := writeConfigFile(filePath, zotConfig); err != nil { - return err - } - - logger.Info("Successfully removed registry from zot sync") - - return nil -} - -// normalizeRegistryURL ensures the registry URL has the proper scheme for zot sync. 
-func normalizeRegistryURL(rawURL string) (string, error) {
-	if rawURL == "" {
-		return "", errors.New("registry URL cannot be empty")
-	}
-
-	// Add http:// scheme if not present for zot sync
-	if !strings.HasPrefix(rawURL, "http://") && !strings.HasPrefix(rawURL, "https://") {
-		return "http://" + rawURL, nil
-	}
-
-	// Validate the URL format
-	if _, err := url.Parse(rawURL); err != nil {
-		return "", fmt.Errorf("invalid URL format: %w", err)
-	}
-
-	return rawURL, nil
-}
-
-func toPtr[T any](v T) *T {
-	return &v
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package zot
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	zotconfig "zotregistry.dev/zot/v2/pkg/api/config"
+	zotextensionsconfig "zotregistry.dev/zot/v2/pkg/extensions/config"
+	zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync"
+)
+
+// readConfigFile reads and parses the zot configuration file.
+func readConfigFile(filePath string) (*zotconfig.Config, error) {
+	config, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read zot config file %s: %w", filePath, err)
+	}
+
+	logger.Debug("Read zot config file", "file", string(config))
+
+	var zotConfig zotconfig.Config
+	if err := json.Unmarshal(config, &zotConfig); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal zot config: %w", err)
+	}
+
+	return &zotConfig, nil
+}
+
+// writeConfigFile marshals and writes the zot configuration file.
+func writeConfigFile(filePath string, zotConfig *zotconfig.Config) error {
+	updatedConfig, err := json.MarshalIndent(zotConfig, "", "  ")
+	if err != nil {
+		return fmt.Errorf("failed to marshal updated zot config: %w", err)
+	}
+
+	if err := os.WriteFile(filePath, updatedConfig, 0o644); err != nil { //nolint:gosec,mnd
+		return fmt.Errorf("failed to write updated zot config: %w", err)
+	}
+
+	return nil
+}
+
+// AddRegistryToSyncConfig adds a registry to the zot sync configuration.
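+//
+// A minimal usage sketch (path, URL, repository, and credentials are
+// illustrative assumptions, not values mandated by this package):
+//
+//	creds := zotsyncconfig.Credentials{Username: "user", Password: "pass"}
+//	err := AddRegistryToSyncConfig("/etc/zot/config.json",
+//		"registry.example.com:5000", "dir/records", creds, nil)
+//
+// Passing CIDs instead of nil restricts the synced tags to ^(cid1|cid2|...)$.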
+func AddRegistryToSyncConfig(filePath string, remoteRegistryURL string, remoteRepositoryName string, credentials zotsyncconfig.Credentials, cids []string) error {
+	logger.Debug("Adding registry to zot sync", "remote_url", remoteRegistryURL)
+
+	// Validate input
+	if remoteRegistryURL == "" {
+		return errors.New("remote registry URL cannot be empty")
+	}
+
+	// Read current zot config
+	zotConfig, err := readConfigFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	// Initialize extensions if nil
+	if zotConfig.Extensions == nil {
+		zotConfig.Extensions = &zotextensionsconfig.ExtensionConfig{}
+	}
+
+	// Initialize sync config if nil
+	syncConfig := zotConfig.Extensions.Sync
+	if syncConfig == nil {
+		syncConfig = &zotsyncconfig.Config{}
+		zotConfig.Extensions.Sync = syncConfig
+	}
+
+	syncConfig.Enable = toPtr(true)
+
+	// Create credentials file if credentials are provided
+	credentialsFile := DefaultCredentialsPath
+	if credentials.Username != "" && credentials.Password != "" {
+		if err := updateCredentialsFile(credentialsFile, remoteRegistryURL, zotsyncconfig.Credentials{
+			Username: credentials.Username,
+			Password: credentials.Password,
+		}); err != nil {
+			return fmt.Errorf("failed to create credentials file: %w", err)
+		}
+
+		// Set credentials file path in sync config
+		syncConfig.CredentialsFile = credentialsFile
+	} else {
+		logger.Info("No credentials provided, using default credentials file", "remote_url", remoteRegistryURL)
+	}
+
+	// Normalize the registry URL for zot sync (adds the http:// scheme if missing)
+	registryURL, err := normalizeRegistryURL(remoteRegistryURL)
+	if err != nil {
+		return fmt.Errorf("failed to normalize registry URL: %w", err)
+	}
+
+	// Check if registry already exists
+	for _, existingRegistry := range syncConfig.Registries {
+		for _, existingURL := range existingRegistry.URLs {
+			if existingURL == registryURL {
+				logger.Debug("Registry already exists in zot config", "registry_url", registryURL)
+
+				return nil
+			}
+		}
+	}
+
+	var syncContent []zotsyncconfig.Content
+
+	if len(cids) > 0 {
+		// Build a regex that matches exactly the provided CIDs as tags, e.g. ^(cid1|cid2)$
+		cidsRegex := strings.Join(cids, "|")
+		regex := fmt.Sprintf("^(%s)$", cidsRegex)
+
+		// Add the regex to the sync content
+		syncContent = []zotsyncconfig.Content{
+			{
+				Prefix: remoteRepositoryName,
+				Tags: &zotsyncconfig.Tags{
+					Regex: &regex,
+				},
+			},
+		}
+	} else {
+		syncContent = []zotsyncconfig.Content{
+			{
+				Prefix: remoteRepositoryName,
+			},
+		}
+	}
+
+	registry := zotsyncconfig.RegistryConfig{
+		URLs:         []string{registryURL},
+		OnDemand:     false, // Disable OnDemand for proactive sync
+		PollInterval: DefaultPollInterval,
+		MaxRetries:   toPtr(DefaultMaxRetries),
+		RetryDelay:   toPtr(DefaultRetryDelay),
+		TLSVerify:    toPtr(false),
+		Content:      syncContent,
+	}
+	syncConfig.Registries = append(syncConfig.Registries, registry)
+
+	logger.Debug("Registry added to zot sync", "remote_url", remoteRegistryURL, "registry_url", registryURL)
+
+	// Write the updated config back to the file
+	if err := writeConfigFile(filePath, zotConfig); err != nil {
+		return err
+	}
+
+	logger.Info("Successfully added registry to zot sync", "remote_url", remoteRegistryURL)
+
+	return nil
+}
+
+// RemoveRegistryFromSyncConfig removes a registry from the zot sync configuration.
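+//
+// Removal is idempotent: a missing sync section or an unknown registry URL is
+// treated as success. A minimal sketch (illustrative values):
+//
+//	err := RemoveRegistryFromSyncConfig("/etc/zot/config.json", "registry.example.com:5000")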
+func RemoveRegistryFromSyncConfig(filePath string, remoteRegistryURL string) error {
+	logger.Debug("Removing registry from zot sync", "remote_registry_url", remoteRegistryURL)
+
+	// Validate input
+	if remoteRegistryURL == "" {
+		return errors.New("remote registry URL cannot be empty")
+	}
+
+	// Read current zot config
+	zotConfig, err := readConfigFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	// Check if sync config exists
+	if zotConfig.Extensions == nil || zotConfig.Extensions.Sync == nil {
+		logger.Debug("No sync configuration found")
+
+		return nil
+	}
+
+	syncConfig := zotConfig.Extensions.Sync
+
+	// Normalize the URL to match what would be stored
+	registryURL, err := normalizeRegistryURL(remoteRegistryURL)
+	if err != nil {
+		return fmt.Errorf("failed to normalize registry URL: %w", err)
+	}
+
+	// Find and remove the registry
+	var filteredRegistries []zotsyncconfig.RegistryConfig
+
+	for _, registry := range syncConfig.Registries {
+		found := false
+
+		for _, url := range registry.URLs {
+			if url == registryURL {
+				found = true
+
+				break
+			}
+		}
+
+		if !found {
+			filteredRegistries = append(filteredRegistries, registry)
+		}
+	}
+
+	if len(filteredRegistries) == len(syncConfig.Registries) {
+		logger.Debug("Registry not found in zot config", "registry_url", registryURL)
+
+		return nil
+	}
+
+	syncConfig.Registries = filteredRegistries
+
+	// Write the updated config back to the file
+	if err := writeConfigFile(filePath, zotConfig); err != nil {
+		return err
+	}
+
+	logger.Info("Successfully removed registry from zot sync")
+
+	return nil
+}
+
+// normalizeRegistryURL ensures the registry URL has the proper scheme for zot sync.
+func normalizeRegistryURL(rawURL string) (string, error) {
+	if rawURL == "" {
+		return "", errors.New("registry URL cannot be empty")
+	}
+
+	// Add http:// scheme if not present for zot sync
+	if !strings.HasPrefix(rawURL, "http://") && !strings.HasPrefix(rawURL, "https://") {
+		return "http://" + rawURL, nil
+	}
+
+	// Validate the URL format
+	if _, err := url.Parse(rawURL); err != nil {
+		return "", fmt.Errorf("invalid URL format: %w", err)
+	}
+
+	return rawURL, nil
+}
+
+func toPtr[T any](v T) *T {
+	return &v
+}
diff --git a/utils/zot/config_test.go b/utils/zot/config_test.go
index 3159d1fed..baa923a7c 100644
--- a/utils/zot/config_test.go
+++ b/utils/zot/config_test.go
@@ -1,694 +1,694 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-// nolint
-package zot
-
-import (
-	"encoding/json"
-	"os"
-	"strings"
-	"testing"
-
-	zotconfig "zotregistry.dev/zot/v2/pkg/api/config"
-	zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync"
-)
-
-func TestReadConfigFile(t *testing.T) {
-	tests := []struct {
-		name        string
-		configData  string
-		wantErr     bool
-		errContains string
-	}{
-		{
-			name: "valid config file",
-			configData: `{
-				"http": {
-					"address": "0.0.0.0",
-					"port": "5000"
-				},
-				"storage": {
-					"rootDirectory": "/var/lib/registry"
-				}
-			}`,
-			wantErr: false,
-		},
-		{
-			name:        "invalid JSON",
-			configData:  `{"invalid": json}`,
-			wantErr:     true,
-			errContains: "failed to unmarshal zot config",
-		},
-		{
-			name:        "empty file",
-			configData:  "",
-			wantErr:     true,
-			errContains: "failed to unmarshal zot config",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			// Create temporary file
-			tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json")
-			if err != nil {
-				t.Fatalf("Failed to create temp file: %v", err)
-			}
-			defer os.Remove(tmpFile.Name())
- - // Write test data - if _, err := tmpFile.WriteString(tt.configData); err != nil { - t.Fatalf("Failed to write test data: %v", err) - } - - tmpFile.Close() - - // Test the function - config, err := readConfigFile(tmpFile.Name()) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error but got none") - } - - if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) { - t.Errorf("Expected error to contain %q, got %q", tt.errContains, err.Error()) - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if config == nil { - t.Errorf("Expected config to be non-nil") - } - } - }) - } - - t.Run("file not found", func(t *testing.T) { - _, err := readConfigFile("/non/existent/file.json") - if err == nil { - t.Errorf("Expected error for non-existent file") - } - - if !strings.Contains(err.Error(), "failed to read zot config file") { - t.Errorf("Expected error to contain 'failed to read zot config file', got %q", err.Error()) - } - }) -} - -func TestWriteConfigFile(t *testing.T) { - t.Run("successful write", func(t *testing.T) { - // Create temporary file - tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - - defer os.Remove(tmpFile.Name()) - tmpFile.Close() - - // Create test config - config := &zotconfig.Config{ - HTTP: zotconfig.HTTPConfig{ - Address: "0.0.0.0", - Port: "5000", - }, - } - - // Test the function - err = writeConfigFile(tmpFile.Name(), config) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify file contents - data, err := os.ReadFile(tmpFile.Name()) - if err != nil { - t.Fatalf("Failed to read written file: %v", err) - } - - var writtenConfig zotconfig.Config - if err := json.Unmarshal(data, &writtenConfig); err != nil { - t.Errorf("Failed to unmarshal written config: %v", err) - } - - if writtenConfig.HTTP.Address != "0.0.0.0" || writtenConfig.HTTP.Port != "5000" { - t.Errorf("Config not written correctly") - } - }) - - t.Run("write to invalid path", func(t *testing.T) { - config := &zotconfig.Config{} - - err := writeConfigFile("/invalid/path/config.json", config) - if err == nil { - t.Errorf("Expected error for invalid path") - } - - if !strings.Contains(err.Error(), "failed to write updated zot config") { - t.Errorf("Expected error to contain 'failed to write updated zot config', got %q", err.Error()) - } - }) -} - -func TestNormalizeRegistryURL(t *testing.T) { - tests := []struct { - name string - input string - expected string - wantErr bool - }{ - { - name: "URL without scheme", - input: "registry.example.com", - expected: "http://registry.example.com", - wantErr: false, - }, - { - name: "URL with http scheme", - input: "http://registry.example.com", - expected: "http://registry.example.com", - wantErr: false, - }, - { - name: "URL with https scheme", - input: "https://registry.example.com", - expected: "https://registry.example.com", - wantErr: false, - }, - { - name: "URL with port", - input: "registry.example.com:5000", - expected: "http://registry.example.com:5000", - wantErr: false, - }, - { - name: "empty URL", - input: "", - wantErr: true, - }, - { - name: "URL with spaces (still valid after normalization)", - input: "registry with spaces.com", - expected: "http://registry with spaces.com", - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result, err := normalizeRegistryURL(tt.input) - - if tt.wantErr { - if err == nil { - t.Errorf("Expected error but got 
none") - } - } else { - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if result != tt.expected { - t.Errorf("Expected %q, got %q", tt.expected, result) - } - } - }) - } -} - -func TestToPtr(t *testing.T) { - t.Run("string pointer", func(t *testing.T) { - str := "test" - - ptr := toPtr(str) - if ptr == nil { - t.Errorf("Expected non-nil pointer") - - return - } - - if *ptr != str { - t.Errorf("Expected %q, got %q", str, *ptr) - } - }) - - t.Run("int pointer", func(t *testing.T) { - num := 42 - - ptr := toPtr(num) - if ptr == nil { - t.Errorf("Expected non-nil pointer") - - return - } - - if *ptr != num { - t.Errorf("Expected %d, got %d", num, *ptr) - } - }) - - t.Run("bool pointer", func(t *testing.T) { - val := true - - ptr := toPtr(val) - if ptr == nil { - t.Errorf("Expected non-nil pointer") - - return - } - - if *ptr != val { - t.Errorf("Expected %t, got %t", val, *ptr) - } - }) -} - -func TestAddRegistryToZotSync(t *testing.T) { - // Helper function to create a basic config file - createBasicConfig := func() string { - tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer tmpFile.Close() - - basicConfig := `{ - "http": { - "address": "0.0.0.0", - "port": "5000" - }, - "storage": { - "rootDirectory": "/var/lib/registry" - } - }` - - if _, err := tmpFile.WriteString(basicConfig); err != nil { - t.Fatalf("Failed to write basic config: %v", err) - } - - return tmpFile.Name() - } - - // Helper function to create config with existing sync - createConfigWithSync := func() string { - tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer tmpFile.Close() - - configWithSync := `{ - "http": { - "address": "0.0.0.0", - "port": "5000" - }, - "storage": { - "rootDirectory": "/var/lib/registry" - }, - "extensions": { - "sync": { - "enable": true, - "registries": [ - { - "urls": ["http://existing.registry.com"], - "onDemand": false, - "pollInterval": 60000000000, - "maxRetries": 3, - "retryDelay": 300000000000, - "tlsVerify": false, - "content": [ - { - "prefix": "existing/repo" - } - ] - } - ] - } - } - }` - - if _, err := tmpFile.WriteString(configWithSync); err != nil { - t.Fatalf("Failed to write config with sync: %v", err) - } - - return tmpFile.Name() - } - - t.Run("add registry to empty config", func(t *testing.T) { - configPath := createBasicConfig() - defer os.Remove(configPath) - - err := AddRegistryToSyncConfig( - configPath, - "registry.example.com", - "test/repo", - zotsyncconfig.Credentials{}, - nil, - ) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify the config was updated - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read updated config: %v", err) - } - - if config.Extensions == nil || config.Extensions.Sync == nil { - t.Errorf("Sync extension not initialized") - } - - if !*config.Extensions.Sync.Enable { - t.Errorf("Sync not enabled") - } - - if len(config.Extensions.Sync.Registries) != 1 { - t.Errorf("Expected 1 registry, got %d", len(config.Extensions.Sync.Registries)) - } - - registry := config.Extensions.Sync.Registries[0] - if len(registry.URLs) != 1 || registry.URLs[0] != "http://registry.example.com" { - t.Errorf("Registry URL not set correctly: %v", registry.URLs) - } - - if len(registry.Content) != 1 || registry.Content[0].Prefix != "test/repo" { - t.Errorf("Registry content not set correctly: %v", registry.Content) - 
} - }) - - t.Run("add registry with credentials", func(t *testing.T) { - configPath := createBasicConfig() - defer os.Remove(configPath) - - // This test will fail because the credentials directory doesn't exist - // but we can verify the error is handled properly - err := AddRegistryToSyncConfig( - configPath, - "registry.example.com", - "test/repo", - zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - }, - nil, - ) - - // Expect an error because /etc/zot directory doesn't exist - if err == nil { - t.Errorf("Expected error when credentials directory doesn't exist") - } else if !strings.Contains(err.Error(), "failed to create credentials file") { - t.Errorf("Expected credentials file error, got: %v", err) - } - }) - - t.Run("add registry with CIDs", func(t *testing.T) { - configPath := createBasicConfig() - defer os.Remove(configPath) - - cids := []string{"cid1", "cid2", "cid3"} - - err := AddRegistryToSyncConfig( - configPath, - "registry.example.com", - "test/repo", - zotsyncconfig.Credentials{}, - cids, - ) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify the config was updated with regex - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read updated config: %v", err) - } - - registry := config.Extensions.Sync.Registries[0] - if len(registry.Content) != 1 { - t.Errorf("Expected 1 content item, got %d", len(registry.Content)) - } - - content := registry.Content[0] - if content.Tags == nil || content.Tags.Regex == nil { - t.Errorf("Tags regex not set") - } - - expectedRegex := "^(cid1|cid2|cid3)$" - if *content.Tags.Regex != expectedRegex { - t.Errorf("Expected regex %q, got %q", expectedRegex, *content.Tags.Regex) - } - }) - - t.Run("add duplicate registry", func(t *testing.T) { - configPath := createConfigWithSync() - defer os.Remove(configPath) - - err := AddRegistryToSyncConfig( - configPath, - "existing.registry.com", - "new/repo", - zotsyncconfig.Credentials{}, - nil, - ) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify no duplicate was added - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read updated config: %v", err) - } - - if len(config.Extensions.Sync.Registries) != 1 { - t.Errorf("Expected 1 registry (no duplicate), got %d", len(config.Extensions.Sync.Registries)) - } - }) - - t.Run("empty registry URL", func(t *testing.T) { - configPath := createBasicConfig() - defer os.Remove(configPath) - - err := AddRegistryToSyncConfig( - configPath, - "", - "test/repo", - zotsyncconfig.Credentials{}, - nil, - ) - - if err == nil { - t.Errorf("Expected error for empty registry URL") - } - - if !strings.Contains(err.Error(), "remote registry URL cannot be empty") { - t.Errorf("Expected error about empty URL, got %q", err.Error()) - } - }) - - t.Run("invalid config file", func(t *testing.T) { - tmpFile, err := os.CreateTemp(t.TempDir(), "invalid-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer os.Remove(tmpFile.Name()) - - if _, err := tmpFile.WriteString("invalid json"); err != nil { - t.Fatalf("Failed to write invalid JSON: %v", err) - } - - tmpFile.Close() - - err = AddRegistryToSyncConfig( - tmpFile.Name(), - "registry.example.com", - "test/repo", - zotsyncconfig.Credentials{}, - nil, - ) - - if err == nil { - t.Errorf("Expected error for invalid config file") - } - }) -} - -func TestRemoveRegistryFromZotSync(t *testing.T) { - // Helper function to create config with sync registries - 
createConfigWithRegistries := func() string { - tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer tmpFile.Close() - - configWithRegistries := `{ - "http": { - "address": "0.0.0.0", - "port": "5000" - }, - "storage": { - "rootDirectory": "/var/lib/registry" - }, - "extensions": { - "sync": { - "enable": true, - "registries": [ - { - "urls": ["http://registry1.example.com"], - "onDemand": false, - "content": [{"prefix": "repo1"}] - }, - { - "urls": ["http://registry2.example.com"], - "onDemand": false, - "content": [{"prefix": "repo2"}] - } - ] - } - } - }` - - if _, err := tmpFile.WriteString(configWithRegistries); err != nil { - t.Fatalf("Failed to write config: %v", err) - } - - return tmpFile.Name() - } - - // Helper function to create basic config without sync - createBasicConfig := func() string { - tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer tmpFile.Close() - - basicConfig := `{ - "http": { - "address": "0.0.0.0", - "port": "5000" - }, - "storage": { - "rootDirectory": "/var/lib/registry" - } - }` - - if _, err := tmpFile.WriteString(basicConfig); err != nil { - t.Fatalf("Failed to write basic config: %v", err) - } - - return tmpFile.Name() - } - - t.Run("remove existing registry", func(t *testing.T) { - configPath := createConfigWithRegistries() - defer os.Remove(configPath) - - err := RemoveRegistryFromSyncConfig(configPath, "registry1.example.com") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify the registry was removed - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read updated config: %v", err) - } - - if len(config.Extensions.Sync.Registries) != 1 { - t.Errorf("Expected 1 registry after removal, got %d", len(config.Extensions.Sync.Registries)) - } - - // Verify the correct registry remains - remaining := config.Extensions.Sync.Registries[0] - if len(remaining.URLs) != 1 || remaining.URLs[0] != "http://registry2.example.com" { - t.Errorf("Wrong registry remained: %v", remaining.URLs) - } - }) - - t.Run("remove non-existent registry", func(t *testing.T) { - configPath := createConfigWithRegistries() - defer os.Remove(configPath) - - err := RemoveRegistryFromSyncConfig(configPath, "nonexistent.registry.com") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify no registries were removed - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read updated config: %v", err) - } - - if len(config.Extensions.Sync.Registries) != 2 { - t.Errorf("Expected 2 registries (no removal), got %d", len(config.Extensions.Sync.Registries)) - } - }) - - t.Run("remove from config without sync", func(t *testing.T) { - configPath := createBasicConfig() - defer os.Remove(configPath) - - err := RemoveRegistryFromSyncConfig(configPath, "registry.example.com") - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify config remains unchanged - config, err := readConfigFile(configPath) - if err != nil { - t.Fatalf("Failed to read config: %v", err) - } - - if config.Extensions != nil && config.Extensions.Sync != nil { - t.Errorf("Sync config should remain nil") - } - }) - - t.Run("empty registry URL", func(t *testing.T) { - configPath := createConfigWithRegistries() - defer os.Remove(configPath) - - err := RemoveRegistryFromSyncConfig(configPath, "") - if err == nil { - 
t.Errorf("Expected error for empty registry URL") - } - - if !strings.Contains(err.Error(), "remote directory URL cannot be empty") { - t.Errorf("Expected error about empty URL, got %q", err.Error()) - } - }) - - t.Run("invalid config file", func(t *testing.T) { - tmpFile, err := os.CreateTemp(t.TempDir(), "invalid-config-*.json") - if err != nil { - t.Fatalf("Failed to create temp file: %v", err) - } - defer os.Remove(tmpFile.Name()) - - if _, err := tmpFile.WriteString("invalid json"); err != nil { - t.Fatalf("Failed to write invalid JSON: %v", err) - } - - tmpFile.Close() - - err = RemoveRegistryFromSyncConfig(tmpFile.Name(), "registry.example.com") - if err == nil { - t.Errorf("Expected error for invalid config file") - } - }) - - t.Run("file not found", func(t *testing.T) { - err := RemoveRegistryFromSyncConfig("/non/existent/file.json", "registry.example.com") - if err == nil { - t.Errorf("Expected error for non-existent file") - } - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// nolint +package zot + +import ( + "encoding/json" + "os" + "strings" + "testing" + + zotconfig "zotregistry.dev/zot/v2/pkg/api/config" + zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" +) + +func TestReadConfigFile(t *testing.T) { + tests := []struct { + name string + configData string + wantErr bool + errContains string + }{ + { + name: "valid config file", + configData: `{ + "http": { + "address": "0.0.0.0", + "port": "5000" + }, + "storage": { + "rootDirectory": "/var/lib/registry" + } + }`, + wantErr: false, + }, + { + name: "invalid JSON", + configData: `{"invalid": json}`, + wantErr: true, + errContains: "failed to unmarshal zot config", + }, + { + name: "empty file", + configData: "", + wantErr: true, + errContains: "failed to unmarshal zot config", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create temporary file + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tmpFile.Name()) + + // Write test data + if _, err := tmpFile.WriteString(tt.configData); err != nil { + t.Fatalf("Failed to write test data: %v", err) + } + + tmpFile.Close() + + // Test the function + config, err := readConfigFile(tmpFile.Name()) + + if tt.wantErr { + if err == nil { + t.Errorf("Expected error but got none") + } + + if tt.errContains != "" && !strings.Contains(err.Error(), tt.errContains) { + t.Errorf("Expected error to contain %q, got %q", tt.errContains, err.Error()) + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if config == nil { + t.Errorf("Expected config to be non-nil") + } + } + }) + } + + t.Run("file not found", func(t *testing.T) { + _, err := readConfigFile("/non/existent/file.json") + if err == nil { + t.Errorf("Expected error for non-existent file") + } + + if !strings.Contains(err.Error(), "failed to read zot config file") { + t.Errorf("Expected error to contain 'failed to read zot config file', got %q", err.Error()) + } + }) +} + +func TestWriteConfigFile(t *testing.T) { + t.Run("successful write", func(t *testing.T) { + // Create temporary file + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + + defer os.Remove(tmpFile.Name()) + tmpFile.Close() + + // Create test config + config := &zotconfig.Config{ + HTTP: zotconfig.HTTPConfig{ + Address: "0.0.0.0", + Port: 
"5000", + }, + } + + // Test the function + err = writeConfigFile(tmpFile.Name(), config) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify file contents + data, err := os.ReadFile(tmpFile.Name()) + if err != nil { + t.Fatalf("Failed to read written file: %v", err) + } + + var writtenConfig zotconfig.Config + if err := json.Unmarshal(data, &writtenConfig); err != nil { + t.Errorf("Failed to unmarshal written config: %v", err) + } + + if writtenConfig.HTTP.Address != "0.0.0.0" || writtenConfig.HTTP.Port != "5000" { + t.Errorf("Config not written correctly") + } + }) + + t.Run("write to invalid path", func(t *testing.T) { + config := &zotconfig.Config{} + + err := writeConfigFile("/invalid/path/config.json", config) + if err == nil { + t.Errorf("Expected error for invalid path") + } + + if !strings.Contains(err.Error(), "failed to write updated zot config") { + t.Errorf("Expected error to contain 'failed to write updated zot config', got %q", err.Error()) + } + }) +} + +func TestNormalizeRegistryURL(t *testing.T) { + tests := []struct { + name string + input string + expected string + wantErr bool + }{ + { + name: "URL without scheme", + input: "registry.example.com", + expected: "http://registry.example.com", + wantErr: false, + }, + { + name: "URL with http scheme", + input: "http://registry.example.com", + expected: "http://registry.example.com", + wantErr: false, + }, + { + name: "URL with https scheme", + input: "https://registry.example.com", + expected: "https://registry.example.com", + wantErr: false, + }, + { + name: "URL with port", + input: "registry.example.com:5000", + expected: "http://registry.example.com:5000", + wantErr: false, + }, + { + name: "empty URL", + input: "", + wantErr: true, + }, + { + name: "URL with spaces (still valid after normalization)", + input: "registry with spaces.com", + expected: "http://registry with spaces.com", + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := normalizeRegistryURL(tt.input) + + if tt.wantErr { + if err == nil { + t.Errorf("Expected error but got none") + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if result != tt.expected { + t.Errorf("Expected %q, got %q", tt.expected, result) + } + } + }) + } +} + +func TestToPtr(t *testing.T) { + t.Run("string pointer", func(t *testing.T) { + str := "test" + + ptr := toPtr(str) + if ptr == nil { + t.Errorf("Expected non-nil pointer") + + return + } + + if *ptr != str { + t.Errorf("Expected %q, got %q", str, *ptr) + } + }) + + t.Run("int pointer", func(t *testing.T) { + num := 42 + + ptr := toPtr(num) + if ptr == nil { + t.Errorf("Expected non-nil pointer") + + return + } + + if *ptr != num { + t.Errorf("Expected %d, got %d", num, *ptr) + } + }) + + t.Run("bool pointer", func(t *testing.T) { + val := true + + ptr := toPtr(val) + if ptr == nil { + t.Errorf("Expected non-nil pointer") + + return + } + + if *ptr != val { + t.Errorf("Expected %t, got %t", val, *ptr) + } + }) +} + +func TestAddRegistryToZotSync(t *testing.T) { + // Helper function to create a basic config file + createBasicConfig := func() string { + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer tmpFile.Close() + + basicConfig := `{ + "http": { + "address": "0.0.0.0", + "port": "5000" + }, + "storage": { + "rootDirectory": "/var/lib/registry" + } + }` + + if _, err := tmpFile.WriteString(basicConfig); err != nil { + 
t.Fatalf("Failed to write basic config: %v", err) + } + + return tmpFile.Name() + } + + // Helper function to create config with existing sync + createConfigWithSync := func() string { + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer tmpFile.Close() + + configWithSync := `{ + "http": { + "address": "0.0.0.0", + "port": "5000" + }, + "storage": { + "rootDirectory": "/var/lib/registry" + }, + "extensions": { + "sync": { + "enable": true, + "registries": [ + { + "urls": ["http://existing.registry.com"], + "onDemand": false, + "pollInterval": 60000000000, + "maxRetries": 3, + "retryDelay": 300000000000, + "tlsVerify": false, + "content": [ + { + "prefix": "existing/repo" + } + ] + } + ] + } + } + }` + + if _, err := tmpFile.WriteString(configWithSync); err != nil { + t.Fatalf("Failed to write config with sync: %v", err) + } + + return tmpFile.Name() + } + + t.Run("add registry to empty config", func(t *testing.T) { + configPath := createBasicConfig() + defer os.Remove(configPath) + + err := AddRegistryToSyncConfig( + configPath, + "registry.example.com", + "test/repo", + zotsyncconfig.Credentials{}, + nil, + ) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify the config was updated + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read updated config: %v", err) + } + + if config.Extensions == nil || config.Extensions.Sync == nil { + t.Errorf("Sync extension not initialized") + } + + if !*config.Extensions.Sync.Enable { + t.Errorf("Sync not enabled") + } + + if len(config.Extensions.Sync.Registries) != 1 { + t.Errorf("Expected 1 registry, got %d", len(config.Extensions.Sync.Registries)) + } + + registry := config.Extensions.Sync.Registries[0] + if len(registry.URLs) != 1 || registry.URLs[0] != "http://registry.example.com" { + t.Errorf("Registry URL not set correctly: %v", registry.URLs) + } + + if len(registry.Content) != 1 || registry.Content[0].Prefix != "test/repo" { + t.Errorf("Registry content not set correctly: %v", registry.Content) + } + }) + + t.Run("add registry with credentials", func(t *testing.T) { + configPath := createBasicConfig() + defer os.Remove(configPath) + + // This test will fail because the credentials directory doesn't exist + // but we can verify the error is handled properly + err := AddRegistryToSyncConfig( + configPath, + "registry.example.com", + "test/repo", + zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + }, + nil, + ) + + // Expect an error because /etc/zot directory doesn't exist + if err == nil { + t.Errorf("Expected error when credentials directory doesn't exist") + } else if !strings.Contains(err.Error(), "failed to create credentials file") { + t.Errorf("Expected credentials file error, got: %v", err) + } + }) + + t.Run("add registry with CIDs", func(t *testing.T) { + configPath := createBasicConfig() + defer os.Remove(configPath) + + cids := []string{"cid1", "cid2", "cid3"} + + err := AddRegistryToSyncConfig( + configPath, + "registry.example.com", + "test/repo", + zotsyncconfig.Credentials{}, + cids, + ) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify the config was updated with regex + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read updated config: %v", err) + } + + registry := config.Extensions.Sync.Registries[0] + if len(registry.Content) != 1 { + t.Errorf("Expected 1 content item, got %d", 
len(registry.Content)) + } + + content := registry.Content[0] + if content.Tags == nil || content.Tags.Regex == nil { + t.Errorf("Tags regex not set") + } + + expectedRegex := "^(cid1|cid2|cid3)$" + if *content.Tags.Regex != expectedRegex { + t.Errorf("Expected regex %q, got %q", expectedRegex, *content.Tags.Regex) + } + }) + + t.Run("add duplicate registry", func(t *testing.T) { + configPath := createConfigWithSync() + defer os.Remove(configPath) + + err := AddRegistryToSyncConfig( + configPath, + "existing.registry.com", + "new/repo", + zotsyncconfig.Credentials{}, + nil, + ) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify no duplicate was added + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read updated config: %v", err) + } + + if len(config.Extensions.Sync.Registries) != 1 { + t.Errorf("Expected 1 registry (no duplicate), got %d", len(config.Extensions.Sync.Registries)) + } + }) + + t.Run("empty registry URL", func(t *testing.T) { + configPath := createBasicConfig() + defer os.Remove(configPath) + + err := AddRegistryToSyncConfig( + configPath, + "", + "test/repo", + zotsyncconfig.Credentials{}, + nil, + ) + + if err == nil { + t.Errorf("Expected error for empty registry URL") + } + + if !strings.Contains(err.Error(), "remote registry URL cannot be empty") { + t.Errorf("Expected error about empty URL, got %q", err.Error()) + } + }) + + t.Run("invalid config file", func(t *testing.T) { + tmpFile, err := os.CreateTemp(t.TempDir(), "invalid-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tmpFile.Name()) + + if _, err := tmpFile.WriteString("invalid json"); err != nil { + t.Fatalf("Failed to write invalid JSON: %v", err) + } + + tmpFile.Close() + + err = AddRegistryToSyncConfig( + tmpFile.Name(), + "registry.example.com", + "test/repo", + zotsyncconfig.Credentials{}, + nil, + ) + + if err == nil { + t.Errorf("Expected error for invalid config file") + } + }) +} + +func TestRemoveRegistryFromZotSync(t *testing.T) { + // Helper function to create config with sync registries + createConfigWithRegistries := func() string { + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer tmpFile.Close() + + configWithRegistries := `{ + "http": { + "address": "0.0.0.0", + "port": "5000" + }, + "storage": { + "rootDirectory": "/var/lib/registry" + }, + "extensions": { + "sync": { + "enable": true, + "registries": [ + { + "urls": ["http://registry1.example.com"], + "onDemand": false, + "content": [{"prefix": "repo1"}] + }, + { + "urls": ["http://registry2.example.com"], + "onDemand": false, + "content": [{"prefix": "repo2"}] + } + ] + } + } + }` + + if _, err := tmpFile.WriteString(configWithRegistries); err != nil { + t.Fatalf("Failed to write config: %v", err) + } + + return tmpFile.Name() + } + + // Helper function to create basic config without sync + createBasicConfig := func() string { + tmpFile, err := os.CreateTemp(t.TempDir(), "zot-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer tmpFile.Close() + + basicConfig := `{ + "http": { + "address": "0.0.0.0", + "port": "5000" + }, + "storage": { + "rootDirectory": "/var/lib/registry" + } + }` + + if _, err := tmpFile.WriteString(basicConfig); err != nil { + t.Fatalf("Failed to write basic config: %v", err) + } + + return tmpFile.Name() + } + + t.Run("remove existing registry", func(t *testing.T) { 
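+		// The helper seeds the config with registry1 and registry2; removing
+		// registry1 should leave only registry2 behind.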
+ configPath := createConfigWithRegistries() + defer os.Remove(configPath) + + err := RemoveRegistryFromSyncConfig(configPath, "registry1.example.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify the registry was removed + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read updated config: %v", err) + } + + if len(config.Extensions.Sync.Registries) != 1 { + t.Errorf("Expected 1 registry after removal, got %d", len(config.Extensions.Sync.Registries)) + } + + // Verify the correct registry remains + remaining := config.Extensions.Sync.Registries[0] + if len(remaining.URLs) != 1 || remaining.URLs[0] != "http://registry2.example.com" { + t.Errorf("Wrong registry remained: %v", remaining.URLs) + } + }) + + t.Run("remove non-existent registry", func(t *testing.T) { + configPath := createConfigWithRegistries() + defer os.Remove(configPath) + + err := RemoveRegistryFromSyncConfig(configPath, "nonexistent.registry.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify no registries were removed + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read updated config: %v", err) + } + + if len(config.Extensions.Sync.Registries) != 2 { + t.Errorf("Expected 2 registries (no removal), got %d", len(config.Extensions.Sync.Registries)) + } + }) + + t.Run("remove from config without sync", func(t *testing.T) { + configPath := createBasicConfig() + defer os.Remove(configPath) + + err := RemoveRegistryFromSyncConfig(configPath, "registry.example.com") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify config remains unchanged + config, err := readConfigFile(configPath) + if err != nil { + t.Fatalf("Failed to read config: %v", err) + } + + if config.Extensions != nil && config.Extensions.Sync != nil { + t.Errorf("Sync config should remain nil") + } + }) + + t.Run("empty registry URL", func(t *testing.T) { + configPath := createConfigWithRegistries() + defer os.Remove(configPath) + + err := RemoveRegistryFromSyncConfig(configPath, "") + if err == nil { + t.Errorf("Expected error for empty registry URL") + } + + if !strings.Contains(err.Error(), "remote directory URL cannot be empty") { + t.Errorf("Expected error about empty URL, got %q", err.Error()) + } + }) + + t.Run("invalid config file", func(t *testing.T) { + tmpFile, err := os.CreateTemp(t.TempDir(), "invalid-config-*.json") + if err != nil { + t.Fatalf("Failed to create temp file: %v", err) + } + defer os.Remove(tmpFile.Name()) + + if _, err := tmpFile.WriteString("invalid json"); err != nil { + t.Fatalf("Failed to write invalid JSON: %v", err) + } + + tmpFile.Close() + + err = RemoveRegistryFromSyncConfig(tmpFile.Name(), "registry.example.com") + if err == nil { + t.Errorf("Expected error for invalid config file") + } + }) + + t.Run("file not found", func(t *testing.T) { + err := RemoveRegistryFromSyncConfig("/non/existent/file.json", "registry.example.com") + if err == nil { + t.Errorf("Expected error for non-existent file") + } + }) +} diff --git a/utils/zot/credentials.go b/utils/zot/credentials.go index 81d10ec28..3551ba8bc 100644 --- a/utils/zot/credentials.go +++ b/utils/zot/credentials.go @@ -1,59 +1,59 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package zot - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "strings" - - zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" -) - -const ( - // DefaultCredentialsPath 
is the default path to the zot credentials file. - DefaultCredentialsPath = "/etc/zot/credentials.json" //nolint:gosec -) - -// updateCredentialsFile updates a credentials file for zot sync. -func updateCredentialsFile(filePath string, remoteRegistryURL string, credentials zotsyncconfig.Credentials) error { - // Load existing credentials or create empty map - credentialsData := make(zotsyncconfig.CredentialsFile) - if credentialsFile, err := os.ReadFile(filePath); err == nil { - if err := json.Unmarshal(credentialsFile, &credentialsData); err != nil { - return fmt.Errorf("failed to unmarshal credentials file: %w", err) - } - } else if !errors.Is(err, os.ErrNotExist) { - return fmt.Errorf("failed to read credentials file: %w", err) - } else { - logger.Debug("Credentials file not found, creating new one", "path", filePath) - } - - // Normalize URL and create credentials key - normalizedURL, err := normalizeRegistryURL(remoteRegistryURL) - if err != nil { - return fmt.Errorf("failed to normalize registry URL: %w", err) - } - - credKey := strings.TrimPrefix(strings.TrimPrefix(normalizedURL, "https://"), "http://") - - // Update credentials - credentialsData[credKey] = credentials - - // Write credentials file - credentialsJSON, err := json.MarshalIndent(credentialsData, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal credentials: %w", err) - } - - if err := os.WriteFile(filePath, credentialsJSON, 0o600); err != nil { //nolint:gosec,mnd - return fmt.Errorf("failed to write credentials file: %w", err) - } - - logger.Debug("Updated credentials file", "path", filePath, "registry", credKey) - - return nil -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +package zot + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" +) + +const ( + // DefaultCredentialsPath is the default path to the zot credentials file. + DefaultCredentialsPath = "/etc/zot/credentials.json" //nolint:gosec +) + +// updateCredentialsFile updates a credentials file for zot sync. 
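+// The file is a JSON map keyed by registry host (scheme stripped via
+// normalizeRegistryURL), so an existing entry for the same registry is
+// overwritten in place; the file is written with 0600 permissions.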
+func updateCredentialsFile(filePath string, remoteRegistryURL string, credentials zotsyncconfig.Credentials) error { + // Load existing credentials or create empty map + credentialsData := make(zotsyncconfig.CredentialsFile) + if credentialsFile, err := os.ReadFile(filePath); err == nil { + if err := json.Unmarshal(credentialsFile, &credentialsData); err != nil { + return fmt.Errorf("failed to unmarshal credentials file: %w", err) + } + } else if !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to read credentials file: %w", err) + } else { + logger.Debug("Credentials file not found, creating new one", "path", filePath) + } + + // Normalize URL and create credentials key + normalizedURL, err := normalizeRegistryURL(remoteRegistryURL) + if err != nil { + return fmt.Errorf("failed to normalize registry URL: %w", err) + } + + credKey := strings.TrimPrefix(strings.TrimPrefix(normalizedURL, "https://"), "http://") + + // Update credentials + credentialsData[credKey] = credentials + + // Write credentials file + credentialsJSON, err := json.MarshalIndent(credentialsData, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal credentials: %w", err) + } + + if err := os.WriteFile(filePath, credentialsJSON, 0o600); err != nil { //nolint:gosec,mnd + return fmt.Errorf("failed to write credentials file: %w", err) + } + + logger.Debug("Updated credentials file", "path", filePath, "registry", credKey) + + return nil +} diff --git a/utils/zot/credentials_test.go b/utils/zot/credentials_test.go index dff633a88..3308f559b 100644 --- a/utils/zot/credentials_test.go +++ b/utils/zot/credentials_test.go @@ -1,364 +1,364 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -// nolint -package zot - -import ( - "encoding/json" - "os" - "path/filepath" - "strings" - "testing" - - zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" -) - -func TestUpdateCredentialsFile(t *testing.T) { - t.Run("create new credentials file", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - credPath := filepath.Join(tmpDir, "credentials.json") - testURL := "registry.example.com" - testCreds := zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - } - - // Test the function - err = updateCredentialsFile(credPath, testURL, testCreds) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify file was created with correct content - data, err := os.ReadFile(credPath) - if err != nil { - t.Fatalf("Failed to read credentials file: %v", err) - } - - var credData zotsyncconfig.CredentialsFile - if err := json.Unmarshal(data, &credData); err != nil { - t.Fatalf("Failed to unmarshal credentials: %v", err) - } - - // The key should be the normalized URL without protocol - expectedKey := "registry.example.com" - if _, exists := credData[expectedKey]; !exists { - t.Errorf("Expected key %q not found in credentials", expectedKey) - } - - if credData[expectedKey].Username != testCreds.Username { - t.Errorf("Expected username %q, got %q", testCreds.Username, credData[expectedKey].Username) - } - - if credData[expectedKey].Password != testCreds.Password { - t.Errorf("Expected password %q, got %q", testCreds.Password, credData[expectedKey].Password) - } - - // Check file permissions - info, err := os.Stat(credPath) - if err != nil { - t.Fatalf("Failed to stat 
credentials file: %v", err) - } - - expectedPerm := os.FileMode(0o600) - if info.Mode().Perm() != expectedPerm { - t.Errorf("Expected file permissions %v, got %v", expectedPerm, info.Mode().Perm()) - } - }) - - t.Run("update existing credentials file", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - credPath := filepath.Join(tmpDir, "credentials.json") - - // Create existing credentials file - existingCreds := zotsyncconfig.CredentialsFile{ - "existing.registry.com": { - Username: "existinguser", - Password: "existingpass", - }, - } - - existingData, err := json.MarshalIndent(existingCreds, "", " ") - if err != nil { - t.Fatalf("Failed to marshal existing credentials: %v", err) - } - - if err := os.WriteFile(credPath, existingData, 0o600); err != nil { - t.Fatalf("Failed to write existing credentials: %v", err) - } - - // Add new credentials - testURL := "https://new.registry.com" - testCreds := zotsyncconfig.Credentials{ - Username: "newuser", - Password: "newpass", - } - - err = updateCredentialsFile(credPath, testURL, testCreds) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify both old and new credentials exist - data, err := os.ReadFile(credPath) - if err != nil { - t.Fatalf("Failed to read updated credentials file: %v", err) - } - - var credData zotsyncconfig.CredentialsFile - if err := json.Unmarshal(data, &credData); err != nil { - t.Fatalf("Failed to unmarshal updated credentials: %v", err) - } - - // Check existing credentials are preserved - if _, exists := credData["existing.registry.com"]; !exists { - t.Errorf("Existing credentials were lost") - } - - // Check new credentials were added (normalized URL without https://) - expectedNewKey := "new.registry.com" - if _, exists := credData[expectedNewKey]; !exists { - t.Errorf("New credentials were not added with key %q", expectedNewKey) - } - - if credData[expectedNewKey].Username != testCreds.Username { - t.Errorf("Expected new username %q, got %q", testCreds.Username, credData[expectedNewKey].Username) - } - }) - - t.Run("update existing registry credentials", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - credPath := filepath.Join(tmpDir, "credentials.json") - - // Create existing credentials file - existingCreds := zotsyncconfig.CredentialsFile{ - "registry.example.com": { - Username: "olduser", - Password: "oldpass", - }, - } - - existingData, err := json.MarshalIndent(existingCreds, "", " ") - if err != nil { - t.Fatalf("Failed to marshal existing credentials: %v", err) - } - - if err := os.WriteFile(credPath, existingData, 0o600); err != nil { - t.Fatalf("Failed to write existing credentials: %v", err) - } - - // Update existing credentials - testURL := "registry.example.com" - testCreds := zotsyncconfig.Credentials{ - Username: "newuser", - Password: "newpass", - } - - err = updateCredentialsFile(credPath, testURL, testCreds) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - // Verify credentials were updated - data, err := os.ReadFile(credPath) - if err != nil { - t.Fatalf("Failed to read updated credentials file: %v", err) - } - - var credData zotsyncconfig.CredentialsFile - if err := json.Unmarshal(data, &credData); err != nil { - t.Fatalf("Failed to unmarshal updated 
credentials: %v", err) - } - - if len(credData) != 1 { - t.Errorf("Expected 1 credential entry, got %d", len(credData)) - } - - if credData["registry.example.com"].Username != testCreds.Username { - t.Errorf("Expected updated username %q, got %q", testCreds.Username, credData["registry.example.com"].Username) - } - - if credData["registry.example.com"].Password != testCreds.Password { - t.Errorf("Expected updated password %q, got %q", testCreds.Password, credData["registry.example.com"].Password) - } - }) - - t.Run("handle different URL formats", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - testCases := []struct { - name string - inputURL string - expectedKey string - }{ - { - name: "URL without protocol", - inputURL: "registry.example.com", - expectedKey: "registry.example.com", - }, - { - name: "URL with http protocol", - inputURL: "http://registry.example.com", - expectedKey: "registry.example.com", - }, - { - name: "URL with https protocol", - inputURL: "https://registry.example.com", - expectedKey: "registry.example.com", - }, - { - name: "URL with port", - inputURL: "registry.example.com:5000", - expectedKey: "registry.example.com:5000", - }, - { - name: "URL with https and port", - inputURL: "https://registry.example.com:5000", - expectedKey: "registry.example.com:5000", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - credPath := filepath.Join(tmpDir, tc.name+"_credentials.json") - testCreds := zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - } - - err := updateCredentialsFile(credPath, tc.inputURL, testCreds) - if err != nil { - t.Errorf("Unexpected error for %s: %v", tc.name, err) - } - - // Verify the key is normalized correctly - data, err := os.ReadFile(credPath) - if err != nil { - t.Fatalf("Failed to read credentials file for %s: %v", tc.name, err) - } - - var credData zotsyncconfig.CredentialsFile - if err := json.Unmarshal(data, &credData); err != nil { - t.Fatalf("Failed to unmarshal credentials for %s: %v", tc.name, err) - } - - if _, exists := credData[tc.expectedKey]; !exists { - t.Errorf("Expected key %q not found for %s, got keys: %v", tc.expectedKey, tc.name, getKeys(credData)) - } - }) - } - }) - - t.Run("invalid existing credentials file", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - credPath := filepath.Join(tmpDir, "credentials.json") - - // Write invalid JSON - if err := os.WriteFile(credPath, []byte("invalid json"), 0o600); err != nil { - t.Fatalf("Failed to write invalid JSON: %v", err) - } - - testURL := "registry.example.com" - testCreds := zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - } - - err = updateCredentialsFile(credPath, testURL, testCreds) - if err == nil { - t.Errorf("Expected error for invalid JSON file") - } - - if !strings.Contains(err.Error(), "failed to unmarshal credentials file") { - t.Errorf("Expected unmarshal error, got: %v", err) - } - }) - - t.Run("invalid registry URL", func(t *testing.T) { - // Create temporary directory - tmpDir, err := os.MkdirTemp("", "zot-creds-*") - if err != nil { - t.Fatalf("Failed to create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - credPath := filepath.Join(tmpDir, "credentials.json") - 
testCreds := zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - } - - err = updateCredentialsFile(credPath, "", testCreds) - if err == nil { - t.Errorf("Expected error for empty registry URL") - } - - if !strings.Contains(err.Error(), "failed to normalize registry URL") { - t.Errorf("Expected URL normalization error, got: %v", err) - } - }) - - t.Run("write to invalid directory", func(t *testing.T) { - invalidPath := "/invalid/directory/credentials.json" - testCreds := zotsyncconfig.Credentials{ - Username: "testuser", - Password: "testpass", - } - - err := updateCredentialsFile(invalidPath, "registry.example.com", testCreds) - if err == nil { - t.Errorf("Expected error for invalid directory path") - } - - if !strings.Contains(err.Error(), "failed to write credentials file") { - t.Errorf("Expected write error, got: %v", err) - } - }) - - t.Run("file read permission error", func(t *testing.T) { - // This test is challenging to implement portably since it requires - // creating a file with specific permissions that cause read errors - // Skip this test for now as it's platform-specific - t.Skip("Skipping file permission test - platform specific") - }) -} - -// Helper function to get keys from CredentialsFile for debugging. -func getKeys(credData zotsyncconfig.CredentialsFile) []string { - keys := make([]string, 0, len(credData)) - for k := range credData { - keys = append(keys, k) - } - - return keys -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +// nolint +package zot + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + zotsyncconfig "zotregistry.dev/zot/v2/pkg/extensions/config/sync" +) + +func TestUpdateCredentialsFile(t *testing.T) { + t.Run("create new credentials file", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + credPath := filepath.Join(tmpDir, "credentials.json") + testURL := "registry.example.com" + testCreds := zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + } + + // Test the function + err = updateCredentialsFile(credPath, testURL, testCreds) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify file was created with correct content + data, err := os.ReadFile(credPath) + if err != nil { + t.Fatalf("Failed to read credentials file: %v", err) + } + + var credData zotsyncconfig.CredentialsFile + if err := json.Unmarshal(data, &credData); err != nil { + t.Fatalf("Failed to unmarshal credentials: %v", err) + } + + // The key should be the normalized URL without protocol + expectedKey := "registry.example.com" + if _, exists := credData[expectedKey]; !exists { + t.Errorf("Expected key %q not found in credentials", expectedKey) + } + + if credData[expectedKey].Username != testCreds.Username { + t.Errorf("Expected username %q, got %q", testCreds.Username, credData[expectedKey].Username) + } + + if credData[expectedKey].Password != testCreds.Password { + t.Errorf("Expected password %q, got %q", testCreds.Password, credData[expectedKey].Password) + } + + // Check file permissions + info, err := os.Stat(credPath) + if err != nil { + t.Fatalf("Failed to stat credentials file: %v", err) + } + + expectedPerm := os.FileMode(0o600) + if info.Mode().Perm() != expectedPerm { + t.Errorf("Expected file permissions %v, got %v", expectedPerm, info.Mode().Perm()) + } + }) + + 
t.Run("update existing credentials file", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + credPath := filepath.Join(tmpDir, "credentials.json") + + // Create existing credentials file + existingCreds := zotsyncconfig.CredentialsFile{ + "existing.registry.com": { + Username: "existinguser", + Password: "existingpass", + }, + } + + existingData, err := json.MarshalIndent(existingCreds, "", " ") + if err != nil { + t.Fatalf("Failed to marshal existing credentials: %v", err) + } + + if err := os.WriteFile(credPath, existingData, 0o600); err != nil { + t.Fatalf("Failed to write existing credentials: %v", err) + } + + // Add new credentials + testURL := "https://new.registry.com" + testCreds := zotsyncconfig.Credentials{ + Username: "newuser", + Password: "newpass", + } + + err = updateCredentialsFile(credPath, testURL, testCreds) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify both old and new credentials exist + data, err := os.ReadFile(credPath) + if err != nil { + t.Fatalf("Failed to read updated credentials file: %v", err) + } + + var credData zotsyncconfig.CredentialsFile + if err := json.Unmarshal(data, &credData); err != nil { + t.Fatalf("Failed to unmarshal updated credentials: %v", err) + } + + // Check existing credentials are preserved + if _, exists := credData["existing.registry.com"]; !exists { + t.Errorf("Existing credentials were lost") + } + + // Check new credentials were added (normalized URL without https://) + expectedNewKey := "new.registry.com" + if _, exists := credData[expectedNewKey]; !exists { + t.Errorf("New credentials were not added with key %q", expectedNewKey) + } + + if credData[expectedNewKey].Username != testCreds.Username { + t.Errorf("Expected new username %q, got %q", testCreds.Username, credData[expectedNewKey].Username) + } + }) + + t.Run("update existing registry credentials", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + credPath := filepath.Join(tmpDir, "credentials.json") + + // Create existing credentials file + existingCreds := zotsyncconfig.CredentialsFile{ + "registry.example.com": { + Username: "olduser", + Password: "oldpass", + }, + } + + existingData, err := json.MarshalIndent(existingCreds, "", " ") + if err != nil { + t.Fatalf("Failed to marshal existing credentials: %v", err) + } + + if err := os.WriteFile(credPath, existingData, 0o600); err != nil { + t.Fatalf("Failed to write existing credentials: %v", err) + } + + // Update existing credentials + testURL := "registry.example.com" + testCreds := zotsyncconfig.Credentials{ + Username: "newuser", + Password: "newpass", + } + + err = updateCredentialsFile(credPath, testURL, testCreds) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + // Verify credentials were updated + data, err := os.ReadFile(credPath) + if err != nil { + t.Fatalf("Failed to read updated credentials file: %v", err) + } + + var credData zotsyncconfig.CredentialsFile + if err := json.Unmarshal(data, &credData); err != nil { + t.Fatalf("Failed to unmarshal updated credentials: %v", err) + } + + if len(credData) != 1 { + t.Errorf("Expected 1 credential entry, got %d", len(credData)) + } + + if credData["registry.example.com"].Username != testCreds.Username { + 
t.Errorf("Expected updated username %q, got %q", testCreds.Username, credData["registry.example.com"].Username) + } + + if credData["registry.example.com"].Password != testCreds.Password { + t.Errorf("Expected updated password %q, got %q", testCreds.Password, credData["registry.example.com"].Password) + } + }) + + t.Run("handle different URL formats", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + testCases := []struct { + name string + inputURL string + expectedKey string + }{ + { + name: "URL without protocol", + inputURL: "registry.example.com", + expectedKey: "registry.example.com", + }, + { + name: "URL with http protocol", + inputURL: "http://registry.example.com", + expectedKey: "registry.example.com", + }, + { + name: "URL with https protocol", + inputURL: "https://registry.example.com", + expectedKey: "registry.example.com", + }, + { + name: "URL with port", + inputURL: "registry.example.com:5000", + expectedKey: "registry.example.com:5000", + }, + { + name: "URL with https and port", + inputURL: "https://registry.example.com:5000", + expectedKey: "registry.example.com:5000", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + credPath := filepath.Join(tmpDir, tc.name+"_credentials.json") + testCreds := zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + } + + err := updateCredentialsFile(credPath, tc.inputURL, testCreds) + if err != nil { + t.Errorf("Unexpected error for %s: %v", tc.name, err) + } + + // Verify the key is normalized correctly + data, err := os.ReadFile(credPath) + if err != nil { + t.Fatalf("Failed to read credentials file for %s: %v", tc.name, err) + } + + var credData zotsyncconfig.CredentialsFile + if err := json.Unmarshal(data, &credData); err != nil { + t.Fatalf("Failed to unmarshal credentials for %s: %v", tc.name, err) + } + + if _, exists := credData[tc.expectedKey]; !exists { + t.Errorf("Expected key %q not found for %s, got keys: %v", tc.expectedKey, tc.name, getKeys(credData)) + } + }) + } + }) + + t.Run("invalid existing credentials file", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + credPath := filepath.Join(tmpDir, "credentials.json") + + // Write invalid JSON + if err := os.WriteFile(credPath, []byte("invalid json"), 0o600); err != nil { + t.Fatalf("Failed to write invalid JSON: %v", err) + } + + testURL := "registry.example.com" + testCreds := zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + } + + err = updateCredentialsFile(credPath, testURL, testCreds) + if err == nil { + t.Errorf("Expected error for invalid JSON file") + } + + if !strings.Contains(err.Error(), "failed to unmarshal credentials file") { + t.Errorf("Expected unmarshal error, got: %v", err) + } + }) + + t.Run("invalid registry URL", func(t *testing.T) { + // Create temporary directory + tmpDir, err := os.MkdirTemp("", "zot-creds-*") + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + credPath := filepath.Join(tmpDir, "credentials.json") + testCreds := zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + } + + err = updateCredentialsFile(credPath, "", testCreds) + if err == nil { + t.Errorf("Expected error for 
empty registry URL") + } + + if !strings.Contains(err.Error(), "failed to normalize registry URL") { + t.Errorf("Expected URL normalization error, got: %v", err) + } + }) + + t.Run("write to invalid directory", func(t *testing.T) { + invalidPath := "/invalid/directory/credentials.json" + testCreds := zotsyncconfig.Credentials{ + Username: "testuser", + Password: "testpass", + } + + err := updateCredentialsFile(invalidPath, "registry.example.com", testCreds) + if err == nil { + t.Errorf("Expected error for invalid directory path") + } + + if !strings.Contains(err.Error(), "failed to write credentials file") { + t.Errorf("Expected write error, got: %v", err) + } + }) + + t.Run("file read permission error", func(t *testing.T) { + // This test is challenging to implement portably since it requires + // creating a file with specific permissions that cause read errors + // Skip this test for now as it's platform-specific + t.Skip("Skipping file permission test - platform specific") + }) +} + +// Helper function to get keys from CredentialsFile for debugging. +func getKeys(credData zotsyncconfig.CredentialsFile) []string { + keys := make([]string, 0, len(credData)) + for k := range credData { + keys = append(keys, k) + } + + return keys +} diff --git a/utils/zot/zot.go b/utils/zot/zot.go index fdd1b4543..fbf319315 100644 --- a/utils/zot/zot.go +++ b/utils/zot/zot.go @@ -1,274 +1,274 @@ -// Copyright AGNTCY Contributors (https://github.com/agntcy) -// SPDX-License-Identifier: Apache-2.0 - -package zot - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/agntcy/dir/utils/logging" -) - -var logger = logging.Logger("utils/zot") - -const ( - // DefaultZotConfigPath is the default path to the zot configuration file. - DefaultZotConfigPath = "/etc/zot/config.json" - - // DefaultPollInterval is the default interval for polling new content. - DefaultPollInterval = time.Second * 60 - - // DefaultRetryDelay is the default delay between retries. - DefaultRetryDelay = time.Minute * 5 - - // DefaultMaxRetries is the default maximum number of retries. - DefaultMaxRetries = 3 -) - -// VerifyConfig contains configuration for zot verification. -type VerifyConfig struct { - RegistryAddress string - RepositoryName string - Username string - Password string - AccessToken string - Insecure bool -} - -// UploadPublicKeyOptions contains options for uploading public keys to zot. -type UploadPublicKeyOptions struct { - Config *VerifyConfig - PublicKey string -} - -// VerificationOptions contains options for zot verification. -type VerificationOptions struct { - Config *VerifyConfig - RecordCID string -} - -// VerificationResult contains the result of zot verification. -type VerificationResult struct { - IsSigned bool - IsTrusted bool - Author string - Tool string -} - -// UploadPublicKey uploads a public key to zot for signature verification. -// This enables zot to mark signatures as "trusted" when they can be verified with this key. 
-func UploadPublicKey(ctx context.Context, opts *UploadPublicKeyOptions) error {
-	logger.Debug("Uploading public key to zot for signature verification")
-
-	if opts.PublicKey == "" {
-		return errors.New("public key is required")
-	}
-
-	// Get registry URL for zot cosign endpoint
-	registryURL := buildRegistryURL(opts.Config.RegistryAddress, opts.Config.Insecure)
-	uploadEndpoint := registryURL + "/v2/_zot/ext/cosign"
-
-	// Create HTTP request with public key as body
-	req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadEndpoint, strings.NewReader(opts.PublicKey))
-	if err != nil {
-		return fmt.Errorf("failed to create upload request: %w", err)
-	}
-
-	req.Header.Set("Content-Type", "application/octet-stream")
-
-	// Add authentication
-	addAuthentication(req, opts.Config)
-
-	// Create HTTP client and execute request
-	client := &http.Client{}
-
-	resp, err := client.Do(req)
-	if err != nil {
-		return fmt.Errorf("failed to upload public key: %w", err)
-	}
-
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
-		body, _ := io.ReadAll(resp.Body)
-
-		return fmt.Errorf("failed to upload public key, status: %d, response: %s", resp.StatusCode, string(body))
-	}
-
-	logger.Debug("Successfully uploaded public key to zot", "endpoint", uploadEndpoint)
-
-	return nil
-}
-
-// Verify queries zot's verification API to check if a signature is valid.
-func Verify(ctx context.Context, opts *VerificationOptions) (*VerificationResult, error) {
-	// Build zot search endpoint URL
-	registryURL := buildRegistryURL(opts.Config.RegistryAddress, opts.Config.Insecure)
-	searchEndpoint := registryURL + "/v2/_zot/ext/search"
-	logger.Debug("Querying zot for signature verification", "endpoint", searchEndpoint, "recordCID", opts.RecordCID)
-
-	// Create GraphQL query for signature verification
-	query := fmt.Sprintf(`{
-		Image(image: "%s:%s") {
-			Digest
-			IsSigned
-			Tag
-			SignatureInfo {
-				Tool
-				IsTrusted
-				Author
-			}
-		}
-	}`, opts.Config.RepositoryName, opts.RecordCID)
-
-	graphqlQuery := map[string]interface{}{
-		"query": query,
-	}
-
-	jsonData, err := json.Marshal(graphqlQuery)
-	if err != nil {
-		return nil, fmt.Errorf("failed to marshal GraphQL query: %w", err)
-	}
-
-	// Create HTTP request
-	req, err := http.NewRequestWithContext(ctx, http.MethodPost, searchEndpoint, bytes.NewBuffer(jsonData))
-	if err != nil {
-		return nil, fmt.Errorf("failed to create verification request: %w", err)
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-
-	// Add authentication
-	addAuthentication(req, opts.Config)
-
-	// Create HTTP client and execute request
-	client := &http.Client{}
-
-	resp, err := client.Do(req)
-	if err != nil {
-		return nil, fmt.Errorf("failed to query zot verification: %w", err)
-	}
-
-	defer resp.Body.Close()
-
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read verification response: %w", err)
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		logger.Debug("Verification query returned non-success status", "status", resp.StatusCode, "body", string(body))
-
-		return nil, fmt.Errorf("verification query returned status %d: %s", resp.StatusCode, string(body))
-	}
-
-	// Parse GraphQL response
-	var graphqlResp struct {
-		Data struct {
-			Image struct {
-				Digest        string `json:"Digest"`
-				IsSigned      bool   `json:"IsSigned"`
-				Tag           string `json:"Tag"`
-				SignatureInfo []struct {
-					Tool      string `json:"Tool"`
-					IsTrusted bool   `json:"IsTrusted"`
-					Author    string `json:"Author"`
-				} `json:"SignatureInfo"`
-			} `json:"Image"`
-		} `json:"data"`
-	}
-
-	if err := json.Unmarshal(body, &graphqlResp); err != nil {
-		return nil, fmt.Errorf("failed to decode verification response: %w", err)
-	}
-
-	// Build result
-	result := &VerificationResult{
-		IsSigned:  graphqlResp.Data.Image.IsSigned,
-		IsTrusted: false,
-	}
-
-	// Extract signature info if available
-	if len(graphqlResp.Data.Image.SignatureInfo) > 0 {
-		sigInfo := graphqlResp.Data.Image.SignatureInfo[0]
-		result.IsTrusted = sigInfo.IsTrusted
-		result.Author = sigInfo.Author
-		result.Tool = sigInfo.Tool
-	}
-
-	logger.Debug("Zot verification result", "recordCID", opts.RecordCID, "isSigned", result.IsSigned, "isTrusted", result.IsTrusted)
-
-	return result, nil
-}
-
-// buildRegistryURL constructs the registry URL with proper protocol.
-func buildRegistryURL(registryURL string, insecure bool) string {
-	// If URL already has a protocol, return as-is
-	if strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://") {
-		return registryURL
-	}
-
-	// Add appropriate protocol based on insecure flag
-	if insecure {
-		return "http://" + registryURL
-	}
-
-	return "https://" + registryURL
-}
-
-// addAuthentication adds authentication headers to HTTP requests.
-func addAuthentication(req *http.Request, config *VerifyConfig) {
-	if config.Username != "" && config.Password != "" {
-		req.SetBasicAuth(config.Username, config.Password)
-	} else if config.AccessToken != "" {
-		req.Header.Set("Authorization", "Bearer "+config.AccessToken)
-	}
-}
-
-// CheckReadiness checks if Zot is ready to serve traffic by querying its /readyz endpoint.
-// Returns true if Zot responds with 200 OK, false otherwise.
-func CheckReadiness(ctx context.Context, registryAddress string, insecure bool) bool {
-	// Build URL to Zot's readiness endpoint
-	registryURL := buildRegistryURL(registryAddress, insecure)
-	readyzURL := registryURL + "/readyz"
-
-	// Create HTTP request with context
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, readyzURL, nil)
-	if err != nil {
-		logger.Debug("Failed to create readiness check request", "error", err, "url", readyzURL)
-
-		return false
-	}
-
-	// Create HTTP client with timeout
-	client := &http.Client{
-		Timeout: 5 * time.Second, //nolint:mnd
-	}
-
-	// Execute request
-	resp, err := client.Do(req)
-	if err != nil {
-		logger.Debug("Zot readiness check failed", "error", err, "url", readyzURL)
-
-		return false
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode == http.StatusOK {
-		logger.Debug("Zot readiness check passed", "address", registryAddress)
-
-		return true
-	}
-
-	logger.Debug("Zot not ready", "address", registryAddress, "status", resp.StatusCode)
-
-	return false
-}
+// Copyright AGNTCY Contributors (https://github.com/agntcy)
+// SPDX-License-Identifier: Apache-2.0
+
+package zot
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/agntcy/dir/utils/logging"
+)
+
+var logger = logging.Logger("utils/zot")
+
+const (
+	// DefaultZotConfigPath is the default path to the zot configuration file.
+	DefaultZotConfigPath = "/etc/zot/config.json"
+
+	// DefaultPollInterval is the default interval for polling new content.
+	DefaultPollInterval = time.Second * 60
+
+	// DefaultRetryDelay is the default delay between retries.
+	DefaultRetryDelay = time.Minute * 5
+
+	// DefaultMaxRetries is the default maximum number of retries.
+	DefaultMaxRetries = 3
+)
+
+// VerifyConfig contains configuration for zot verification.
+type VerifyConfig struct {
+	RegistryAddress string
+	RepositoryName  string
+	Username        string
+	Password        string
+	AccessToken     string
+	Insecure        bool
+}
+
+// UploadPublicKeyOptions contains options for uploading public keys to zot.
+type UploadPublicKeyOptions struct {
+	Config    *VerifyConfig
+	PublicKey string
+}
+
+// VerificationOptions contains options for zot verification.
+type VerificationOptions struct {
+	Config    *VerifyConfig
+	RecordCID string
+}
+
+// VerificationResult contains the result of zot verification.
+type VerificationResult struct {
+	IsSigned  bool
+	IsTrusted bool
+	Author    string
+	Tool      string
+}
+
+// UploadPublicKey uploads a public key to zot for signature verification.
+// This enables zot to mark signatures as "trusted" when they can be verified with this key.
+func UploadPublicKey(ctx context.Context, opts *UploadPublicKeyOptions) error {
+	logger.Debug("Uploading public key to zot for signature verification")
+
+	if opts.PublicKey == "" {
+		return errors.New("public key is required")
+	}
+
+	// Get registry URL for zot cosign endpoint
+	registryURL := buildRegistryURL(opts.Config.RegistryAddress, opts.Config.Insecure)
+	uploadEndpoint := registryURL + "/v2/_zot/ext/cosign"
+
+	// Create HTTP request with public key as body
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadEndpoint, strings.NewReader(opts.PublicKey))
+	if err != nil {
+		return fmt.Errorf("failed to create upload request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/octet-stream")
+
+	// Add authentication
+	addAuthentication(req, opts.Config)
+
+	// Create HTTP client and execute request
+	client := &http.Client{}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to upload public key: %w", err)
+	}
+
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
+		body, _ := io.ReadAll(resp.Body)
+
+		return fmt.Errorf("failed to upload public key, status: %d, response: %s", resp.StatusCode, string(body))
+	}
+
+	logger.Debug("Successfully uploaded public key to zot", "endpoint", uploadEndpoint)
+
+	return nil
+}
+
+// Verify queries zot's verification API to check if a signature is valid.
+func Verify(ctx context.Context, opts *VerificationOptions) (*VerificationResult, error) {
+	// Build zot search endpoint URL
+	registryURL := buildRegistryURL(opts.Config.RegistryAddress, opts.Config.Insecure)
+	searchEndpoint := registryURL + "/v2/_zot/ext/search"
+	logger.Debug("Querying zot for signature verification", "endpoint", searchEndpoint, "recordCID", opts.RecordCID)
+
+	// Create GraphQL query for signature verification
+	query := fmt.Sprintf(`{
+		Image(image: "%s:%s") {
+			Digest
+			IsSigned
+			Tag
+			SignatureInfo {
+				Tool
+				IsTrusted
+				Author
+			}
+		}
+	}`, opts.Config.RepositoryName, opts.RecordCID)
+
+	graphqlQuery := map[string]interface{}{
+		"query": query,
+	}
+
+	jsonData, err := json.Marshal(graphqlQuery)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal GraphQL query: %w", err)
+	}
+
+	// Create HTTP request
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, searchEndpoint, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return nil, fmt.Errorf("failed to create verification request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+
+	// Add authentication
+	addAuthentication(req, opts.Config)
+
+	// Create HTTP client and execute request
+	client := &http.Client{}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query zot verification: %w", err)
+	}
+
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read verification response: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		logger.Debug("Verification query returned non-success status", "status", resp.StatusCode, "body", string(body))
+
+		return nil, fmt.Errorf("verification query returned status %d: %s", resp.StatusCode, string(body))
+	}
+
+	// Parse GraphQL response
+	var graphqlResp struct {
+		Data struct {
+			Image struct {
+				Digest        string `json:"Digest"`
+				IsSigned      bool   `json:"IsSigned"`
+				Tag           string `json:"Tag"`
+				SignatureInfo []struct {
+					Tool      string `json:"Tool"`
+					IsTrusted bool   `json:"IsTrusted"`
+					Author    string `json:"Author"`
+				} `json:"SignatureInfo"`
+			} `json:"Image"`
+		} `json:"data"`
+	}
+
+	if err := json.Unmarshal(body, &graphqlResp); err != nil {
+		return nil, fmt.Errorf("failed to decode verification response: %w", err)
+	}
+
+	// Build result
+	result := &VerificationResult{
+		IsSigned:  graphqlResp.Data.Image.IsSigned,
+		IsTrusted: false,
+	}
+
+	// Extract signature info if available
+	if len(graphqlResp.Data.Image.SignatureInfo) > 0 {
+		sigInfo := graphqlResp.Data.Image.SignatureInfo[0]
+		result.IsTrusted = sigInfo.IsTrusted
+		result.Author = sigInfo.Author
+		result.Tool = sigInfo.Tool
+	}
+
+	logger.Debug("Zot verification result", "recordCID", opts.RecordCID, "isSigned", result.IsSigned, "isTrusted", result.IsTrusted)
+
+	return result, nil
+}
+
+// buildRegistryURL constructs the registry URL with proper protocol.
+func buildRegistryURL(registryURL string, insecure bool) string {
+	// If URL already has a protocol, return as-is
+	if strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://") {
+		return registryURL
+	}
+
+	// Add appropriate protocol based on insecure flag
+	if insecure {
+		return "http://" + registryURL
+	}
+
+	return "https://" + registryURL
+}
+
+// addAuthentication adds authentication headers to HTTP requests.
+func addAuthentication(req *http.Request, config *VerifyConfig) {
+	if config.Username != "" && config.Password != "" {
+		req.SetBasicAuth(config.Username, config.Password)
+	} else if config.AccessToken != "" {
+		req.Header.Set("Authorization", "Bearer "+config.AccessToken)
+	}
+}
+
+// CheckReadiness checks if Zot is ready to serve traffic by querying its /readyz endpoint.
+// Returns true if Zot responds with 200 OK, false otherwise.
+func CheckReadiness(ctx context.Context, registryAddress string, insecure bool) bool {
+	// Build URL to Zot's readiness endpoint
+	registryURL := buildRegistryURL(registryAddress, insecure)
+	readyzURL := registryURL + "/readyz"
+
+	// Create HTTP request with context
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, readyzURL, nil)
+	if err != nil {
+		logger.Debug("Failed to create readiness check request", "error", err, "url", readyzURL)
+
+		return false
+	}
+
+	// Create HTTP client with timeout
+	client := &http.Client{
+		Timeout: 5 * time.Second, //nolint:mnd
+	}
+
+	// Execute request
+	resp, err := client.Do(req)
+	if err != nil {
+		logger.Debug("Zot readiness check failed", "error", err, "url", readyzURL)
+
+		return false
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusOK {
+		logger.Debug("Zot readiness check passed", "address", registryAddress)
+
+		return true
+	}
+
+	logger.Debug("Zot not ready", "address", registryAddress, "status", resp.StatusCode)
+
+	return false
+}
diff --git a/utils/zot/zot_test.go b/utils/zot/zot_test.go
index def9fe346..914073aa2 100644
--- a/utils/zot/zot_test.go
+++ b/utils/zot/zot_test.go
@@ -1,586 +1,586 @@
-// Copyright AGNTCY Contributors (https://github.com/agntcy)
-// SPDX-License-Identifier: Apache-2.0
-
-//nolint
-package zot
-
-import (
-	"encoding/json"
-	"net/http"
-	"net/http/httptest"
-	"strings"
-	"testing"
-)
-
-func TestBuildRegistryURL(t *testing.T) {
-	tests := []struct {
-		name     string
-		config   *VerifyConfig
-		expected string
-	}{
-		{
-			name: "address without protocol, secure",
-			config: &VerifyConfig{
-				RegistryAddress: "registry.example.com",
-				Insecure:        false,
-			},
-			expected: "https://registry.example.com",
-		},
-		{
-			name: "address without protocol, insecure",
-			config: &VerifyConfig{
-				RegistryAddress: "registry.example.com",
-				Insecure:        true,
-			},
-			expected: "http://registry.example.com",
-		},
-		{
-			name: "address with https protocol",
-			config: &VerifyConfig{
-				RegistryAddress: "https://registry.example.com",
-				Insecure:        true, // Should be ignored
-			},
-			expected: "https://registry.example.com",
-		},
-		{
-			name: "address with http protocol",
-			config: &VerifyConfig{
-				RegistryAddress: "http://registry.example.com",
-				Insecure:        false, // Should be ignored
-			},
-			expected: "http://registry.example.com",
-		},
-		{
-			name: "address with port, secure",
-			config: &VerifyConfig{
-				RegistryAddress: "registry.example.com:5000",
-				Insecure:        false,
-			},
-			expected: "https://registry.example.com:5000",
-		},
-		{
-			name: "address with port, insecure",
-			config: &VerifyConfig{
-				RegistryAddress: "registry.example.com:5000",
-				Insecure:        true,
-			},
-			expected: "http://registry.example.com:5000",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			result := buildRegistryURL(tt.config.RegistryAddress, tt.config.Insecure)
-			if result != tt.expected {
-				t.Errorf("Expected %q, got %q", tt.expected, result)
-			}
-		})
-	}
-}
-
-func TestAddAuthentication(t *testing.T) {
-	t.Run("basic auth", func(t *testing.T) {
-		req, err := http.NewRequest(http.MethodGet, "http://example.com", nil)
"http://example.com", nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - config := &VerifyConfig{ - Username: "testuser", - Password: "testpass", - } - - addAuthentication(req, config) - - username, password, ok := req.BasicAuth() - if !ok { - t.Errorf("Expected basic auth to be set") - } - - if username != "testuser" { - t.Errorf("Expected username %q, got %q", "testuser", username) - } - - if password != "testpass" { - t.Errorf("Expected password %q, got %q", "testpass", password) - } - }) - - t.Run("bearer token", func(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - config := &VerifyConfig{ - AccessToken: "test-token", - } - - addAuthentication(req, config) - - authHeader := req.Header.Get("Authorization") - expected := "Bearer test-token" - - if authHeader != expected { - t.Errorf("Expected authorization header %q, got %q", expected, authHeader) - } - }) - - t.Run("basic auth takes precedence over bearer token", func(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - config := &VerifyConfig{ - Username: "testuser", - Password: "testpass", - AccessToken: "test-token", - } - - addAuthentication(req, config) - - username, password, ok := req.BasicAuth() - if !ok { - t.Errorf("Expected basic auth to be set") - } - - if username != "testuser" || password != "testpass" { - t.Errorf("Expected basic auth, got username=%q, password=%q", username, password) - } - - // Bearer token should not be set when basic auth is present - authHeader := req.Header.Get("Authorization") - if strings.Contains(authHeader, "Bearer") { - t.Errorf("Expected no Bearer token when basic auth is set, got %q", authHeader) - } - }) - - t.Run("no authentication", func(t *testing.T) { - req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - config := &VerifyConfig{} - - addAuthentication(req, config) - - _, _, ok := req.BasicAuth() - if ok { - t.Errorf("Expected no basic auth to be set") - } - - authHeader := req.Header.Get("Authorization") - if authHeader != "" { - t.Errorf("Expected no authorization header, got %q", authHeader) - } - }) -} - -func TestUploadPublicKey(t *testing.T) { - t.Run("successful upload", func(t *testing.T) { - // Create test server - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Verify request method and path - if r.Method != http.MethodPost { - t.Errorf("Expected POST method, got %s", r.Method) - } - - expectedPath := "/v2/_zot/ext/cosign" - if r.URL.Path != expectedPath { - t.Errorf("Expected path %q, got %q", expectedPath, r.URL.Path) - } - - // Verify content type - contentType := r.Header.Get("Content-Type") - if contentType != "application/octet-stream" { - t.Errorf("Expected Content-Type 'application/octet-stream', got %q", contentType) - } - - // Verify authentication - username, password, ok := r.BasicAuth() - if !ok || username != "testuser" || password != "testpass" { - t.Errorf("Expected basic auth with testuser/testpass") - } - - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - // Extract host from server URL - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &UploadPublicKeyOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - Username: "testuser", - 
Password: "testpass", - Insecure: true, - }, - PublicKey: "test-public-key", - } - - err := UploadPublicKey(t.Context(), opts) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - }) - - t.Run("empty public key", func(t *testing.T) { - opts := &UploadPublicKeyOptions{ - Config: &VerifyConfig{ - RegistryAddress: "registry.example.com", - }, - PublicKey: "", - } - - err := UploadPublicKey(t.Context(), opts) - if err == nil { - t.Errorf("Expected error for empty public key") - } - - if !strings.Contains(err.Error(), "public key is required") { - t.Errorf("Expected 'public key is required' error, got %q", err.Error()) - } - }) - - t.Run("server error response", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("Internal server error")) - })) - defer server.Close() - - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &UploadPublicKeyOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - Insecure: true, - }, - PublicKey: "test-public-key", - } - - err := UploadPublicKey(t.Context(), opts) - if err == nil { - t.Errorf("Expected error for server error response") - } - - if !strings.Contains(err.Error(), "failed to upload public key") { - t.Errorf("Expected upload error, got %q", err.Error()) - } - }) -} - -func TestVerify(t *testing.T) { - t.Run("successful verification with signature", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Verify request method and path - if r.Method != http.MethodPost { - t.Errorf("Expected POST method, got %s", r.Method) - } - - expectedPath := "/v2/_zot/ext/search" - if r.URL.Path != expectedPath { - t.Errorf("Expected path %q, got %q", expectedPath, r.URL.Path) - } - - // Verify content type - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" { - t.Errorf("Expected Content-Type 'application/json', got %q", contentType) - } - - // Verify GraphQL query in request body - var queryData map[string]interface{} - - err := json.NewDecoder(r.Body).Decode(&queryData) - if err != nil { - t.Errorf("Failed to decode request body: %v", err) - } - - query, ok := queryData["query"].(string) - if !ok { - t.Errorf("Expected query field in request") - } - - if !strings.Contains(query, "test-repo:test-cid") { - t.Errorf("Expected query to contain 'test-repo:test-cid', got %q", query) - } - - // Return successful response - response := map[string]interface{}{ - "data": map[string]interface{}{ - "Image": map[string]interface{}{ - "Digest": "sha256:abcdef123456", - "IsSigned": true, - "Tag": "test-cid", - "SignatureInfo": []map[string]interface{}{ - { - "Tool": "cosign", - "IsTrusted": true, - "Author": "test@example.com", - }, - }, - }, - }, - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(response) - })) - defer server.Close() - - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &VerificationOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - RepositoryName: "test-repo", - Username: "testuser", - Password: "testpass", - Insecure: true, - }, - RecordCID: "test-cid", - } - - result, err := Verify(t.Context(), opts) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if result == nil { - t.Fatalf("Expected result to be non-nil") - } - - if !result.IsSigned { - t.Errorf("Expected IsSigned to be true") - } - - if !result.IsTrusted { - 
t.Errorf("Expected IsTrusted to be true") - } - - if result.Author != "test@example.com" { - t.Errorf("Expected Author 'test@example.com', got %q", result.Author) - } - - if result.Tool != "cosign" { - t.Errorf("Expected Tool 'cosign', got %q", result.Tool) - } - }) - - t.Run("verification without signature", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - response := map[string]interface{}{ - "data": map[string]interface{}{ - "Image": map[string]interface{}{ - "Digest": "sha256:abcdef123456", - "IsSigned": false, - "Tag": "test-cid", - "SignatureInfo": []map[string]interface{}{}, - }, - }, - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(response) - })) - defer server.Close() - - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &VerificationOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - RepositoryName: "test-repo", - Insecure: true, - }, - RecordCID: "test-cid", - } - - result, err := Verify(t.Context(), opts) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - if result == nil { - t.Fatalf("Expected result to be non-nil") - } - - if result.IsSigned { - t.Errorf("Expected IsSigned to be false") - } - - if result.IsTrusted { - t.Errorf("Expected IsTrusted to be false") - } - - if result.Author != "" { - t.Errorf("Expected empty Author, got %q", result.Author) - } - - if result.Tool != "" { - t.Errorf("Expected empty Tool, got %q", result.Tool) - } - }) - - t.Run("server error response", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("Internal server error")) - })) - defer server.Close() - - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &VerificationOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - RepositoryName: "test-repo", - Insecure: true, - }, - RecordCID: "test-cid", - } - - result, err := Verify(t.Context(), opts) - if err == nil { - t.Errorf("Expected error for server error response") - } - - if result != nil { - t.Errorf("Expected nil result on error") - } - - if !strings.Contains(err.Error(), "verification query returned status 500") { - t.Errorf("Expected status error, got %q", err.Error()) - } - }) - - t.Run("invalid JSON response", func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("invalid json")) - })) - defer server.Close() - - serverURL := strings.TrimPrefix(server.URL, "http://") - - opts := &VerificationOptions{ - Config: &VerifyConfig{ - RegistryAddress: serverURL, - RepositoryName: "test-repo", - Insecure: true, - }, - RecordCID: "test-cid", - } - - result, err := Verify(t.Context(), opts) - if err == nil { - t.Errorf("Expected error for invalid JSON response") - } - - if result != nil { - t.Errorf("Expected nil result on error") - } - - if !strings.Contains(err.Error(), "failed to decode verification response") { - t.Errorf("Expected decode error, got %q", err.Error()) - } - }) -} - -func TestStructs(t *testing.T) { - t.Run("VerifyConfig", func(t *testing.T) { - config := &VerifyConfig{ - RegistryAddress: "registry.example.com", - RepositoryName: "test-repo", - Username: "testuser", - Password: "testpass", - AccessToken: "test-token", - Insecure: true, - } - - if config.RegistryAddress != "registry.example.com" 
{ - t.Errorf("Expected RegistryAddress 'registry.example.com', got %q", config.RegistryAddress) - } - - if config.RepositoryName != "test-repo" { - t.Errorf("Expected RepositoryName 'test-repo', got %q", config.RepositoryName) - } - - if config.Username != "testuser" { - t.Errorf("Expected Username 'testuser', got %q", config.Username) - } - - if config.Password != "testpass" { - t.Errorf("Expected Password 'testpass', got %q", config.Password) - } - - if config.AccessToken != "test-token" { - t.Errorf("Expected AccessToken 'test-token', got %q", config.AccessToken) - } - - if !config.Insecure { - t.Errorf("Expected Insecure to be true") - } - }) - - t.Run("UploadPublicKeyOptions", func(t *testing.T) { - config := &VerifyConfig{RegistryAddress: "registry.example.com"} - opts := &UploadPublicKeyOptions{ - Config: config, - PublicKey: "test-key", - } - - if opts.Config != config { - t.Errorf("Expected Config to be set correctly") - } - - if opts.PublicKey != "test-key" { - t.Errorf("Expected PublicKey 'test-key', got %q", opts.PublicKey) - } - }) - - t.Run("VerificationOptions", func(t *testing.T) { - config := &VerifyConfig{RegistryAddress: "registry.example.com"} - opts := &VerificationOptions{ - Config: config, - RecordCID: "test-cid", - } - - if opts.Config != config { - t.Errorf("Expected Config to be set correctly") - } - - if opts.RecordCID != "test-cid" { - t.Errorf("Expected RecordCID 'test-cid', got %q", opts.RecordCID) - } - }) - - t.Run("VerificationResult", func(t *testing.T) { - result := &VerificationResult{ - IsSigned: true, - IsTrusted: true, - Author: "test@example.com", - Tool: "cosign", - } - - if !result.IsSigned { - t.Errorf("Expected IsSigned to be true") - } - - if !result.IsTrusted { - t.Errorf("Expected IsTrusted to be true") - } - - if result.Author != "test@example.com" { - t.Errorf("Expected Author 'test@example.com', got %q", result.Author) - } - - if result.Tool != "cosign" { - t.Errorf("Expected Tool 'cosign', got %q", result.Tool) - } - }) -} +// Copyright AGNTCY Contributors (https://github.com/agntcy) +// SPDX-License-Identifier: Apache-2.0 + +//nolint +package zot + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestBuildRegistryURL(t *testing.T) { + tests := []struct { + name string + config *VerifyConfig + expected string + }{ + { + name: "address without protocol, secure", + config: &VerifyConfig{ + RegistryAddress: "registry.example.com", + Insecure: false, + }, + expected: "https://registry.example.com", + }, + { + name: "address without protocol, insecure", + config: &VerifyConfig{ + RegistryAddress: "registry.example.com", + Insecure: true, + }, + expected: "http://registry.example.com", + }, + { + name: "address with https protocol", + config: &VerifyConfig{ + RegistryAddress: "https://registry.example.com", + Insecure: true, // Should be ignored + }, + expected: "https://registry.example.com", + }, + { + name: "address with http protocol", + config: &VerifyConfig{ + RegistryAddress: "http://registry.example.com", + Insecure: false, // Should be ignored + }, + expected: "http://registry.example.com", + }, + { + name: "address with port, secure", + config: &VerifyConfig{ + RegistryAddress: "registry.example.com:5000", + Insecure: false, + }, + expected: "https://registry.example.com:5000", + }, + { + name: "address with port, insecure", + config: &VerifyConfig{ + RegistryAddress: "registry.example.com:5000", + Insecure: true, + }, + expected: "http://registry.example.com:5000", + }, + } + + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildRegistryURL(tt.config.RegistryAddress, tt.config.Insecure) + if result != tt.expected { + t.Errorf("Expected %q, got %q", tt.expected, result) + } + }) + } +} + +func TestAddAuthentication(t *testing.T) { + t.Run("basic auth", func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + + config := &VerifyConfig{ + Username: "testuser", + Password: "testpass", + } + + addAuthentication(req, config) + + username, password, ok := req.BasicAuth() + if !ok { + t.Errorf("Expected basic auth to be set") + } + + if username != "testuser" { + t.Errorf("Expected username %q, got %q", "testuser", username) + } + + if password != "testpass" { + t.Errorf("Expected password %q, got %q", "testpass", password) + } + }) + + t.Run("bearer token", func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + + config := &VerifyConfig{ + AccessToken: "test-token", + } + + addAuthentication(req, config) + + authHeader := req.Header.Get("Authorization") + expected := "Bearer test-token" + + if authHeader != expected { + t.Errorf("Expected authorization header %q, got %q", expected, authHeader) + } + }) + + t.Run("basic auth takes precedence over bearer token", func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + + config := &VerifyConfig{ + Username: "testuser", + Password: "testpass", + AccessToken: "test-token", + } + + addAuthentication(req, config) + + username, password, ok := req.BasicAuth() + if !ok { + t.Errorf("Expected basic auth to be set") + } + + if username != "testuser" || password != "testpass" { + t.Errorf("Expected basic auth, got username=%q, password=%q", username, password) + } + + // Bearer token should not be set when basic auth is present + authHeader := req.Header.Get("Authorization") + if strings.Contains(authHeader, "Bearer") { + t.Errorf("Expected no Bearer token when basic auth is set, got %q", authHeader) + } + }) + + t.Run("no authentication", func(t *testing.T) { + req, err := http.NewRequest(http.MethodGet, "http://example.com", nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + + config := &VerifyConfig{} + + addAuthentication(req, config) + + _, _, ok := req.BasicAuth() + if ok { + t.Errorf("Expected no basic auth to be set") + } + + authHeader := req.Header.Get("Authorization") + if authHeader != "" { + t.Errorf("Expected no authorization header, got %q", authHeader) + } + }) +} + +func TestUploadPublicKey(t *testing.T) { + t.Run("successful upload", func(t *testing.T) { + // Create test server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Verify request method and path + if r.Method != http.MethodPost { + t.Errorf("Expected POST method, got %s", r.Method) + } + + expectedPath := "/v2/_zot/ext/cosign" + if r.URL.Path != expectedPath { + t.Errorf("Expected path %q, got %q", expectedPath, r.URL.Path) + } + + // Verify content type + contentType := r.Header.Get("Content-Type") + if contentType != "application/octet-stream" { + t.Errorf("Expected Content-Type 'application/octet-stream', got %q", contentType) + } + + // Verify authentication + username, password, ok := r.BasicAuth() + if !ok || username != 
"testuser" || password != "testpass" { + t.Errorf("Expected basic auth with testuser/testpass") + } + + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + // Extract host from server URL + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &UploadPublicKeyOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + Username: "testuser", + Password: "testpass", + Insecure: true, + }, + PublicKey: "test-public-key", + } + + err := UploadPublicKey(t.Context(), opts) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + }) + + t.Run("empty public key", func(t *testing.T) { + opts := &UploadPublicKeyOptions{ + Config: &VerifyConfig{ + RegistryAddress: "registry.example.com", + }, + PublicKey: "", + } + + err := UploadPublicKey(t.Context(), opts) + if err == nil { + t.Errorf("Expected error for empty public key") + } + + if !strings.Contains(err.Error(), "public key is required") { + t.Errorf("Expected 'public key is required' error, got %q", err.Error()) + } + }) + + t.Run("server error response", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("Internal server error")) + })) + defer server.Close() + + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &UploadPublicKeyOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + Insecure: true, + }, + PublicKey: "test-public-key", + } + + err := UploadPublicKey(t.Context(), opts) + if err == nil { + t.Errorf("Expected error for server error response") + } + + if !strings.Contains(err.Error(), "failed to upload public key") { + t.Errorf("Expected upload error, got %q", err.Error()) + } + }) +} + +func TestVerify(t *testing.T) { + t.Run("successful verification with signature", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Verify request method and path + if r.Method != http.MethodPost { + t.Errorf("Expected POST method, got %s", r.Method) + } + + expectedPath := "/v2/_zot/ext/search" + if r.URL.Path != expectedPath { + t.Errorf("Expected path %q, got %q", expectedPath, r.URL.Path) + } + + // Verify content type + contentType := r.Header.Get("Content-Type") + if contentType != "application/json" { + t.Errorf("Expected Content-Type 'application/json', got %q", contentType) + } + + // Verify GraphQL query in request body + var queryData map[string]interface{} + + err := json.NewDecoder(r.Body).Decode(&queryData) + if err != nil { + t.Errorf("Failed to decode request body: %v", err) + } + + query, ok := queryData["query"].(string) + if !ok { + t.Errorf("Expected query field in request") + } + + if !strings.Contains(query, "test-repo:test-cid") { + t.Errorf("Expected query to contain 'test-repo:test-cid', got %q", query) + } + + // Return successful response + response := map[string]interface{}{ + "data": map[string]interface{}{ + "Image": map[string]interface{}{ + "Digest": "sha256:abcdef123456", + "IsSigned": true, + "Tag": "test-cid", + "SignatureInfo": []map[string]interface{}{ + { + "Tool": "cosign", + "IsTrusted": true, + "Author": "test@example.com", + }, + }, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &VerificationOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + RepositoryName: "test-repo", + 
Username: "testuser", + Password: "testpass", + Insecure: true, + }, + RecordCID: "test-cid", + } + + result, err := Verify(t.Context(), opts) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if result == nil { + t.Fatalf("Expected result to be non-nil") + } + + if !result.IsSigned { + t.Errorf("Expected IsSigned to be true") + } + + if !result.IsTrusted { + t.Errorf("Expected IsTrusted to be true") + } + + if result.Author != "test@example.com" { + t.Errorf("Expected Author 'test@example.com', got %q", result.Author) + } + + if result.Tool != "cosign" { + t.Errorf("Expected Tool 'cosign', got %q", result.Tool) + } + }) + + t.Run("verification without signature", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + response := map[string]interface{}{ + "data": map[string]interface{}{ + "Image": map[string]interface{}{ + "Digest": "sha256:abcdef123456", + "IsSigned": false, + "Tag": "test-cid", + "SignatureInfo": []map[string]interface{}{}, + }, + }, + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) + })) + defer server.Close() + + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &VerificationOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + RepositoryName: "test-repo", + Insecure: true, + }, + RecordCID: "test-cid", + } + + result, err := Verify(t.Context(), opts) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if result == nil { + t.Fatalf("Expected result to be non-nil") + } + + if result.IsSigned { + t.Errorf("Expected IsSigned to be false") + } + + if result.IsTrusted { + t.Errorf("Expected IsTrusted to be false") + } + + if result.Author != "" { + t.Errorf("Expected empty Author, got %q", result.Author) + } + + if result.Tool != "" { + t.Errorf("Expected empty Tool, got %q", result.Tool) + } + }) + + t.Run("server error response", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("Internal server error")) + })) + defer server.Close() + + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &VerificationOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + RepositoryName: "test-repo", + Insecure: true, + }, + RecordCID: "test-cid", + } + + result, err := Verify(t.Context(), opts) + if err == nil { + t.Errorf("Expected error for server error response") + } + + if result != nil { + t.Errorf("Expected nil result on error") + } + + if !strings.Contains(err.Error(), "verification query returned status 500") { + t.Errorf("Expected status error, got %q", err.Error()) + } + }) + + t.Run("invalid JSON response", func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte("invalid json")) + })) + defer server.Close() + + serverURL := strings.TrimPrefix(server.URL, "http://") + + opts := &VerificationOptions{ + Config: &VerifyConfig{ + RegistryAddress: serverURL, + RepositoryName: "test-repo", + Insecure: true, + }, + RecordCID: "test-cid", + } + + result, err := Verify(t.Context(), opts) + if err == nil { + t.Errorf("Expected error for invalid JSON response") + } + + if result != nil { + t.Errorf("Expected nil result on error") + } + + if !strings.Contains(err.Error(), "failed to decode verification response") { + t.Errorf("Expected decode 
error, got %q", err.Error()) + } + }) +} + +func TestStructs(t *testing.T) { + t.Run("VerifyConfig", func(t *testing.T) { + config := &VerifyConfig{ + RegistryAddress: "registry.example.com", + RepositoryName: "test-repo", + Username: "testuser", + Password: "testpass", + AccessToken: "test-token", + Insecure: true, + } + + if config.RegistryAddress != "registry.example.com" { + t.Errorf("Expected RegistryAddress 'registry.example.com', got %q", config.RegistryAddress) + } + + if config.RepositoryName != "test-repo" { + t.Errorf("Expected RepositoryName 'test-repo', got %q", config.RepositoryName) + } + + if config.Username != "testuser" { + t.Errorf("Expected Username 'testuser', got %q", config.Username) + } + + if config.Password != "testpass" { + t.Errorf("Expected Password 'testpass', got %q", config.Password) + } + + if config.AccessToken != "test-token" { + t.Errorf("Expected AccessToken 'test-token', got %q", config.AccessToken) + } + + if !config.Insecure { + t.Errorf("Expected Insecure to be true") + } + }) + + t.Run("UploadPublicKeyOptions", func(t *testing.T) { + config := &VerifyConfig{RegistryAddress: "registry.example.com"} + opts := &UploadPublicKeyOptions{ + Config: config, + PublicKey: "test-key", + } + + if opts.Config != config { + t.Errorf("Expected Config to be set correctly") + } + + if opts.PublicKey != "test-key" { + t.Errorf("Expected PublicKey 'test-key', got %q", opts.PublicKey) + } + }) + + t.Run("VerificationOptions", func(t *testing.T) { + config := &VerifyConfig{RegistryAddress: "registry.example.com"} + opts := &VerificationOptions{ + Config: config, + RecordCID: "test-cid", + } + + if opts.Config != config { + t.Errorf("Expected Config to be set correctly") + } + + if opts.RecordCID != "test-cid" { + t.Errorf("Expected RecordCID 'test-cid', got %q", opts.RecordCID) + } + }) + + t.Run("VerificationResult", func(t *testing.T) { + result := &VerificationResult{ + IsSigned: true, + IsTrusted: true, + Author: "test@example.com", + Tool: "cosign", + } + + if !result.IsSigned { + t.Errorf("Expected IsSigned to be true") + } + + if !result.IsTrusted { + t.Errorf("Expected IsTrusted to be true") + } + + if result.Author != "test@example.com" { + t.Errorf("Expected Author 'test@example.com', got %q", result.Author) + } + + if result.Tool != "cosign" { + t.Errorf("Expected Tool 'cosign', got %q", result.Tool) + } + }) +} diff --git a/versions.yaml b/versions.yaml index eb0513f3a..ac8985ae0 100644 --- a/versions.yaml +++ b/versions.yaml @@ -1,19 +1,19 @@ -# Copyright AGNTCY Contributors (https://github.com/agntcy) -# SPDX-License-Identifier: Apache-2.0 - -module-sets: - directory: - version: v0.6.0 - modules: - - github.com/agntcy/dir/api - - github.com/agntcy/dir/client - - github.com/agntcy/dir/e2e - - github.com/agntcy/dir/importer - - github.com/agntcy/dir/mcp - - github.com/agntcy/dir/server - - github.com/agntcy/dir/utils - - cli: - version: v0.6.0 - modules: - - github.com/agntcy/dir/cli +# Copyright AGNTCY Contributors (https://github.com/agntcy) +# SPDX-License-Identifier: Apache-2.0 + +module-sets: + directory: + version: v0.6.0 + modules: + - github.com/agntcy/dir/api + - github.com/agntcy/dir/client + - github.com/agntcy/dir/e2e + - github.com/agntcy/dir/importer + - github.com/agntcy/dir/mcp + - github.com/agntcy/dir/server + - github.com/agntcy/dir/utils + + cli: + version: v0.6.0 + modules: + - github.com/agntcy/dir/cli